Merge pull request #3729 from jwcesign/cronhpa-imp
feat: Support CronFederatedHPA
commit 3909fccf7d
@@ -209,3 +209,17 @@ webhooks:
    sideEffects: None
    admissionReviewVersions: [ "v1" ]
    timeoutSeconds: 10
  - name: cronfederatedhpa.karmada.io
    rules:
      - operations: ["CREATE", "UPDATE"]
        apiGroups: ["autoscaling.karmada.io"]
        apiVersions: ["*"]
        resources: ["cronfederatedhpas"]
        scope: "Namespaced"
    clientConfig:
      url: https://karmada-webhook.karmada-system.svc:443/validate-cronfederatedhpa
      caBundle: {{caBundle}}
    failurePolicy: Fail
    sideEffects: None
    admissionReviewVersions: [ "v1" ]
    timeoutSeconds: 10
@@ -3,5 +3,7 @@ FROM alpine:3.18.2

ARG BINARY

RUN apk add --no-cache ca-certificates
#tzdata is used to parse the time zone information when using CronFederatedHPA
RUN apk add --no-cache tzdata

COPY ${BINARY} /bin/${BINARY}
@@ -4,5 +4,7 @@ ARG BINARY

ARG TARGETPLATFORM

RUN apk add --no-cache ca-certificates
#tzdata is used to parse the time zone information when using CronFederatedHPA
RUN apk add --no-cache tzdata

COPY ${TARGETPLATFORM}/${BINARY} /bin/${BINARY}
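Both images install tzdata because a CronFederatedHPA rule may carry an IANA time zone name that the controller resolves with `time.LoadLocation` (the controller also imports `_ "time/tzdata"` later in this diff). A minimal standalone sketch of the lookup the images need to support; the zone name is only an illustrative value:

```go
package main

import (
    "fmt"
    "time"
    // Embedding Go's copy of the zone database is an alternative when the
    // image has no /usr/share/zoneinfo; uncomment if tzdata is not installed.
    // _ "time/tzdata"
)

func main() {
    // Resolving an IANA zone name needs zone data, either from the tzdata
    // package installed in the image or from the embedded time/tzdata copy.
    loc, err := time.LoadLocation("Asia/Shanghai")
    if err != nil {
        fmt.Println("time zone data is missing:", err)
        return
    }
    fmt.Println("current time in zone:", time.Now().In(loc).Format(time.RFC3339))
}
```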
@@ -37,6 +37,7 @@ import (
    "github.com/karmada-io/karmada/pkg/controllers/binding"
    "github.com/karmada-io/karmada/pkg/controllers/cluster"
    controllerscontext "github.com/karmada-io/karmada/pkg/controllers/context"
    "github.com/karmada-io/karmada/pkg/controllers/cronfederatedhpa"
    "github.com/karmada-io/karmada/pkg/controllers/execution"
    "github.com/karmada-io/karmada/pkg/controllers/federatedhpa"
    metricsclient "github.com/karmada-io/karmada/pkg/controllers/federatedhpa/metrics"
@@ -204,6 +205,7 @@ func init() {
    controllers["gracefulEviction"] = startGracefulEvictionController
    controllers["applicationFailover"] = startApplicationFailoverController
    controllers["federatedHorizontalPodAutoscaler"] = startFederatedHorizontalPodAutoscalerController
    controllers["cronFederatedHorizontalPodAutoscaler"] = startCronFederatedHorizontalPodAutoscalerController
}

func startClusterController(ctx controllerscontext.Context) (enabled bool, err error) {
@@ -591,6 +593,18 @@ func startFederatedHorizontalPodAutoscalerController(ctx controllerscontext.Cont
    return true, nil
}

func startCronFederatedHorizontalPodAutoscalerController(ctx controllerscontext.Context) (enabled bool, err error) {
    cronFHPAController := cronfederatedhpa.CronFHPAController{
        Client:             ctx.Mgr.GetClient(),
        EventRecorder:      ctx.Mgr.GetEventRecorderFor(cronfederatedhpa.ControllerName),
        RateLimiterOptions: ctx.Opts.RateLimiterOptions,
    }
    if err = cronFHPAController.SetupWithManager(ctx.Mgr); err != nil {
        return false, err
    }
    return true, nil
}

// setupControllers initialize controllers and setup one by one.
func setupControllers(mgr controllerruntime.Manager, opts *options.Options, stopChan <-chan struct{}) {
    restConfig := mgr.GetConfig()
@@ -25,6 +25,7 @@ import (
    "github.com/karmada-io/karmada/pkg/webhook/clusteroverridepolicy"
    "github.com/karmada-io/karmada/pkg/webhook/clusterpropagationpolicy"
    "github.com/karmada-io/karmada/pkg/webhook/configuration"
    "github.com/karmada-io/karmada/pkg/webhook/cronfederatedhpa"
    "github.com/karmada-io/karmada/pkg/webhook/federatedhpa"
    "github.com/karmada-io/karmada/pkg/webhook/federatedresourcequota"
    "github.com/karmada-io/karmada/pkg/webhook/multiclusteringress"
@@ -129,6 +130,7 @@ func Run(ctx context.Context, opts *options.Options) error {
    hookServer.Register("/validate-resourceinterpreterwebhookconfiguration", &webhook.Admission{Handler: &configuration.ValidatingAdmission{}})
    hookServer.Register("/validate-federatedresourcequota", &webhook.Admission{Handler: &federatedresourcequota.ValidatingAdmission{}})
    hookServer.Register("/validate-federatedhpa", &webhook.Admission{Handler: &federatedhpa.ValidatingAdmission{}})
    hookServer.Register("/validate-cronfederatedhpa", &webhook.Admission{Handler: &cronfederatedhpa.ValidatingAdmission{}})
    hookServer.Register("/validate-resourceinterpretercustomization", &webhook.Admission{Handler: &resourceinterpretercustomization.ValidatingAdmission{Client: hookManager.GetClient()}})
    hookServer.Register("/validate-multiclusteringress", &webhook.Admission{Handler: &multiclusteringress.ValidatingAdmission{}})
    hookServer.Register("/mutate-federatedhpa", &webhook.Admission{Handler: &federatedhpa.MutatingAdmission{}})
go.mod
@@ -3,9 +3,11 @@ module github.com/karmada-io/karmada
go 1.20

require (
    github.com/adhocore/gronx v1.6.3
    github.com/distribution/distribution/v3 v3.0.0-20210507173845-9329f6a62b67
    github.com/emirpasic/gods v1.18.1
    github.com/evanphx/json-patch/v5 v5.6.0
    github.com/go-co-op/gocron v1.30.1
    github.com/gogo/protobuf v1.3.2
    github.com/golang/mock v1.6.0
    github.com/google/go-cmp v0.5.9

@@ -133,7 +135,8 @@ require (
    github.com/prometheus/common v0.37.0 // indirect
    github.com/prometheus/procfs v0.8.0 // indirect
    github.com/rivo/uniseg v0.4.2 // indirect
    github.com/rogpeppe/go-internal v1.6.1 // indirect
    github.com/robfig/cron/v3 v3.0.1 // indirect
    github.com/rogpeppe/go-internal v1.8.1 // indirect
    github.com/rs/zerolog v1.26.1 // indirect
    github.com/russross/blackfriday/v2 v2.1.0 // indirect
    github.com/spf13/afero v1.9.3 // indirect
go.sum
@@ -81,6 +81,8 @@ github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbt
github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
github.com/adhocore/gronx v1.6.3 h1:bnm5vieTrY3QQPpsfB0hrAaeaHDpuZTUC2LLCVMLe9c=
github.com/adhocore/gronx v1.6.3/go.mod h1:7oUY1WAU8rEJWmAxXR2DN0JaO4gi9khSgKjiRypqteg=
github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=

@@ -246,6 +248,8 @@ github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2H
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
github.com/go-co-op/gocron v1.30.1 h1:tjWUvJl5KrcwpkEkSXFSQFr4F9h5SfV/m4+RX0cV2fs=
github.com/go-co-op/gocron v1.30.1/go.mod h1:39f6KNSGVOU1LO/ZOoZfcSxwlsJDQOKSu8erN0SH48Y=
github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w=
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=

@@ -649,6 +653,7 @@ github.com/pelletier/go-toml/v2 v2.0.6 h1:nrzqCb7j9cDFj2coyLNLaZuJTLjWjlaz6nvTvI
github.com/pelletier/go-toml/v2 v2.0.6/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek=
github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=

@@ -702,11 +707,14 @@ github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40T
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.2 h1:YwD0ulJSJytLpiaWua0sBDusfsCZohxjxzVTYjwxfV8=
github.com/rivo/uniseg v0.4.2/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg=
github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o=
github.com/rs/xid v1.3.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
github.com/rs/zerolog v1.26.1 h1:/ihwxqH+4z8UxyI70wM1z9yCvkWcfz/a3mj48k/Zngc=
github.com/rs/zerolog v1.26.1/go.mod h1:/wSSJWX7lVrsOwlbyTRSOJvqRlc+WjWlfes+CiJ+tmc=
@@ -1,6 +1,9 @@
package v1alpha1

const (
    // FederatedHPAKind is the kind of FederatedHPA in group autoscaling.karmada.io
    FederatedHPAKind = "FederatedHPA"

    // QuerySourceAnnotationKey is the annotation used in karmada-metrics-adapter to
    // record the query source cluster
    QuerySourceAnnotationKey = "resource.karmada.io/query-from-cluster"
@@ -0,0 +1,204 @@
/*
Copyright 2023 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cronfederatedhpa

import (
    "context"

    corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/equality"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/util/sets"
    "k8s.io/client-go/tools/record"
    "k8s.io/klog/v2"
    controllerruntime "sigs.k8s.io/controller-runtime"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/controller"

    autoscalingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/autoscaling/v1alpha1"
    "github.com/karmada-io/karmada/pkg/sharedcli/ratelimiterflag"
    "github.com/karmada-io/karmada/pkg/util/helper"
)

const (
    // ControllerName is the controller name that will be used when reporting events.
    ControllerName = "cronfederatedhpa-controller"
)

// CronFHPAController is used to operate CronFederatedHPA.
type CronFHPAController struct {
    client.Client // used to operate Cron resources.
    EventRecorder record.EventRecorder

    RateLimiterOptions ratelimiterflag.Options
    CronHandler        *CronHandler
}

// Reconcile performs a full reconciliation for the object referred to by the Request.
// The Controller will requeue the Request to be processed again if an error is non-nil or
// Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
func (c *CronFHPAController) Reconcile(ctx context.Context, req controllerruntime.Request) (controllerruntime.Result, error) {
    klog.V(4).Infof("Reconciling CronFederatedHPA %s", req.NamespacedName)

    cronFHPA := &autoscalingv1alpha1.CronFederatedHPA{}
    if err := c.Client.Get(ctx, req.NamespacedName, cronFHPA); err != nil {
        if apierrors.IsNotFound(err) {
            klog.V(4).Infof("Begin to cleanup the cron jobs for CronFederatedHPA:%s", req.NamespacedName)
            c.CronHandler.StopCronFHPAExecutor(req.NamespacedName.String())
            return controllerruntime.Result{}, nil
        }

        klog.Errorf("Fail to get CronFederatedHPA(%s):%v", req.NamespacedName, err)
        return controllerruntime.Result{Requeue: true}, err
    }

    // If this CronFederatedHPA is deleting, stop all related cron executors
    if !cronFHPA.DeletionTimestamp.IsZero() {
        c.CronHandler.StopCronFHPAExecutor(req.NamespacedName.String())
        return controllerruntime.Result{}, nil
    }

    origRuleSets := sets.New[string]()
    for _, history := range cronFHPA.Status.ExecutionHistories {
        origRuleSets.Insert(history.RuleName)
    }

    // If scale target is updated, stop all the rule executors, and next steps will create the new executors
    if c.CronHandler.CronFHPAScaleTargetRefUpdates(req.NamespacedName.String(), cronFHPA.Spec.ScaleTargetRef) {
        c.CronHandler.StopCronFHPAExecutor(req.NamespacedName.String())
    }

    c.CronHandler.AddCronExecutorIfNotExist(req.NamespacedName.String())

    newRuleSets := sets.New[string]()
    for _, rule := range cronFHPA.Spec.Rules {
        if err := c.processCronRule(cronFHPA, rule); err != nil {
            return controllerruntime.Result{Requeue: true}, err
        }
        newRuleSets.Insert(rule.Name)
    }

    // If rule is deleted, remove the rule executor from the handler
    for name := range origRuleSets {
        if newRuleSets.Has(name) {
            continue
        }
        c.CronHandler.StopRuleExecutor(req.NamespacedName.String(), name)
        if err := c.removeCronFHPAHistory(cronFHPA, name); err != nil {
            return controllerruntime.Result{Requeue: true}, err
        }
    }

    return controllerruntime.Result{}, nil
}

// SetupWithManager creates a controller and registers it with the controller manager.
func (c *CronFHPAController) SetupWithManager(mgr controllerruntime.Manager) error {
    c.CronHandler = NewCronHandler(mgr.GetClient(), mgr.GetEventRecorderFor(ControllerName))
    return controllerruntime.NewControllerManagedBy(mgr).
        For(&autoscalingv1alpha1.CronFederatedHPA{}).
        WithOptions(controller.Options{RateLimiter: ratelimiterflag.DefaultControllerRateLimiter(c.RateLimiterOptions)}).
        Complete(c)
}

// processCronRule processes the cron rule
func (c *CronFHPAController) processCronRule(cronFHPA *autoscalingv1alpha1.CronFederatedHPA, rule autoscalingv1alpha1.CronFederatedHPARule) error {
    cronFHPAKey := helper.GetCronFederatedHPAKey(cronFHPA)
    if ruleOld, exists := c.CronHandler.RuleCronExecutorExists(cronFHPAKey, rule.Name); exists {
        if equality.Semantic.DeepEqual(ruleOld, rule) {
            return nil
        }
        c.CronHandler.StopRuleExecutor(cronFHPAKey, rule.Name)
    }

    if !helper.IsCronFederatedHPARuleSuspend(rule) {
        if err := c.CronHandler.CreateCronJobForExecutor(cronFHPA, rule); err != nil {
            c.EventRecorder.Event(cronFHPA, corev1.EventTypeWarning, "StartRuleFailed", err.Error())
            klog.Errorf("Fail to start cron for CronFederatedHPA(%s) rule(%s):%v", cronFHPAKey, rule.Name, err)
            return err
        }
    }

    if err := c.updateRuleHistory(cronFHPA, rule); err != nil {
        c.EventRecorder.Event(cronFHPA, corev1.EventTypeWarning, "UpdateCronFederatedHPAFailed", err.Error())
        return err
    }
    return nil
}

// updateRuleHistory updates the rule history
func (c *CronFHPAController) updateRuleHistory(cronFHPA *autoscalingv1alpha1.CronFederatedHPA, rule autoscalingv1alpha1.CronFederatedHPARule) error {
    var nextExecutionTime *metav1.Time
    if !helper.IsCronFederatedHPARuleSuspend(rule) {
        // If rule is not suspended, we should set the nextExecutionTime field, or the nextExecutionTime will be nil
        next, err := c.CronHandler.GetRuleNextExecuteTime(cronFHPA, rule.Name)
        if err != nil {
            klog.Errorf("Fail to get next execution time for CronFederatedHPA(%s/%s) rule(%s):%v",
                cronFHPA.Namespace, cronFHPA.Name, rule.Name, err)
            return err
        }
        nextExecutionTime = &metav1.Time{Time: next}
    }

    exists := false
    for index, history := range cronFHPA.Status.ExecutionHistories {
        if history.RuleName != rule.Name {
            continue
        }
        exists = true
        cronFHPA.Status.ExecutionHistories[index].NextExecutionTime = nextExecutionTime
        break
    }

    if !exists {
        ruleHistory := autoscalingv1alpha1.ExecutionHistory{
            RuleName:          rule.Name,
            NextExecutionTime: nextExecutionTime,
        }
        cronFHPA.Status.ExecutionHistories = append(cronFHPA.Status.ExecutionHistories, ruleHistory)
    }

    if err := c.Client.Status().Update(context.Background(), cronFHPA); err != nil {
        klog.Errorf("Fail to update CronFederatedHPA(%s/%s) rule(%s)'s next execution time:%v",
            cronFHPA.Namespace, cronFHPA.Name, rule.Name, err)
        return err
    }

    return nil
}

// removeCronFHPAHistory removes the rule history in status
func (c *CronFHPAController) removeCronFHPAHistory(cronFHPA *autoscalingv1alpha1.CronFederatedHPA, ruleName string) error {
    exists := false
    for index, history := range cronFHPA.Status.ExecutionHistories {
        if history.RuleName != ruleName {
            continue
        }
        cronFHPA.Status.ExecutionHistories = append(cronFHPA.Status.ExecutionHistories[:index], cronFHPA.Status.ExecutionHistories[index+1:]...)
        exists = true
        break
    }

    if !exists {
        return nil
    }
    if err := c.Client.Status().Update(context.Background(), cronFHPA); err != nil {
        c.EventRecorder.Event(cronFHPA, corev1.EventTypeWarning, "UpdateCronFederatedHPAFailed", err.Error())
        klog.Errorf("Fail to remove CronFederatedHPA(%s/%s) rule(%s) history:%v", cronFHPA.Namespace, cronFHPA.Name, ruleName, err)
        return err
    }

    return nil
}
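For orientation, here is a minimal sketch of the object shape this reconciler operates on, using only fields that appear elsewhere in the diff (ScaleTargetRef, Rules, and the per-rule schedule/replica fields). The spec struct name and the ObjectMeta embedding follow standard Kubernetes API conventions and are assumptions, not something this diff confirms:

```go
package main

import (
    "fmt"

    autoscalingv2 "k8s.io/api/autoscaling/v2"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    autoscalingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/autoscaling/v1alpha1"
)

func main() {
    minReplicas := int32(3)
    maxReplicas := int32(10)

    // A CronFederatedHPA that widens a FederatedHPA's replica range on
    // weekday mornings; the reconciler creates one cron executor per
    // entry in Spec.Rules.
    cronFHPA := &autoscalingv1alpha1.CronFederatedHPA{
        ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "workday-scale"},
        Spec: autoscalingv1alpha1.CronFederatedHPASpec{
            ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{
                APIVersion: autoscalingv1alpha1.GroupVersion.String(),
                Kind:       autoscalingv1alpha1.FederatedHPAKind,
                Name:       "sample-fhpa",
            },
            Rules: []autoscalingv1alpha1.CronFederatedHPARule{{
                Name:              "scale-up-workday",
                Schedule:          "0 9 * * 1-5",
                TargetMinReplicas: &minReplicas,
                TargetMaxReplicas: &maxReplicas,
            }},
        },
    }
    fmt.Println(cronFHPA.Name, cronFHPA.Spec.Rules[0].Schedule)
}
```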
@@ -0,0 +1,177 @@
/*
Copyright 2023 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cronfederatedhpa

import (
    "fmt"
    "sync"
    "time"
    _ "time/tzdata"

    "github.com/go-co-op/gocron"
    autoscalingv2 "k8s.io/api/autoscaling/v2"
    "k8s.io/apimachinery/pkg/api/equality"
    "k8s.io/client-go/tools/record"
    "k8s.io/klog/v2"
    "sigs.k8s.io/controller-runtime/pkg/client"

    autoscalingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/autoscaling/v1alpha1"
    "github.com/karmada-io/karmada/pkg/util/helper"
)

type RuleCron struct {
    *gocron.Scheduler
    autoscalingv1alpha1.CronFederatedHPARule
}

type CronHandler struct {
    client        client.Client
    eventRecorder record.EventRecorder

    // cronExecutorMap is [cronFederatedHPA name][rule name]RuleCron
    cronExecutorMap map[string]map[string]RuleCron
    executorLock    sync.RWMutex

    // cronFHPAScaleTargetMap is [cronFHPA name]CrossVersionObjectReference
    cronFHPAScaleTargetMap map[string]autoscalingv2.CrossVersionObjectReference
    scaleTargetLock        sync.RWMutex
}

// NewCronHandler creates a new cron handler
func NewCronHandler(client client.Client, eventRecorder record.EventRecorder) *CronHandler {
    return &CronHandler{
        client:                 client,
        eventRecorder:          eventRecorder,
        cronExecutorMap:        make(map[string]map[string]RuleCron),
        cronFHPAScaleTargetMap: make(map[string]autoscalingv2.CrossVersionObjectReference),
    }
}

// CronFHPAScaleTargetRefUpdates checks if the scale target changed
func (c *CronHandler) CronFHPAScaleTargetRefUpdates(cronFHPAKey string, scaleTarget autoscalingv2.CrossVersionObjectReference) bool {
    c.scaleTargetLock.Lock()
    defer c.scaleTargetLock.Unlock()

    origTarget, ok := c.cronFHPAScaleTargetMap[cronFHPAKey]
    if !ok {
        c.cronFHPAScaleTargetMap[cronFHPAKey] = scaleTarget
        return false
    }

    return !equality.Semantic.DeepEqual(origTarget, scaleTarget)
}

// AddCronExecutorIfNotExist creates the executor for CronFederatedHPA if it does not exist
func (c *CronHandler) AddCronExecutorIfNotExist(cronFHPAKey string) {
    c.executorLock.Lock()
    defer c.executorLock.Unlock()

    if _, ok := c.cronExecutorMap[cronFHPAKey]; ok {
        return
    }

    c.cronExecutorMap[cronFHPAKey] = make(map[string]RuleCron)
}

func (c *CronHandler) RuleCronExecutorExists(cronFHPAKey string,
    ruleName string) (autoscalingv1alpha1.CronFederatedHPARule, bool) {
    c.executorLock.RLock()
    defer c.executorLock.RUnlock()

    if _, ok := c.cronExecutorMap[cronFHPAKey]; !ok {
        return autoscalingv1alpha1.CronFederatedHPARule{}, false
    }
    cronRule, exists := c.cronExecutorMap[cronFHPAKey][ruleName]
    return cronRule.CronFederatedHPARule, exists
}

// StopRuleExecutor stops the executor for specific CronFederatedHPA rule
func (c *CronHandler) StopRuleExecutor(cronFHPAKey string, ruleName string) {
    c.executorLock.Lock()
    defer c.executorLock.Unlock()

    if _, ok := c.cronExecutorMap[cronFHPAKey]; !ok {
        return
    }
    if _, ok := c.cronExecutorMap[cronFHPAKey][ruleName]; !ok {
        return
    }
    c.cronExecutorMap[cronFHPAKey][ruleName].Stop()
    delete(c.cronExecutorMap[cronFHPAKey], ruleName)
}

// StopCronFHPAExecutor stops the executor for specific CronFederatedHPA
func (c *CronHandler) StopCronFHPAExecutor(cronFHPAKey string) {
    c.executorLock.Lock()
    defer c.executorLock.Unlock()

    if _, ok := c.cronExecutorMap[cronFHPAKey]; !ok {
        return
    }
    for _, scheduler := range c.cronExecutorMap[cronFHPAKey] {
        scheduler.Stop()
    }

    delete(c.cronExecutorMap, cronFHPAKey)
}

// CreateCronJobForExecutor creates the executor for a rule of CronFederatedHPA
func (c *CronHandler) CreateCronJobForExecutor(cronFHPA *autoscalingv1alpha1.CronFederatedHPA,
    rule autoscalingv1alpha1.CronFederatedHPARule) error {
    var err error
    timeZone := time.Now().Location()

    if rule.TimeZone != nil {
        timeZone, err = time.LoadLocation(*rule.TimeZone)
        if err != nil {
            // This should not happen because there is validation in the webhook
            klog.Errorf("Invalid CronFederatedHPA(%s/%s) rule(%s) time zone(%s):%v",
                cronFHPA.Namespace, cronFHPA.Name, rule.Name, *rule.TimeZone, err)
            return err
        }
    }

    scheduler := gocron.NewScheduler(timeZone)
    cronJob := NewCronFederatedHPAJob(c.client, c.eventRecorder, scheduler, cronFHPA, rule)
    if _, err := scheduler.Cron(rule.Schedule).Do(RunCronFederatedHPARule, cronJob); err != nil {
        klog.Errorf("Create cron job for CronFederatedHPA(%s/%s) rule(%s) error:%v",
            cronFHPA.Namespace, cronFHPA.Name, rule.Name, err)
        return err
    }
    scheduler.StartAsync()

    cronFHPAKey := helper.GetCronFederatedHPAKey(cronFHPA)
    c.executorLock.Lock()
    defer c.executorLock.Unlock()
    ruleExecutorMap := c.cronExecutorMap[cronFHPAKey]
    ruleExecutorMap[rule.Name] = RuleCron{Scheduler: scheduler, CronFederatedHPARule: rule}
    return nil
}

func (c *CronHandler) GetRuleNextExecuteTime(cronFHPA *autoscalingv1alpha1.CronFederatedHPA, ruleName string) (time.Time, error) {
    c.executorLock.RLock()
    defer c.executorLock.RUnlock()

    if _, ok := c.cronExecutorMap[helper.GetCronFederatedHPAKey(cronFHPA)]; !ok {
        return time.Time{}, fmt.Errorf("CronFederatedHPA(%s/%s) not start", cronFHPA.Namespace, cronFHPA.Name)
    }

    ruleCron, exists := c.cronExecutorMap[helper.GetCronFederatedHPAKey(cronFHPA)][ruleName]
    if !exists {
        return time.Time{}, fmt.Errorf("CronFederatedHPA(%s/%s) rule(%s) not exist", cronFHPA.Namespace, cronFHPA.Name, ruleName)
    }

    _, next := ruleCron.Scheduler.NextRun()
    return next, nil
}
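The handler above drives everything through gocron: one scheduler per rule, started asynchronously, with NextRun feeding the status' nextExecutionTime. A standalone sketch of that pattern, using only the gocron v1 calls that appear in this file (NewScheduler, Cron, Do, StartAsync, NextRun, Stop); the schedule and callback are illustrative:

```go
package main

import (
    "fmt"
    "time"

    "github.com/go-co-op/gocron"
)

func main() {
    // One scheduler per rule, mirroring CronHandler.CreateCronJobForExecutor.
    scheduler := gocron.NewScheduler(time.UTC)

    // Register the job with a standard 5-field cron expression.
    if _, err := scheduler.Cron("*/1 * * * *").Do(func() {
        fmt.Println("rule fired at", time.Now().Format(time.RFC3339))
    }); err != nil {
        panic(err)
    }

    // StartAsync runs jobs in the background, as the controller does.
    scheduler.StartAsync()

    // NextRun is what GetRuleNextExecuteTime records as nextExecutionTime.
    _, next := scheduler.NextRun()
    fmt.Println("next run:", next.Format(time.RFC3339))

    // Let the job fire once, then stop it (what StopRuleExecutor does).
    time.Sleep(70 * time.Second)
    scheduler.Stop()
}
```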
@@ -0,0 +1,322 @@
/*
Copyright 2023 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cronfederatedhpa

import (
    "context"
    "fmt"
    "time"

    "github.com/go-co-op/gocron"
    autoscalingv1 "k8s.io/api/autoscaling/v1"
    corev1 "k8s.io/api/core/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/client-go/tools/record"
    "k8s.io/client-go/util/retry"
    "k8s.io/klog/v2"
    "sigs.k8s.io/controller-runtime/pkg/client"

    autoscalingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/autoscaling/v1alpha1"
    "github.com/karmada-io/karmada/pkg/util"
    "github.com/karmada-io/karmada/pkg/util/helper"
)

type CronFederatedHPAJob struct {
    client        client.Client
    eventRecorder record.EventRecorder
    scheduler     *gocron.Scheduler

    namespaceName types.NamespacedName
    rule          autoscalingv1alpha1.CronFederatedHPARule
}

func NewCronFederatedHPAJob(client client.Client, eventRecorder record.EventRecorder, scheduler *gocron.Scheduler,
    cronFHPA *autoscalingv1alpha1.CronFederatedHPA, rule autoscalingv1alpha1.CronFederatedHPARule) *CronFederatedHPAJob {
    return &CronFederatedHPAJob{
        client:        client,
        eventRecorder: eventRecorder,
        scheduler:     scheduler,
        namespaceName: types.NamespacedName{
            Name:      cronFHPA.Name,
            Namespace: cronFHPA.Namespace,
        },
        rule: rule,
    }
}

func RunCronFederatedHPARule(c *CronFederatedHPAJob) {
    klog.V(4).Infof("Start to handle CronFederatedHPA %s", c.namespaceName)
    defer klog.V(4).Infof("End to handle CronFederatedHPA %s", c.namespaceName)

    var err error
    cronFHPA := &autoscalingv1alpha1.CronFederatedHPA{}
    err = c.client.Get(context.TODO(), c.namespaceName, cronFHPA)
    if err != nil {
        if apierrors.IsNotFound(err) {
            klog.Infof("CronFederatedHPA(%s) not found", c.namespaceName)
        } else {
            // TODO: This may happen when the network is down, we should do something here
            // But we are not sure what to do (retrying does not solve the problem)
            klog.Errorf("Get CronFederatedHPA(%s) failed: %v", c.namespaceName, err)
        }
        return
    }

    if helper.IsCronFederatedHPARuleSuspend(c.rule) {
        // If the rule is suspended, this job will be stopped soon
        klog.V(4).Infof("CronFederatedHPA(%s) Rule(%s) is suspended, skip it", c.namespaceName, c.rule.Name)
        return
    }

    var scaleErr error
    defer func() {
        if scaleErr != nil {
            c.eventRecorder.Event(cronFHPA, corev1.EventTypeWarning, "ScaleFailed", scaleErr.Error())
            err = c.addFailedExecutionHistory(cronFHPA, scaleErr.Error())
        } else {
            err = c.addSuccessExecutionHistory(cronFHPA, c.rule.TargetReplicas, c.rule.TargetMinReplicas, c.rule.TargetMaxReplicas)
        }
        if err != nil {
            c.eventRecorder.Event(cronFHPA, corev1.EventTypeWarning, "UpdateStatusFailed", err.Error())
        }
    }()

    if cronFHPA.Spec.ScaleTargetRef.APIVersion == autoscalingv1alpha1.GroupVersion.String() {
        if cronFHPA.Spec.ScaleTargetRef.Kind != autoscalingv1alpha1.FederatedHPAKind {
            scaleErr = fmt.Errorf("CronFederatedHPA(%s) do not support scale target %s/%s",
                c.namespaceName, cronFHPA.Spec.ScaleTargetRef.APIVersion, cronFHPA.Spec.ScaleTargetRef.Kind)
            return
        }

        scaleErr = retry.RetryOnConflict(retry.DefaultRetry, func() (err error) {
            err = c.ScaleFHPA(cronFHPA)
            return err
        })
        return
    }

    // scale workload directly
    scaleErr = retry.RetryOnConflict(retry.DefaultRetry, func() (err error) {
        err = c.ScaleWorkloads(cronFHPA)
        return err
    })
}

func (c *CronFederatedHPAJob) ScaleFHPA(cronFHPA *autoscalingv1alpha1.CronFederatedHPA) error {
    fhpaName := types.NamespacedName{
        Namespace: cronFHPA.Namespace,
        Name:      cronFHPA.Spec.ScaleTargetRef.Name,
    }

    fhpa := &autoscalingv1alpha1.FederatedHPA{}
    err := c.client.Get(context.TODO(), fhpaName, fhpa)
    if err != nil {
        return err
    }

    update := false
    if c.rule.TargetMaxReplicas != nil && fhpa.Spec.MaxReplicas != *c.rule.TargetMaxReplicas {
        fhpa.Spec.MaxReplicas = *c.rule.TargetMaxReplicas
        update = true
    }
    if c.rule.TargetMinReplicas != nil && *fhpa.Spec.MinReplicas != *c.rule.TargetMinReplicas {
        *fhpa.Spec.MinReplicas = *c.rule.TargetMinReplicas
        update = true
    }

    if update {
        err := c.client.Update(context.TODO(), fhpa)
        if err != nil {
            klog.Errorf("CronFederatedHPA(%s) updates FederatedHPA(%s/%s) failed: %v",
                c.namespaceName, fhpa.Namespace, fhpa.Name, err)
            return err
        }
        klog.V(4).Infof("CronFederatedHPA(%s) scales FederatedHPA(%s/%s) successfully",
            c.namespaceName, fhpa.Namespace, fhpa.Name)
        return nil
    }

    klog.V(4).Infof("CronFederatedHPA(%s) find nothing updated for FederatedHPA(%s/%s), skip it",
        c.namespaceName, fhpa.Namespace, fhpa.Name)
    return nil
}

func (c *CronFederatedHPAJob) ScaleWorkloads(cronFHPA *autoscalingv1alpha1.CronFederatedHPA) error {
    ctx := context.Background()

    scaleClient := c.client.SubResource("scale")

    targetGV, err := schema.ParseGroupVersion(cronFHPA.Spec.ScaleTargetRef.APIVersion)
    if err != nil {
        klog.Errorf("CronFederatedHPA(%s) parses GroupVersion(%s) failed: %v",
            c.namespaceName, cronFHPA.Spec.ScaleTargetRef.APIVersion, err)
        return err
    }
    targetGVK := schema.GroupVersionKind{
        Group:   targetGV.Group,
        Kind:    cronFHPA.Spec.ScaleTargetRef.Kind,
        Version: targetGV.Version,
    }
    targetResource := &unstructured.Unstructured{}
    targetResource.SetGroupVersionKind(targetGVK)
    err = c.client.Get(ctx, types.NamespacedName{Namespace: cronFHPA.Namespace, Name: cronFHPA.Spec.ScaleTargetRef.Name}, targetResource)
    if err != nil {
        klog.Errorf("Get Resource(%s/%s) failed: %v", cronFHPA.Namespace, cronFHPA.Spec.ScaleTargetRef.Name, err)
        return err
    }

    scaleObj := &unstructured.Unstructured{}
    err = scaleClient.Get(ctx, targetResource, scaleObj)
    if err != nil {
        klog.Errorf("Get Scale for resource(%s/%s) failed: %v", cronFHPA.Namespace, cronFHPA.Spec.ScaleTargetRef.Name, err)
        return err
    }

    scale := &autoscalingv1.Scale{}
    err = helper.ConvertToTypedObject(scaleObj, scale)
    if err != nil {
        klog.Errorf("Convert Scale failed: %v", err)
        return err
    }

    if scale.Spec.Replicas != *c.rule.TargetReplicas {
        if err := helper.ApplyReplica(scaleObj, int64(*c.rule.TargetReplicas), util.ReplicasField); err != nil {
            klog.Errorf("CronFederatedHPA(%s) applies Replicas for %s/%s failed: %v",
                c.namespaceName, cronFHPA.Namespace, cronFHPA.Spec.ScaleTargetRef.Name, err)
            return err
        }
        err := scaleClient.Update(ctx, targetResource, client.WithSubResourceBody(scaleObj))
        if err != nil {
            klog.Errorf("CronFederatedHPA(%s) updates scale resource failed: %v", c.namespaceName, err)
            return err
        }
        klog.V(4).Infof("CronFederatedHPA(%s) scales resource(%s/%s) successfully",
            c.namespaceName, cronFHPA.Namespace, cronFHPA.Spec.ScaleTargetRef.Name)
        return nil
    }
    return nil
}

func (c *CronFederatedHPAJob) addFailedExecutionHistory(
    cronFHPA *autoscalingv1alpha1.CronFederatedHPA, errMsg string) error {
    _, nextExecutionTime := c.scheduler.NextRun()

    // Add failed history record, return false if there is no such rule
    addFailedHistoryFunc := func() bool {
        exists := false
        for index, rule := range cronFHPA.Status.ExecutionHistories {
            if rule.RuleName != c.rule.Name {
                continue
            }
            failedExecution := autoscalingv1alpha1.FailedExecution{
                ScheduleTime:  rule.NextExecutionTime,
                ExecutionTime: &metav1.Time{Time: time.Now()},
                Message:       errMsg,
            }
            historyLimits := helper.GetCronFederatedHPAFailedHistoryLimits(c.rule)
            if len(rule.FailedExecutions) > historyLimits-1 {
                rule.FailedExecutions = rule.FailedExecutions[:historyLimits-1]
            }
            cronFHPA.Status.ExecutionHistories[index].FailedExecutions =
                append([]autoscalingv1alpha1.FailedExecution{failedExecution}, rule.FailedExecutions...)
            cronFHPA.Status.ExecutionHistories[index].NextExecutionTime = &metav1.Time{Time: nextExecutionTime}
            exists = true
            break
        }

        return exists
    }

    return retry.RetryOnConflict(retry.DefaultRetry, func() (err error) {
        // If this history does not exist, it means the rule is suspended or deleted, so just ignore it.
        if exists := addFailedHistoryFunc(); !exists {
            return nil
        }

        updateErr := c.client.Status().Update(context.Background(), cronFHPA)
        if updateErr == nil {
            klog.V(4).Infof("CronFederatedHPA(%s/%s) status has been updated successfully", cronFHPA.Namespace, cronFHPA.Name)
            return nil
        }

        updated := &autoscalingv1alpha1.CronFederatedHPA{}
        if err = c.client.Get(context.Background(), client.ObjectKey{Namespace: cronFHPA.Namespace, Name: cronFHPA.Name}, updated); err == nil {
            cronFHPA = updated
        } else {
            klog.Errorf("Get CronFederatedHPA(%s/%s) failed: %v", cronFHPA.Namespace, cronFHPA.Name, err)
        }
        return updateErr
    })
}

func (c *CronFederatedHPAJob) addSuccessExecutionHistory(
    cronFHPA *autoscalingv1alpha1.CronFederatedHPA,
    appliedReplicas, appliedMaxReplicas, appliedMinReplicas *int32) error {
    _, nextExecutionTime := c.scheduler.NextRun()

    // Add success history record, return false if there is no such rule
    addSuccessHistoryFunc := func() bool {
        exists := false
        for index, rule := range cronFHPA.Status.ExecutionHistories {
            if rule.RuleName != c.rule.Name {
                continue
            }
            successExecution := autoscalingv1alpha1.SuccessfulExecution{
                ScheduleTime:       rule.NextExecutionTime,
                ExecutionTime:      &metav1.Time{Time: time.Now()},
                AppliedReplicas:    appliedReplicas,
                AppliedMaxReplicas: appliedMaxReplicas,
                AppliedMinReplicas: appliedMinReplicas,
            }
            historyLimits := helper.GetCronFederatedHPASuccessHistoryLimits(c.rule)
            if len(rule.SuccessfulExecutions) > historyLimits-1 {
                rule.SuccessfulExecutions = rule.SuccessfulExecutions[:historyLimits-1]
            }
            cronFHPA.Status.ExecutionHistories[index].SuccessfulExecutions =
                append([]autoscalingv1alpha1.SuccessfulExecution{successExecution}, rule.SuccessfulExecutions...)
            cronFHPA.Status.ExecutionHistories[index].NextExecutionTime = &metav1.Time{Time: nextExecutionTime}
            exists = true
            break
        }

        return exists
    }

    return retry.RetryOnConflict(retry.DefaultRetry, func() (err error) {
        // If this history does not exist, it means the rule was deleted, so just ignore it.
        if exists := addSuccessHistoryFunc(); !exists {
            return nil
        }

        updateErr := c.client.Status().Update(context.Background(), cronFHPA)
        if updateErr == nil {
            klog.V(4).Infof("CronFederatedHPA(%s/%s) status has been updated successfully", cronFHPA.Namespace, cronFHPA.Name)
            return err
        }

        updated := &autoscalingv1alpha1.CronFederatedHPA{}
        if err = c.client.Get(context.Background(), client.ObjectKey{Namespace: cronFHPA.Namespace, Name: cronFHPA.Name}, updated); err == nil {
            cronFHPA = updated
        } else {
            klog.Errorf("Get CronFederatedHPA(%s/%s) failed: %v", cronFHPA.Namespace, cronFHPA.Name, err)
        }
        return updateErr
    })
}
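Both scale paths above wrap their writes in client-go's retry.RetryOnConflict, so optimistic-concurrency conflicts on the FederatedHPA, the scale subresource, or the status are retried rather than surfaced. A minimal sketch of the helper in isolation; the counter and the closure body are illustrative only:

```go
package main

import (
    "fmt"

    "k8s.io/client-go/util/retry"
)

func main() {
    attempts := 0

    // RetryOnConflict re-runs the closure only when it returns a Conflict
    // error; in the controller the closure re-reads the object before
    // retrying the update, which is why the job re-fetches cronFHPA on a
    // failed status update.
    err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
        attempts++
        // A real caller would perform a Get + mutate + Update here and
        // return the Update error.
        return nil
    })
    fmt.Println("attempts:", attempts, "err:", err)
}
```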
@@ -8,6 +8,7 @@ import (
    eventsv1 "k8s.io/api/events/v1"
    "k8s.io/apimachinery/pkg/runtime/schema"

    autoscalingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/autoscaling/v1alpha1"
    clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
    configv1alpha1 "github.com/karmada-io/karmada/pkg/apis/config/v1alpha1"
    networkingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/networking/v1alpha1"

@@ -44,6 +45,7 @@ func NewSkippedResourceConfig() *SkippedResourceConfig {
    r.DisableGroup(workv1alpha1.GroupVersion.Group)
    r.DisableGroup(configv1alpha1.GroupVersion.Group)
    r.DisableGroup(networkingv1alpha1.GroupVersion.Group)
    r.DisableGroup(autoscalingv1alpha1.GroupVersion.Group)
    // disable event by default
    r.DisableGroup(eventsv1.GroupName)
    r.DisableGroupVersionKind(corev1EventGVK)
@@ -0,0 +1,48 @@
/*
Copyright 2023 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package helper

import (
    "k8s.io/apimachinery/pkg/types"

    autoscalingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/autoscaling/v1alpha1"
)

func IsCronFederatedHPARuleSuspend(rule autoscalingv1alpha1.CronFederatedHPARule) bool {
    if rule.Suspend == nil {
        return false
    }
    return *rule.Suspend
}

func GetCronFederatedHPASuccessHistoryLimits(rule autoscalingv1alpha1.CronFederatedHPARule) int {
    if rule.SuccessfulHistoryLimit == nil {
        return 3
    }
    return int(*rule.SuccessfulHistoryLimit)
}

func GetCronFederatedHPAFailedHistoryLimits(rule autoscalingv1alpha1.CronFederatedHPARule) int {
    if rule.FailedHistoryLimit == nil {
        return 3
    }
    return int(*rule.FailedHistoryLimit)
}

func GetCronFederatedHPAKey(cronFHPA *autoscalingv1alpha1.CronFederatedHPA) string {
    namespacedName := types.NamespacedName{Namespace: cronFHPA.Namespace, Name: cronFHPA.Name}
    return namespacedName.String()
}
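A small usage sketch of these helpers, built only from fields already referenced in this diff (the standard ObjectMeta embedding is assumed); it shows the defaults the controller relies on when the optional rule fields are left nil:

```go
package main

import (
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    autoscalingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/autoscaling/v1alpha1"
    "github.com/karmada-io/karmada/pkg/util/helper"
)

func main() {
    cronFHPA := &autoscalingv1alpha1.CronFederatedHPA{
        ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "cron-fhpa-sample"},
    }

    // The cron handler keys its executor map by "<namespace>/<name>".
    fmt.Println(helper.GetCronFederatedHPAKey(cronFHPA)) // default/cron-fhpa-sample

    // A rule with Suspend unset is treated as not suspended, and both
    // history limits default to 3 when left nil.
    rule := autoscalingv1alpha1.CronFederatedHPARule{Name: "scale-up"}
    fmt.Println(helper.IsCronFederatedHPARuleSuspend(rule))           // false
    fmt.Println(helper.GetCronFederatedHPASuccessHistoryLimits(rule)) // 3
    fmt.Println(helper.GetCronFederatedHPAFailedHistoryLimits(rule))  // 3
}
```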
@@ -0,0 +1,133 @@
/*
Copyright 2023 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cronfederatedhpa

import (
    "context"
    "fmt"
    "net/http"
    "time"
    _ "time/tzdata"

    "github.com/adhocore/gronx"
    "k8s.io/apimachinery/pkg/util/sets"
    "k8s.io/apimachinery/pkg/util/validation/field"
    "k8s.io/klog/v2"
    "sigs.k8s.io/controller-runtime/pkg/webhook/admission"

    autoscalingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/autoscaling/v1alpha1"
)

// ValidatingAdmission validates CronFederatedHPA object when creating/updating.
type ValidatingAdmission struct {
    decoder *admission.Decoder
}

// Check if our ValidatingAdmission implements necessary interface
var _ admission.Handler = &ValidatingAdmission{}
var _ admission.DecoderInjector = &ValidatingAdmission{}

// Handle implements admission.Handler interface.
// It yields a response to an AdmissionRequest.
func (v *ValidatingAdmission) Handle(_ context.Context, req admission.Request) admission.Response {
    cronFHPA := &autoscalingv1alpha1.CronFederatedHPA{}

    err := v.decoder.Decode(req, cronFHPA)
    if err != nil {
        return admission.Errored(http.StatusBadRequest, err)
    }
    klog.V(2).Infof("Validating CronFederatedHPA(%s) for request: %s", klog.KObj(cronFHPA).String(), req.Operation)

    if errs := v.validateCronFederatedHPASpec(cronFHPA); len(errs) != 0 {
        return admission.Denied(errs.ToAggregate().Error())
    }

    return admission.Allowed("")
}

// InjectDecoder implements admission.DecoderInjector interface.
// A decoder will be automatically injected.
func (v *ValidatingAdmission) InjectDecoder(d *admission.Decoder) error {
    v.decoder = d
    return nil
}

// validateCronFederatedHPASpec validates CronFederatedHPA spec
func (v *ValidatingAdmission) validateCronFederatedHPASpec(cronFHPA *autoscalingv1alpha1.CronFederatedHPA) field.ErrorList {
    errs := field.ErrorList{}
    scaleFHPA := false

    scaleTargetRef := cronFHPA.Spec.ScaleTargetRef
    if scaleTargetRef.APIVersion == autoscalingv1alpha1.GroupVersion.String() {
        if scaleTargetRef.Kind != autoscalingv1alpha1.FederatedHPAKind {
            kindFieldPath := field.NewPath("spec").Child("scaleTargetRef").Child("kind")
            fieldError := field.Invalid(kindFieldPath, scaleTargetRef.Kind,
                fmt.Sprintf("invalid scaleTargetRef kind: %s, only support %s", scaleTargetRef.Kind, autoscalingv1alpha1.FederatedHPAKind))
            errs = append(errs, fieldError)
            return errs
        }
        scaleFHPA = true
    }

    errs = append(errs, v.validateCronFederatedHPARules(cronFHPA.Spec.Rules, scaleFHPA, scaleTargetRef.Kind)...)

    return errs
}

// validateCronFederatedHPARules validates CronFederatedHPA rules
func (v *ValidatingAdmission) validateCronFederatedHPARules(rules []autoscalingv1alpha1.CronFederatedHPARule,
    scaleFHPA bool, scaleTargetKind string) field.ErrorList {
    errs := field.ErrorList{}

    ruleFieldPath := field.NewPath("spec").Child("rules")
    ruleNameSet := sets.NewString()
    for index, rule := range rules {
        if ruleNameSet.Has(rule.Name) {
            errs = append(errs, field.Duplicate(field.NewPath("spec").
                Child("rules").Index(index).Child("name"), rule.Name))
        }
        ruleNameSet.Insert(rule.Name)

        // Validate cron format
        cronValidator := gronx.New()
        if !cronValidator.IsValid(rule.Schedule) {
            errs = append(errs, field.Invalid(ruleFieldPath.Index(index).Child("schedule"), rule.Schedule, "invalid cron format"))
        }

        // Validate timezone
        if rule.TimeZone != nil {
            _, err := time.LoadLocation(*rule.TimeZone)
            if err != nil {
                errs = append(errs, field.Invalid(ruleFieldPath.Index(index).Child("timeZone"), rule.TimeZone, err.Error()))
            }
        }

        if scaleFHPA {
            // Validate targetMinReplicas and targetMaxReplicas
            if rule.TargetMinReplicas == nil && rule.TargetMaxReplicas == nil {
                errMsg := "targetMinReplicas and targetMaxReplicas cannot be nil at the same time if you want to scale FederatedHPA"
                errs = append(errs, field.Invalid(ruleFieldPath.Index(index), "", errMsg))
            }
            continue
        }

        // Validate targetReplicas
        if rule.TargetReplicas == nil {
            errMsg := fmt.Sprintf("targetReplicas cannot be nil if you want to scale %s", scaleTargetKind)
            errs = append(errs, field.Invalid(ruleFieldPath.Index(index), "", errMsg))
        }
    }

    return errs
}
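The webhook leans on gronx only for schedule syntax checking. A standalone sketch of that check using gronx's IsValid, with an obviously valid and an obviously invalid expression:

```go
package main

import (
    "fmt"

    "github.com/adhocore/gronx"
)

func main() {
    cronValidator := gronx.New()

    // Mirrors the webhook's per-rule schedule check: a minute value of 61
    // is out of range, so the second expression is rejected.
    for _, schedule := range []string{"*/30 * * * *", "61 * * * *"} {
        fmt.Printf("%-15s valid=%v\n", schedule, cronValidator.IsValid(schedule))
    }
}
```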
@@ -0,0 +1,13 @@
root = true

[*]
indent_style = space
indent_size = 4
end_of_line = lf
charset = utf-8
trim_trailing_whitespace = true
insert_final_newline = true

[*.go]
indent_style = tab
tab_width = 2
@@ -0,0 +1,11 @@
.idea/
.DS_Store
*~
*.out
vendor/
dist/
.env
bin/
*.php
test/*.go
*.txt
@@ -0,0 +1,67 @@
project_name: tasker

release:
  prerelease: auto
  name_template: "Version v{{.Version}}"
  # draft: true
  mode: "keep-existing"

before:
  hooks:
    - go mod tidy

builds:
  - <<: &build_defaults
      binary: bin/tasker
      main: ./cmd/tasker
      ldflags:
        - -X main.Version={{.Version}}
      env:
        - CGO_ENABLED=0
    id: macOS
    goos: [darwin]
    goarch: [amd64, arm64]

  - <<: *build_defaults
    id: linux
    goos: [linux]
    goarch: [386, arm, amd64, arm64]

  - <<: *build_defaults
    id: windows
    goos: [windows]
    goarch: [amd64]

archives:
  - id: nix
    builds: [macOS, linux]
    <<: &archive_defaults
      name_template: "{{ .ProjectName }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}"
    wrap_in_directory: true
    rlcp: true
    format: tar.gz
    files:
      - LICENSE

  - id: windows
    builds: [windows]
    <<: *archive_defaults
    wrap_in_directory: false
    format: zip
    files:
      - LICENSE

checksum:
  name_template: 'checksums.txt'
  algorithm: sha256

changelog:
  skip: true
  use: github
  sort: desc
  filters:
    exclude:
      - '^doc:'
      - '^dev:'
      - '^build:'
      - '^ci:'
@@ -0,0 +1,119 @@
## [v0.2.7](https://github.com/adhocore/gronx/releases/tag/v0.2.7) (2022-06-28)

### Miscellaneous
- **Workflow**: Run tests on 1.18x (Jitendra)
- Tests for go v1.17.x, add codecov (Jitendra)


## [v0.2.6](https://github.com/adhocore/gronx/releases/tag/v0.2.6) (2021-10-14)

### Miscellaneous
- Fix 'with' languages (Jitendra Adhikari) [_a813b55_](https://github.com/adhocore/gronx/commit/a813b55)
- Init/setup github codeql (Jitendra Adhikari) [_fe2aa5a_](https://github.com/adhocore/gronx/commit/fe2aa5a)


## [v0.2.5](https://github.com/adhocore/gronx/releases/tag/v0.2.5) (2021-07-25)

### Bug Fixes
- **Tasker**: The clause should be using OR (Jitendra Adhikari) [_b813b85_](https://github.com/adhocore/gronx/commit/b813b85)


## [v0.2.4](https://github.com/adhocore/gronx/releases/tag/v0.2.4) (2021-05-05)

### Features
- **Pkg.tasker**: Capture cmd output in tasker logger, error in stderr (Jitendra Adhikari) [_0da0aae_](https://github.com/adhocore/gronx/commit/0da0aae)

### Internal Refactors
- **Cmd.tasker**: Taskify is now method of tasker (Jitendra Adhikari) [_8b1373b_](https://github.com/adhocore/gronx/commit/8b1373b)


## [v0.2.3](https://github.com/adhocore/gronx/releases/tag/v0.2.3) (2021-05-04)

### Bug Fixes
- **Pkg.tasker**: Sleep 100ms so abort can be bailed asap, remove dup msg (Jitendra Adhikari) [_d868920_](https://github.com/adhocore/gronx/commit/d868920)

### Miscellaneous
- Allow leeway period at the end (Jitendra Adhikari) [_5ebf923_](https://github.com/adhocore/gronx/commit/5ebf923)


## [v0.2.2](https://github.com/adhocore/gronx/releases/tag/v0.2.2) (2021-05-03)

### Bug Fixes
- **Pkg.tasker**: DoRun checks if timed out before run (Jitendra Adhikari) [_f27a657_](https://github.com/adhocore/gronx/commit/f27a657)

### Internal Refactors
- **Pkg.tasker**: Use dateFormat var, update final tick phrase (Jitendra Adhikari) [_fad0271_](https://github.com/adhocore/gronx/commit/fad0271)


## [v0.2.1](https://github.com/adhocore/gronx/releases/tag/v0.2.1) (2021-05-02)

### Bug Fixes
- **Pkg.tasker**: Deprecate sleep dur if next tick timeout (Jitendra Adhikari) [_3de45a1_](https://github.com/adhocore/gronx/commit/3de45a1)


## [v0.2.0](https://github.com/adhocore/gronx/releases/tag/v0.2.0) (2021-05-02)

### Features
- **Cmd.tasker**: Add tasker for standalone usage as task daemon (Jitendra Adhikari) [_0d99409_](https://github.com/adhocore/gronx/commit/0d99409)
- **Pkg.tasker**: Add parser for tasker pkg (Jitendra Adhikari) [_e7f1811_](https://github.com/adhocore/gronx/commit/e7f1811)
- **Pkg.tasker**: Add tasker pkg (Jitendra Adhikari) [_a57b1c4_](https://github.com/adhocore/gronx/commit/a57b1c4)

### Bug Fixes
- **Pkg.tasker**: Use log.New() instead (Jitendra Adhikari) [_0cf2c07_](https://github.com/adhocore/gronx/commit/0cf2c07)
- **Validator**: This check is not really required (Jitendra Adhikari) [_c3d75e3_](https://github.com/adhocore/gronx/commit/c3d75e3)

### Internal Refactors
- **Gronx**: Add public methods for internal usage, expose spaceRe (Jitendra Adhikari) [_94eb20b_](https://github.com/adhocore/gronx/commit/94eb20b)

### Miscellaneous
- **Pkg.tasker**: Use file perms as octal (Jitendra Adhikari) [_83f258d_](https://github.com/adhocore/gronx/commit/83f258d)
- **Workflow**: Include all tests in action (Jitendra Adhikari) [_7328cbf_](https://github.com/adhocore/gronx/commit/7328cbf)

### Documentations
- Add task mangager and tasker docs/usages (Jitendra Adhikari) [_e77aa5f_](https://github.com/adhocore/gronx/commit/e77aa5f)


## [v0.1.4](https://github.com/adhocore/gronx/releases/tag/v0.1.4) (2021-04-25)

### Miscellaneous
- **Mod**: 1.13 is okay too (Jitendra Adhikari) [_6c328e7_](https://github.com/adhocore/gronx/commit/6c328e7)
- Try go 1.13.x (Jitendra Adhikari) [_b017ec4_](https://github.com/adhocore/gronx/commit/b017ec4)

### Documentations
- Practical usage (Jitendra Adhikari) [_9572e61_](https://github.com/adhocore/gronx/commit/9572e61)


## [v0.1.3](https://github.com/adhocore/gronx/releases/tag/v0.1.3) (2021-04-22)

### Internal Refactors
- **Checker**: Preserve error, for pos 2 & 4 bail only on due or err (Jitendra Adhikari) [_39a9cd5_](https://github.com/adhocore/gronx/commit/39a9cd5)
- **Validator**: Do not discard error from strconv (Jitendra Adhikari) [_3b0f444_](https://github.com/adhocore/gronx/commit/3b0f444)


## [v0.1.2](https://github.com/adhocore/gronx/releases/tag/v0.1.2) (2021-04-21)

### Features
- Add IsValid() (Jitendra Adhikari) [_150687b_](https://github.com/adhocore/gronx/commit/150687b)

### Documentations
- IsValid usage (Jitendra Adhikari) [_b747116_](https://github.com/adhocore/gronx/commit/b747116)


## [v0.1.1](https://github.com/adhocore/gronx/releases/tag/v0.1.1) (2021-04-21)

### Features
- Add main gronx api (Jitendra Adhikari) [_1b3b108_](https://github.com/adhocore/gronx/commit/1b3b108)
- Add cron segment checker (Jitendra Adhikari) [_a56be7c_](https://github.com/adhocore/gronx/commit/a56be7c)
- Add validator (Jitendra Adhikari) [_455a024_](https://github.com/adhocore/gronx/commit/455a024)

### Miscellaneous
- **Workflow**: Update actions (Jitendra Adhikari) [_8b54cc3_](https://github.com/adhocore/gronx/commit/8b54cc3)
- Init module (Jitendra Adhikari) [_bada37d_](https://github.com/adhocore/gronx/commit/bada37d)
- Add license (Jitendra Adhikari) [_5f20b96_](https://github.com/adhocore/gronx/commit/5f20b96)
- **Gh**: Add meta files (Jitendra Adhikari) [_35a1310_](https://github.com/adhocore/gronx/commit/35a1310)
- **Workflow**: Add lint/test actions (Jitendra Adhikari) [_884d5cb_](https://github.com/adhocore/gronx/commit/884d5cb)
- Add editorconfig (Jitendra Adhikari) [_8b75494_](https://github.com/adhocore/gronx/commit/8b75494)

### Documentations
- On cron expressions (Jitendra Adhikari) [_547fd72_](https://github.com/adhocore/gronx/commit/547fd72)
- Add readme (Jitendra Adhikari) [_3955e88_](https://github.com/adhocore/gronx/commit/3955e88)
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2021-2099 Jitendra Adhikari

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
@ -0,0 +1,328 @@
|
|||
# adhocore/gronx
|
||||
|
||||
[](https://github.com/adhocore/gronx/releases)
|
||||
[](LICENSE)
|
||||
[](https://goreportcard.com/report/github.com/adhocore/gronx)
|
||||
[](https://github.com/adhocore/gronx/actions/workflows/test-action.yml)
|
||||
[](https://github.com/adhocore/gronx/actions/workflows/lint-action.yml)
|
||||
[](https://codecov.io/gh/adhocore/gronx)
|
||||
[](https://github.com/sponsors/adhocore)
|
||||
[](https://twitter.com/intent/tweet?text=Lightweight+fast+and+deps+free+cron+expression+parser+for+Golang&url=https://github.com/adhocore/gronx&hashtags=go,golang,parser,cron,cronexpr,cronparser)
|
||||
|
||||
`gronx` is a Golang [cron expression](#cron-expression) parser ported from [adhocore/cron-expr](https://github.com/adhocore/php-cron-expr), with a task runner
and daemon that supports a crontab-like task list file. Use it programmatically in Golang or as a standalone binary instead of crond.
|
||||
|
||||
- Zero dependency.
|
||||
- Very **fast** because it bails early in case a segment doesn't match.
|
||||
- Built-in crontab-like daemon.
|
||||
- Supports second-level time granularity.
|
||||
|
||||
Find gronx on [pkg.go.dev](https://pkg.go.dev/github.com/adhocore/gronx).
|
||||
|
||||
## Installation
|
||||
|
||||
```sh
|
||||
go get -u github.com/adhocore/gronx
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
```go
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/adhocore/gronx"
|
||||
)
|
||||
|
||||
gron := gronx.New()
|
||||
expr := "* * * * *"
|
||||
|
||||
// check if expr is even valid, returns bool
|
||||
gron.IsValid(expr) // true
|
||||
|
||||
// check if expr is due for current time, returns bool and error
|
||||
gron.IsDue(expr) // true|false, nil
|
||||
|
||||
// check if expr is due for given time
|
||||
gron.IsDue(expr, time.Date(2021, time.April, 1, 1, 1, 0, 0, time.UTC)) // true|false, nil
|
||||
```
|
||||
|
||||
### Batch Due Check
|
||||
|
||||
If you have multiple cron expressions to check against the same reference time, use `BatchDue()`:
|
||||
```go
|
||||
gron := gronx.New()
|
||||
exprs := []string{"* * * * *", "0 */5 * * * *"}
|
||||
|
||||
// gives a []gronx.Expr array; each item has the Due flag and any Err encountered.
|
||||
dues := gron.BatchDue(exprs)
|
||||
|
||||
for _, expr := range dues {
|
||||
if expr.Err != nil {
|
||||
// Handle err
|
||||
} else if expr.Due {
|
||||
// Handle due
|
||||
}
|
||||
}
|
||||
|
||||
// Or with given time
|
||||
ref := time.Now()
|
||||
gron.BatchDue(exprs, ref)
|
||||
```
|
||||
|
||||
### Next Tick
|
||||
|
||||
To find out when the cron is due next (in the near future):
|
||||
```go
|
||||
allowCurrent := true // includes current time as well
|
||||
nextTime, err := gron.NextTick(expr, allowCurrent) // gives time.Time, error
|
||||
|
||||
// OR, next tick after certain reference time
|
||||
refTime := time.Date(2022, time.November, 1, 1, 1, 0, 0, time.UTC)
|
||||
allowCurrent = false // excludes the ref time
|
||||
nextTime, err := gron.NextTickAfter(expr, refTime, allowCurrent) // gives time.Time, error
|
||||
```
|
||||
|
||||
### Prev Tick
|
||||
|
||||
To find out when the cron was due previously (in the near past):
|
||||
```go
|
||||
allowCurrent := true // includes current time as well
|
||||
prevTime, err := gron.PrevTick(expr, allowCurrent) // gives time.Time, error
|
||||
|
||||
// OR, prev tick before certain reference time
|
||||
refTime := time.Date(2022, time.November, 1, 1, 1, 0, 0, time.UTC)
|
||||
allowCurrent = false // excludes the ref time
|
||||
prevTime, err = gron.PrevTickBefore(expr, refTime, allowCurrent) // gives time.Time, error
|
||||
```
|
||||
|
||||
> `PrevTick*()` and `NextTick*()` work mostly the same way except for the direction:
> one looks back, the other looks ahead.
|
||||
|
||||
### Standalone Daemon
|
||||
|
||||
At a more practical level, you would use this tool to manage and invoke jobs within the app itself rather than
mess around with `crontab` for each and every new task/job.

In crontab, just put one entry with `* * * * *` that points to your Go entry point which uses this tool.
Then in that entry point, invoke the different tasks whose corresponding cron expressions are due.
A simple map structure works for this, as sketched below.
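A minimal sketch of that map-based entry point (not taken from the gronx docs; the task functions and expressions here are hypothetical):

```go
package main

import (
	"log"

	"github.com/adhocore/gronx"
)

func main() {
	gron := gronx.New()

	// hypothetical tasks keyed by their cron expressions
	tasks := map[string]func(){
		"* * * * *":   func() { log.Println("every minute") },
		"*/5 * * * *": func() { log.Println("every 5 minutes") },
		"0 0 * * *":   func() { log.Println("daily at midnight") },
	}

	// crontab runs this binary every minute; invoke whatever is due right now
	for expr, task := range tasks {
		if due, err := gron.IsDue(expr); err == nil && due {
			task()
		}
	}
}
```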
|
||||
|
||||
Check the section below for a more sophisticated way of managing tasks automatically using the `gronx` daemon called `tasker`.
|
||||
|
||||
---
|
||||
### Go Tasker
|
||||
|
||||
Tasker is a task manager that can be used programmatically in Golang applications. It runs as a daemon and invokes tasks scheduled with cron expressions:
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/adhocore/gronx/pkg/tasker"
|
||||
)
|
||||
|
||||
func main() {
|
||||
taskr := tasker.New(tasker.Option{
|
||||
Verbose: true,
|
||||
// optional: defaults to local
|
||||
Tz: "Asia/Bangkok",
|
||||
// optional: defaults to stderr log stream
|
||||
Out: "/full/path/to/output-file",
|
||||
})
|
||||
|
||||
// add task to run every minute
|
||||
taskr.Task("* * * * *", func(ctx context.Context) (int, error) {
|
||||
// do something ...
|
||||
|
||||
// then return the exit code and error; e.g. if everything is okay:
|
||||
return 0, nil
|
||||
}).Task("*/5 * * * *", func(ctx context.Context) (int, error) { // every 5 minutes
|
||||
// you can also log the output to Out file as configured in Option above:
|
||||
taskr.Log.Printf("done something in %d s", 2)
|
||||
|
||||
return 0, nil
|
||||
})
|
||||
|
||||
// to run a task without overlap, set the concurrent flag to false:
|
||||
concurrent := false
|
||||
taskr.Task("* * * * * *", , tasker.Taskify("sleep 2", tasker.Option{}), concurrent)
|
||||
|
||||
// every 10 minutes with an arbitrary command
|
||||
taskr.Task("@10minutes", taskr.Taskify("command --option val -- args", tasker.Option{Shell: "/bin/sh -c"}))
|
||||
|
||||
// ... add more tasks
|
||||
|
||||
// optionally, if you want tasker to stop after 2 hours, pass the duration to Until():
|
||||
taskr.Until(2 * time.Hour)
|
||||
|
||||
// finally run the tasker; it ticks sharply on every minute and runs all the tasks due at that time!
|
||||
// it exits gracefully when ctrl+c is received making sure pending tasks are completed.
|
||||
taskr.Run()
|
||||
}
|
||||
```
|
||||
|
||||
#### Concurrency
|
||||
|
||||
By default, tasks can run concurrently, i.e. if the previous run has not yet finished
but the task is due again, it will run again.
If you want only one instance of a task to run at a time, set the concurrent flag to false:
|
||||
|
||||
```go
|
||||
taskr := tasker.New(tasker.Option{})
|
||||
|
||||
concurrent := false
|
||||
expr, task := "* * * * * *", tasker.Taskify("php -r 'sleep(2);'")
|
||||
taskr.Task(expr, task, concurrent)
|
||||
```
|
||||
|
||||
### Task Daemon
|
||||
|
||||
It can also be used as a standalone task daemon instead of programmatically within a Golang application.
|
||||
|
||||
First, install the tasker command:
|
||||
```sh
|
||||
go install github.com/adhocore/gronx/cmd/tasker@latest
|
||||
```
|
||||
|
||||
Or download the latest prebuilt binary from the [release](https://github.com/adhocore/gronx/releases/latest) page for your platform of choice.
|
||||
|
||||
Then prepare a taskfile ([example](./tests/../test/taskfile.txt)) in crontab format
(or even point to an existing crontab).
> The `user` field is not supported: each line is just a cron expr followed by the command.
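For illustration, a hypothetical taskfile could look like this (the commands and paths are made up; see the linked example for a real one):

```
* * * * *      echo "every minute" >> /tmp/gronx-tick.log
@10minutes     /usr/local/bin/do-backup --quick
0 0 * * *      sh /opt/scripts/rotate-logs.sh
```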
|
||||
|
||||
Finally, run the task daemon like so:
|
||||
```
|
||||
tasker -file path/to/taskfile
|
||||
```
|
||||
> You can pass more options to control the behavior of task daemon, see below.
|
||||
|
||||
#### Tasker command options:
|
||||
|
||||
```txt
|
||||
-file string <required>
|
||||
The task file in crontab format
|
||||
-out string
|
||||
The fullpath to file where output from tasks are sent to
|
||||
-shell string
|
||||
The shell to use for running tasks (default "/usr/bin/bash")
|
||||
-tz string
|
||||
The timezone to use for tasks (default "Local")
|
||||
-until int
|
||||
The timeout for task daemon in minutes
|
||||
-verbose
|
||||
The verbose mode outputs as much as possible
|
||||
```
|
||||
|
||||
Examples:
|
||||
```sh
|
||||
tasker -verbose -file path/to/taskfile -until 120 # run until next 120min (i.e 2hour) with all feedbacks echoed back
|
||||
tasker -verbose -file path/to/taskfile -out path/to/output # with all feedbacks echoed to the output file
|
||||
tasker -tz America/New_York -file path/to/taskfile -shell zsh # run all tasks using zsh shell based on NY timezone
|
||||
```
|
||||
|
||||
> The file extension of the taskfile (`-file` option) does not matter: it can be any or none.
> The directory for the output file (`-out` option) must exist; the file itself is created by the task daemon.
|
||||
|
||||
> The same timezone currently applies to all tasks; a future release may support overriding the timezone per task.
|
||||
|
||||
#### Notes on Windows
|
||||
|
||||
On Windows, if it doesn't find `bash.exe` or `git-bash.exe`, it will use `powershell`.
`powershell` may not be compatible with Unix-flavored commands. Also note:
you can't chain commands with `cmd1 && cmd2`; use `cmd1 ; cmd2` instead.
|
||||
|
||||
---
|
||||
### Cron Expression
|
||||
|
||||
A complete cron expression consists of 7 segments viz:
|
||||
```
|
||||
<second> <minute> <hour> <day> <month> <weekday> <year>
|
||||
```
|
||||
|
||||
However, only 5 will do, and this is the most common usage. The 5 segments are interpreted as:
|
||||
```
|
||||
<minute> <hour> <day> <month> <weekday>
|
||||
```
|
||||
in which case a default value of 0 is prepended for the `<second>` position.
|
||||
|
||||
In a 6-segment expression, if the 6th segment matches `<year>` (i.e. at least 4 digits), it will be interpreted as:
|
||||
```
|
||||
<minute> <hour> <day> <month> <weekday> <year>
|
||||
```
|
||||
and a default value of 0 is prepended for the `<second>` position.
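A small sketch of that normalization, using the exported `gronx.Segments()` helper that appears later in this diff:

```go
package main

import (
	"fmt"

	"github.com/adhocore/gronx"
)

func main() {
	// 5 segments: a "0" second is prepended
	segs, _ := gronx.Segments("* * * * *")
	fmt.Println(segs) // [0 * * * * *]

	// 6 segments where the 6th is year-like: a "0" second is prepended too
	segs, _ = gronx.Segments("0 0 1 1 * 2025")
	fmt.Println(segs) // [0 0 0 1 1 * 2025]
}
```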
|
||||
|
||||
For each segment you can have **multiple choices** separated by a comma:
|
||||
> Eg: `0 0,30 * * * *` means either 0th or 30th minute.
|
||||
|
||||
To specify a **range of values** you can use a dash:
|
||||
> Eg: `0 10-15 * * * *` means 10th, 11th, 12th, 13th, 14th and 15th minute.
|
||||
|
||||
To specify a **stepped range** you can combine a dash and a slash:
|
||||
> Eg: `0 10-15/2 * * * *` means every 2 minutes between 10 and 15 i.e 10th, 12th and 14th minute.
|
||||
|
||||
For the `<day>` and `<weekday>` segment, there are additional [**modifiers**](#modifiers) (optional).
|
||||
|
||||
And if you want, you can mix multiple choices, ranges and steps in a single expression:
|
||||
> `0 5,12-20/4,55 * * * *` matches if any one of `5` or `12-20/4` or `55` matches the minute.
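For example, checking that mixed expression against a concrete minute (a small sketch; the reference time is arbitrary):

```go
gron := gronx.New()

// minute 16 falls on the 12-20/4 step (12, 16, 20), so this is due
ref := time.Date(2023, time.June, 1, 10, 16, 0, 0, time.UTC)
gron.IsDue("0 5,12-20/4,55 * * * *", ref) // true, nil
```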
|
||||
|
||||
### Real Abbreviations
|
||||
|
||||
You can use real abbreviations (3 chars) for months and weekdays, e.g. `JAN`, `dec`, `fri`, `SUN`.
|
||||
|
||||
### Tags
|
||||
|
||||
The following tags are available; they are converted to real cron expressions before parsing:
|
||||
|
||||
- *@yearly* or *@annually* - every year
|
||||
- *@monthly* - every month
|
||||
- *@daily* - every day
|
||||
- *@weekly* - every week
|
||||
- *@hourly* - every hour
|
||||
- *@5minutes* - every 5 minutes
|
||||
- *@10minutes* - every 10 minutes
|
||||
- *@15minutes* - every 15 minutes
|
||||
- *@30minutes* - every 30 minutes
|
||||
- *@always* - every minute
|
||||
- *@everysecond* - every second
|
||||
|
||||
> For BC reasons, `@always` still means every minute for now; a future release may make it mean every second instead.
|
||||
|
||||
```go
|
||||
// Use tags like so:
|
||||
gron.IsDue("@hourly")
|
||||
gron.IsDue("@5minutes")
|
||||
```
|
||||
|
||||
### Modifiers
|
||||
|
||||
The following modifiers are supported:
|
||||
|
||||
- *Day of Month / 3rd of 5 segments / 4th of 6+ segments:*
|
||||
- `L` stands for the last day of the month (e.g. `L` could mean the 29th for February in a leap year)
|
||||
- `W` stands for the closest weekday (e.g. `10W` is the weekday (MON-FRI) closest to the 10th)
|
||||
- *Day of Week / 5th of 5 segments / 6th of 6+ segments:*
|
||||
- `L` stands for the last given weekday of the month (e.g. `2L` is the last Tuesday, since TUE is 2)
|
||||
- `#` stands for the nth given weekday of the month (e.g. `1#2` is the second Monday, since MON is 1)
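A short sketch of the modifiers in action, assuming the SUN=0..SAT=6 weekday numbering used above (the reference date is arbitrary):

```go
gron := gronx.New()

// 31 Jan 2021 was both the last day of the month and its fifth Sunday
ref := time.Date(2021, time.January, 31, 0, 0, 0, 0, time.UTC)

gron.IsDue("0 0 L * *", ref)   // true: last day of month
gron.IsDue("0 0 * * 0#5", ref) // true: fifth Sunday of the month
```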
|
||||
|
||||
---
|
||||
## License
|
||||
|
||||
> © [MIT](./LICENSE) | 2021-2099, Jitendra Adhikari
|
||||
|
||||
## Credits
|
||||
|
||||
This project is ported from [adhocore/cron-expr](https://github.com/adhocore/php-cron-expr), and its
releases are managed by [please](https://github.com/adhocore/please).
|
||||
|
||||
---
|
||||
### Other projects
|
||||
|
||||
Other Golang projects of mine that you might find interesting and useful:
|
||||
|
||||
- [**urlsh**](https://github.com/adhocore/urlsh) - URL shortener and bookmarker service with UI, API, Cache, Hits Counter and forwarder using postgres and redis in backend, bulma in frontend; has [web](https://urlssh.xyz) and cli client
|
||||
- [**fast**](https://github.com/adhocore/fast) - Check your internet speed with ease and comfort right from the terminal
|
||||
- [**goic**](https://github.com/adhocore/goic) - Go OpenID Connect, an OpenID Connect client library for Golang; supports the Authorization Code Flow of the OpenID Connect specification.
|
||||
- [**chin**](https://github.com/adhocore/chin) - A Go command-line tool to show a spinner while the user waits for long-running jobs to finish.
|
|
@ -0,0 +1 @@
|
|||
v0.2.7
|
|
@ -0,0 +1,51 @@
|
|||
package gronx
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Expr represents an item in array for batch check
|
||||
type Expr struct {
|
||||
Expr string
|
||||
Due bool
|
||||
Err error
|
||||
}
|
||||
|
||||
// BatchDue checks if multiple expressions are due for given time (or now).
|
||||
// It returns []Expr with filled in Due and Err values.
|
||||
func (g *Gronx) BatchDue(exprs []string, ref ...time.Time) []Expr {
|
||||
ref = append(ref, time.Now())
|
||||
g.C.SetRef(ref[0])
|
||||
|
||||
var segs []string
|
||||
|
||||
cache, batch := map[string]Expr{}, make([]Expr, len(exprs))
|
||||
for i := range exprs {
|
||||
batch[i].Expr = exprs[i]
|
||||
segs, batch[i].Err = Segments(exprs[i])
|
||||
key := strings.Join(segs, " ")
|
||||
if batch[i].Err != nil {
|
||||
cache[key] = batch[i]
|
||||
continue
|
||||
}
|
||||
|
||||
if c, ok := cache[key]; ok {
|
||||
batch[i] = c
|
||||
batch[i].Expr = exprs[i]
|
||||
continue
|
||||
}
|
||||
|
||||
due := true
|
||||
for pos, seg := range segs {
|
||||
if seg != "*" && seg != "?" {
|
||||
if due, batch[i].Err = g.C.CheckDue(seg, pos); !due || batch[i].Err != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
batch[i].Due = due
|
||||
cache[key] = batch[i]
|
||||
}
|
||||
return batch
|
||||
}
|
|
@ -0,0 +1,131 @@
|
|||
package gronx
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Checker is interface for cron segment due check.
|
||||
type Checker interface {
|
||||
GetRef() time.Time
|
||||
SetRef(ref time.Time)
|
||||
CheckDue(segment string, pos int) (bool, error)
|
||||
}
|
||||
|
||||
// SegmentChecker is factory implementation of Checker.
|
||||
type SegmentChecker struct {
|
||||
ref time.Time
|
||||
}
|
||||
|
||||
// GetRef returns the current reference time
|
||||
func (c *SegmentChecker) GetRef() time.Time {
|
||||
return c.ref
|
||||
}
|
||||
|
||||
// SetRef sets the reference time for which to check if a cron expression is due.
|
||||
func (c *SegmentChecker) SetRef(ref time.Time) {
|
||||
c.ref = ref
|
||||
}
|
||||
|
||||
// CheckDue checks if the cron segment at given position is due.
|
||||
// It returns bool or error if any.
|
||||
func (c *SegmentChecker) CheckDue(segment string, pos int) (due bool, err error) {
|
||||
ref, last := c.GetRef(), -1
|
||||
val, loc := valueByPos(ref, pos), ref.Location()
|
||||
isMonth, isWeekDay := pos == 3, pos == 5
|
||||
|
||||
for _, offset := range strings.Split(segment, ",") {
|
||||
mod := (isMonth || isWeekDay) && strings.ContainsAny(offset, "LW#")
|
||||
if due, err = c.isOffsetDue(offset, val, pos); due || (!mod && err != nil) {
|
||||
return
|
||||
}
|
||||
if !mod {
|
||||
continue
|
||||
}
|
||||
if last == -1 {
|
||||
last = time.Date(ref.Year(), ref.Month(), 1, 0, 0, 0, 0, loc).AddDate(0, 1, 0).Add(-time.Nanosecond).Day()
|
||||
}
|
||||
if isMonth {
|
||||
due, err = isValidMonthDay(offset, last, ref)
|
||||
} else if isWeekDay {
|
||||
due, err = isValidWeekDay(offset, last, ref)
|
||||
}
|
||||
if due || err != nil {
|
||||
return due, err
|
||||
}
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func (c *SegmentChecker) isOffsetDue(offset string, val, pos int) (bool, error) {
|
||||
if offset == "*" || offset == "?" {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
bounds, isWeekDay := boundsByPos(pos), pos == 5
|
||||
if strings.Contains(offset, "/") {
|
||||
return inStep(val, offset, bounds)
|
||||
}
|
||||
if strings.Contains(offset, "-") {
|
||||
if isWeekDay {
|
||||
offset = strings.Replace(offset, "7-", "0-", 1)
|
||||
}
|
||||
return inRange(val, offset, bounds)
|
||||
}
|
||||
|
||||
if !isWeekDay && (val == 0 || offset == "0") {
|
||||
return offset == "0" && val == 0, nil
|
||||
}
|
||||
|
||||
nval, err := strconv.Atoi(offset)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if nval < bounds[0] || nval > bounds[1] {
|
||||
return false, fmt.Errorf("segment#%d: '%s' out of bounds(%d, %d)", pos, offset, bounds[0], bounds[1])
|
||||
}
|
||||
|
||||
return nval == val || (isWeekDay && nval == 7 && val == 0), nil
|
||||
}
|
||||
|
||||
func valueByPos(ref time.Time, pos int) (val int) {
|
||||
switch pos {
|
||||
case 0:
|
||||
val = ref.Second()
|
||||
case 1:
|
||||
val = ref.Minute()
|
||||
case 2:
|
||||
val = ref.Hour()
|
||||
case 3:
|
||||
val = ref.Day()
|
||||
case 4:
|
||||
val = int(ref.Month())
|
||||
case 5:
|
||||
val = int(ref.Weekday())
|
||||
case 6:
|
||||
val = ref.Year()
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func boundsByPos(pos int) (bounds []int) {
|
||||
bounds = []int{0, 0}
|
||||
switch pos {
|
||||
case 0, 1:
|
||||
bounds = []int{0, 59}
|
||||
case 2:
|
||||
bounds = []int{0, 23}
|
||||
case 3:
|
||||
bounds = []int{1, 31}
|
||||
case 4:
|
||||
bounds = []int{1, 12}
|
||||
case 5:
|
||||
bounds = []int{0, 7}
|
||||
case 6:
|
||||
bounds = []int{1, 9999}
|
||||
}
|
||||
return
|
||||
}
|
|
@ -0,0 +1,128 @@
|
|||
package gronx
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
var literals = strings.NewReplacer(
|
||||
"SUN", "0", "MON", "1", "TUE", "2", "WED", "3", "THU", "4", "FRI", "5", "SAT", "6",
|
||||
"JAN", "1", "FEB", "2", "MAR", "3", "APR", "4", "MAY", "5", "JUN", "6", "JUL", "7",
|
||||
"AUG", "8", "SEP", "9", "OCT", "10", "NOV", "11", "DEC", "12",
|
||||
)
|
||||
|
||||
var expressions = map[string]string{
|
||||
"@yearly": "0 0 1 1 *",
|
||||
"@annually": "0 0 1 1 *",
|
||||
"@monthly": "0 0 1 * *",
|
||||
"@weekly": "0 0 * * 0",
|
||||
"@daily": "0 0 * * *",
|
||||
"@hourly": "0 * * * *",
|
||||
"@always": "* * * * *",
|
||||
"@5minutes": "*/5 * * * *",
|
||||
"@10minutes": "*/10 * * * *",
|
||||
"@15minutes": "*/15 * * * *",
|
||||
"@30minutes": "0,30 * * * *",
|
||||
|
||||
"@everysecond": "* * * * * *",
|
||||
}
|
||||
|
||||
// SpaceRe is regex for whitespace.
|
||||
var SpaceRe = regexp.MustCompile(`\s+`)
|
||||
var yearRe = regexp.MustCompile(`\d{4}`)
|
||||
|
||||
func normalize(expr string) []string {
|
||||
expr = strings.Trim(expr, " \t")
|
||||
if e, ok := expressions[strings.ToLower(expr)]; ok {
|
||||
expr = e
|
||||
}
|
||||
|
||||
expr = SpaceRe.ReplaceAllString(expr, " ")
|
||||
expr = literals.Replace(strings.ToUpper(expr))
|
||||
|
||||
return strings.Split(strings.ReplaceAll(expr, " ", " "), " ")
|
||||
}
|
||||
|
||||
// Gronx is the main program.
|
||||
type Gronx struct {
|
||||
C Checker
|
||||
}
|
||||
|
||||
// New initializes Gronx with factory defaults.
|
||||
func New() Gronx {
|
||||
return Gronx{&SegmentChecker{}}
|
||||
}
|
||||
|
||||
// IsDue checks if cron expression is due for given reference time (or now).
|
||||
// It returns bool or error if any.
|
||||
func (g *Gronx) IsDue(expr string, ref ...time.Time) (bool, error) {
|
||||
ref = append(ref, time.Now())
|
||||
g.C.SetRef(ref[0])
|
||||
|
||||
segs, err := Segments(expr)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return g.SegmentsDue(segs)
|
||||
}
|
||||
|
||||
func (g *Gronx) isDue(expr string, ref time.Time) bool {
|
||||
due, err := g.IsDue(expr, ref)
|
||||
return err == nil && due
|
||||
}
|
||||
|
||||
// Segments splits expr into an array of cron parts.
|
||||
// If the expression contains 5 parts, or the 6th part is year-like, it prepends a second segment.
|
||||
// It returns array or error.
|
||||
func Segments(expr string) ([]string, error) {
|
||||
segs := normalize(expr)
|
||||
slen := len(segs)
|
||||
if slen < 5 || slen > 7 {
|
||||
return []string{}, errors.New("expr should contain 5-7 segments separated by space")
|
||||
}
|
||||
|
||||
// Prepend second if required
|
||||
prepend := slen == 5 || (slen == 6 && yearRe.MatchString(segs[5]))
|
||||
if prepend {
|
||||
segs = append([]string{"0"}, segs...)
|
||||
}
|
||||
|
||||
return segs, nil
|
||||
}
|
||||
|
||||
// SegmentsDue checks if all cron parts are due.
|
||||
// It returns bool. You should use IsDue(expr) instead.
|
||||
func (g *Gronx) SegmentsDue(segs []string) (bool, error) {
|
||||
for pos, seg := range segs {
|
||||
if seg == "*" || seg == "?" {
|
||||
continue
|
||||
}
|
||||
|
||||
if due, err := g.C.CheckDue(seg, pos); !due {
|
||||
return due, err
|
||||
}
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// IsValid checks if cron expression is valid.
|
||||
// It returns bool.
|
||||
func (g *Gronx) IsValid(expr string) bool {
|
||||
segs, err := Segments(expr)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
g.C.SetRef(time.Now())
|
||||
for pos, seg := range segs {
|
||||
if _, err := g.C.CheckDue(seg, pos); err != nil {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
|
@ -0,0 +1,135 @@
|
|||
package gronx
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// CronDateFormat is Y-m-d H:i (seconds are not significant)
|
||||
const CronDateFormat = "2006-01-02 15:04"
|
||||
|
||||
// FullDateFormat is Y-m-d H:i:s (with seconds)
|
||||
const FullDateFormat = "2006-01-02 15:04:05"
|
||||
|
||||
// NextTick gives next run time from now
|
||||
func NextTick(expr string, inclRefTime bool) (time.Time, error) {
|
||||
return NextTickAfter(expr, time.Now(), inclRefTime)
|
||||
}
|
||||
|
||||
// NextTickAfter gives next run time from the provided time.Time
|
||||
func NextTickAfter(expr string, start time.Time, inclRefTime bool) (time.Time, error) {
|
||||
gron, next := New(), start.Truncate(time.Second)
|
||||
due, err := gron.IsDue(expr, start)
|
||||
if err != nil || (due && inclRefTime) {
|
||||
return start, err
|
||||
}
|
||||
|
||||
segments, _ := Segments(expr)
|
||||
if len(segments) > 6 && isUnreachableYear(segments[6], next, inclRefTime, false) {
|
||||
return next, fmt.Errorf("unreachable year segment: %s", segments[6])
|
||||
}
|
||||
|
||||
next, err = loop(gron, segments, next, inclRefTime, false)
|
||||
// Ignore superfluous err
|
||||
if err != nil && gron.isDue(expr, next) {
|
||||
err = nil
|
||||
}
|
||||
return next, err
|
||||
}
|
||||
|
||||
func loop(gron Gronx, segments []string, start time.Time, incl bool, reverse bool) (next time.Time, err error) {
|
||||
iter, next, bumped := 500, start, false
|
||||
over:
|
||||
for iter > 0 {
|
||||
iter--
|
||||
for pos, seg := range segments {
|
||||
if seg == "*" || seg == "?" {
|
||||
continue
|
||||
}
|
||||
if next, bumped, err = bumpUntilDue(gron.C, seg, pos, next, reverse); bumped {
|
||||
goto over
|
||||
}
|
||||
}
|
||||
if !incl && next.Format(FullDateFormat) == start.Format(FullDateFormat) {
|
||||
delta := time.Second
|
||||
if reverse {
|
||||
delta = -time.Second
|
||||
}
|
||||
next, _, err = bumpUntilDue(gron.C, segments[0], 0, next.Add(delta), reverse)
|
||||
continue
|
||||
}
|
||||
return
|
||||
}
|
||||
return start, errors.New("tried so hard")
|
||||
}
|
||||
|
||||
var dashRe = regexp.MustCompile(`/.*$`)
|
||||
|
||||
func isUnreachableYear(year string, ref time.Time, incl bool, reverse bool) bool {
|
||||
if year == "*" || year == "?" {
|
||||
return false
|
||||
}
|
||||
|
||||
edge, inc := ref.Year(), 1
|
||||
if !incl {
|
||||
if reverse {
|
||||
inc = -1
|
||||
}
|
||||
edge += inc
|
||||
}
|
||||
for _, offset := range strings.Split(year, ",") {
|
||||
if strings.Index(offset, "*/") == 0 || strings.Index(offset, "0/") == 0 {
|
||||
return false
|
||||
}
|
||||
for _, part := range strings.Split(dashRe.ReplaceAllString(offset, ""), "-") {
|
||||
val, err := strconv.Atoi(part)
|
||||
if err != nil || (!reverse && val >= edge) || (reverse && val < edge) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
var limit = map[int]int{0: 60, 1: 60, 2: 24, 3: 31, 4: 12, 5: 366, 6: 100}
|
||||
|
||||
func bumpUntilDue(c Checker, segment string, pos int, ref time.Time, reverse bool) (time.Time, bool, error) {
|
||||
// <second> <minute> <hour> <day> <month> <weekday> <year>
|
||||
iter := limit[pos]
|
||||
for iter > 0 {
|
||||
c.SetRef(ref)
|
||||
if ok, _ := c.CheckDue(segment, pos); ok {
|
||||
return ref, iter != limit[pos], nil
|
||||
}
|
||||
ref = bump(ref, pos, reverse)
|
||||
iter--
|
||||
}
|
||||
return ref, false, errors.New("tried so hard")
|
||||
}
|
||||
|
||||
func bump(ref time.Time, pos int, reverse bool) time.Time {
|
||||
factor := 1
|
||||
if reverse {
|
||||
factor = -1
|
||||
}
|
||||
|
||||
switch pos {
|
||||
case 0:
|
||||
ref = ref.Add(time.Duration(factor) * time.Second)
|
||||
case 1:
|
||||
ref = ref.Add(time.Duration(factor) * time.Minute)
|
||||
case 2:
|
||||
ref = ref.Add(time.Duration(factor) * time.Hour)
|
||||
case 3, 5:
|
||||
ref = ref.AddDate(0, 0, factor)
|
||||
case 4:
|
||||
ref = ref.AddDate(0, factor, 0)
|
||||
case 6:
|
||||
ref = ref.AddDate(factor, 0, 0)
|
||||
}
|
||||
return ref
|
||||
}
|
|
@ -0,0 +1,32 @@
|
|||
package gronx
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
// PrevTick gives previous run time before now
|
||||
func PrevTick(expr string, inclRefTime bool) (time.Time, error) {
|
||||
return PrevTickBefore(expr, time.Now(), inclRefTime)
|
||||
}
|
||||
|
||||
// PrevTickBefore gives previous run time before given reference time
|
||||
func PrevTickBefore(expr string, start time.Time, inclRefTime bool) (time.Time, error) {
|
||||
gron, prev := New(), start.Truncate(time.Second)
|
||||
due, err := gron.IsDue(expr, start)
|
||||
if err != nil || (due && inclRefTime) {
|
||||
return prev, err
|
||||
}
|
||||
|
||||
segments, _ := Segments(expr)
|
||||
if len(segments) > 6 && isUnreachableYear(segments[6], prev, inclRefTime, true) {
|
||||
return prev, fmt.Errorf("unreachable year segment: %s", segments[6])
|
||||
}
|
||||
|
||||
prev, err = loop(gron, segments, prev, inclRefTime, true)
|
||||
// Ignore superfluous err
|
||||
if err != nil && gron.isDue(expr, prev) {
|
||||
err = nil
|
||||
}
|
||||
return prev, err
|
||||
}
|
|
@ -0,0 +1,146 @@
|
|||
package gronx
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
func inStep(val int, s string, bounds []int) (bool, error) {
|
||||
parts := strings.Split(s, "/")
|
||||
step, err := strconv.Atoi(parts[1])
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if step == 0 {
|
||||
return false, errors.New("step can't be 0")
|
||||
}
|
||||
|
||||
if strings.Index(s, "*/") == 0 || strings.Index(s, "0/") == 0 {
|
||||
return val%step == 0, nil
|
||||
}
|
||||
|
||||
sub, end := strings.Split(parts[0], "-"), val
|
||||
start, err := strconv.Atoi(sub[0])
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if len(sub) > 1 {
|
||||
end, err = strconv.Atoi(sub[1])
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
|
||||
if (len(sub) > 1 && end < start) || start < bounds[0] || end > bounds[1] {
|
||||
return false, fmt.Errorf("step '%s' out of bounds(%d, %d)", parts[0], bounds[0], bounds[1])
|
||||
}
|
||||
|
||||
return inStepRange(val, start, end, step), nil
|
||||
}
|
||||
|
||||
func inRange(val int, s string, bounds []int) (bool, error) {
|
||||
parts := strings.Split(s, "-")
|
||||
start, err := strconv.Atoi(parts[0])
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
end, err := strconv.Atoi(parts[1])
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if end < start || start < bounds[0] || end > bounds[1] {
|
||||
return false, fmt.Errorf("range '%s' out of bounds(%d, %d)", s, bounds[0], bounds[1])
|
||||
}
|
||||
|
||||
return start <= val && val <= end, nil
|
||||
}
|
||||
|
||||
func inStepRange(val, start, end, step int) bool {
|
||||
for i := start; i <= end && i <= val; i += step {
|
||||
if i == val {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func isValidMonthDay(val string, last int, ref time.Time) (valid bool, err error) {
|
||||
day, loc := ref.Day(), ref.Location()
|
||||
if val == "L" {
|
||||
return day == last, nil
|
||||
}
|
||||
|
||||
pos := strings.Index(val, "W")
|
||||
if pos < 1 {
|
||||
return false, errors.New("invalid offset value: " + val)
|
||||
}
|
||||
|
||||
nval, err := strconv.Atoi(val[0:pos])
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
for _, i := range []int{0, -1, 1, -2, 2} {
|
||||
incr := i + nval
|
||||
if incr > 0 && incr <= last {
|
||||
iref := time.Date(ref.Year(), ref.Month(), incr, ref.Hour(), ref.Minute(), ref.Second(), 0, loc)
|
||||
week := int(iref.Weekday())
|
||||
|
||||
if week > 0 && week < 6 && iref.Month() == ref.Month() {
|
||||
valid = day == iref.Day()
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return valid, nil
|
||||
}
|
||||
|
||||
func isValidWeekDay(val string, last int, ref time.Time) (bool, error) {
|
||||
loc := ref.Location()
|
||||
if pos := strings.Index(strings.ReplaceAll(val, "7L", "0L"), "L"); pos > 0 {
|
||||
nval, err := strconv.Atoi(val[0:pos])
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
for i := 0; i < 7; i++ {
|
||||
decr := last - i
|
||||
dref := time.Date(ref.Year(), ref.Month(), decr, ref.Hour(), ref.Minute(), ref.Second(), ref.Nanosecond(), loc)
|
||||
|
||||
if int(dref.Weekday()) == nval {
|
||||
return ref.Day() == decr, nil
|
||||
}
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
pos := strings.Index(val, "#")
|
||||
parts := strings.Split(strings.ReplaceAll(val, "7#", "0#"), "#")
|
||||
if pos < 1 || len(parts) < 2 {
|
||||
return false, errors.New("invalid offset value: " + val)
|
||||
}
|
||||
|
||||
day, err := strconv.Atoi(parts[0])
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
nth, err := strconv.Atoi(parts[1])
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if day < 0 || day > 7 || nth < 1 || nth > 5 || int(ref.Weekday()) != day {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return ref.Day()/7 == nth-1, nil
|
||||
}
|
|
@ -0,0 +1,19 @@
|
|||
# Binaries for programs and plugins
|
||||
*.exe
|
||||
*.exe~
|
||||
*.dll
|
||||
*.so
|
||||
*.dylib
|
||||
|
||||
# Test binary, built with `go test -c`
|
||||
*.test
|
||||
local_testing
|
||||
|
||||
# Output of the go coverage tool, specifically when used with LiteIDE
|
||||
*.out
|
||||
|
||||
# Dependency directories (remove the comment below to include it)
|
||||
vendor/
|
||||
|
||||
# IDE project files
|
||||
.idea
|
|
@ -0,0 +1,50 @@
|
|||
run:
|
||||
timeout: 2m
|
||||
issues-exit-code: 1
|
||||
tests: true
|
||||
|
||||
issues:
|
||||
max-same-issues: 100
|
||||
exclude-rules:
|
||||
- path: _test\.go
|
||||
linters:
|
||||
- bodyclose
|
||||
- errcheck
|
||||
- gosec
|
||||
|
||||
linters:
|
||||
enable:
|
||||
- bodyclose
|
||||
- errcheck
|
||||
- gofmt
|
||||
- gofumpt
|
||||
- goimports
|
||||
- gosec
|
||||
- gosimple
|
||||
- govet
|
||||
- ineffassign
|
||||
- misspell
|
||||
- revive
|
||||
- staticcheck
|
||||
- typecheck
|
||||
- unused
|
||||
|
||||
output:
|
||||
# colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number"
|
||||
format: colored-line-number
|
||||
# print lines of code with issue, default is true
|
||||
print-issued-lines: true
|
||||
# print linter name in the end of issue text, default is true
|
||||
print-linter-name: true
|
||||
# make issues output unique by line, default is true
|
||||
uniq-by-line: true
|
||||
# add a prefix to the output file references; default is no prefix
|
||||
path-prefix: ""
|
||||
# sorts results by: filepath, line and column
|
||||
sort-results: true
|
||||
|
||||
linters-settings:
|
||||
golint:
|
||||
min-confidence: 0.8
|
||||
|
||||
fix: true
|
|
@ -0,0 +1,73 @@
|
|||
# Contributor Covenant Code of Conduct
|
||||
|
||||
## Our Pledge
|
||||
|
||||
In the interest of fostering an open and welcoming environment, we as
|
||||
contributors and maintainers pledge to making participation in our project and
|
||||
our community a harassment-free experience for everyone. And we mean everyone!
|
||||
|
||||
## Our Standards
|
||||
|
||||
Examples of behavior that contributes to creating a positive environment
|
||||
include:
|
||||
|
||||
* Using welcoming and kind language
|
||||
* Being respectful of differing viewpoints and experiences
|
||||
* Gracefully accepting constructive criticism
|
||||
* Focusing on what is best for the community
|
||||
* Showing empathy towards other community members
|
||||
|
||||
Examples of unacceptable behavior by participants include:
|
||||
|
||||
* The use of sexualized language or imagery and unwelcome sexual attention or
|
||||
advances
|
||||
* Trolling, insulting/derogatory comments, and personal or political attacks
|
||||
* Public or private harassment
|
||||
* Publishing others' private information, such as a physical or electronic
|
||||
address, without explicit permission
|
||||
* Other conduct which could reasonably be considered inappropriate in a
|
||||
professional setting
|
||||
|
||||
## Our Responsibilities
|
||||
|
||||
Project maintainers are responsible for clarifying the standards of acceptable
|
||||
behavior and are expected to take appropriate and fair corrective action in
|
||||
response to any instances of unacceptable behavior.
|
||||
|
||||
Project maintainers have the right and responsibility to remove, edit, or
|
||||
reject comments, commits, code, wiki edits, issues, and other contributions
|
||||
that are not aligned to this Code of Conduct, or to ban temporarily or
|
||||
permanently any contributor for other behaviors that they deem inappropriate,
|
||||
threatening, offensive, or harmful.
|
||||
|
||||
## Scope
|
||||
|
||||
This Code of Conduct applies both within project spaces and in public spaces
|
||||
when an individual is representing the project or its community. Examples of
|
||||
representing a project or community include using an official project e-mail
|
||||
address, posting via an official social media account, or acting as an appointed
|
||||
representative at an online or offline event. Representation of a project may be
|
||||
further defined and clarified by project maintainers.
|
||||
|
||||
## Enforcement
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
||||
reported by contacting the project team initially on Slack to coordinate private communication. All
|
||||
complaints will be reviewed and investigated and will result in a response that
|
||||
is deemed necessary and appropriate to the circumstances. The project team is
|
||||
obligated to maintain confidentiality with regard to the reporter of an incident.
|
||||
Further details of specific enforcement policies may be posted separately.
|
||||
|
||||
Project maintainers who do not follow or enforce the Code of Conduct in good
|
||||
faith may face temporary or permanent repercussions as determined by other
|
||||
members of the project's leadership.
|
||||
|
||||
## Attribution
|
||||
|
||||
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
|
||||
available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
|
||||
|
||||
[homepage]: https://www.contributor-covenant.org
|
||||
|
||||
For answers to common questions about this code of conduct, see
|
||||
https://www.contributor-covenant.org/faq
|
|
@ -0,0 +1,40 @@
|
|||
# Contributing to gocron
|
||||
|
||||
Thank you for coming to contribute to gocron! We welcome new ideas, PRs and general feedback.
|
||||
|
||||
## Reporting Bugs
|
||||
|
||||
If you find a bug then please let the project know by opening an issue after doing the following:
|
||||
|
||||
- Do a quick search of the existing issues to make sure the bug isn't already reported
|
||||
- Try and make a minimal list of steps that can reliably reproduce the bug you are experiencing
|
||||
- Collect as much information as you can to help identify what the issue is (project version, configuration files, etc)
|
||||
|
||||
## Suggesting Enhancements
|
||||
|
||||
If you have a use case that you don't see a way to support yet, we would welcome the feedback in an issue. Before opening the issue, please consider:
|
||||
|
||||
- Is this a common use case?
|
||||
- Is it simple to understand?
|
||||
|
||||
You can help us out by doing the following before raising a new issue:
|
||||
|
||||
- Check that the feature hasn't been requested already by searching existing issues
|
||||
- Try and reduce your enhancement into a single, concise and deliverable request, rather than a general idea
|
||||
- Explain your own use cases as the basis of the request
|
||||
|
||||
## Adding Features
|
||||
|
||||
Pull requests are always welcome. However, before going through the trouble of implementing a change it's worth creating a bug or feature request issue.
|
||||
This allows us to discuss the changes and make sure they are a good fit for the project.
|
||||
|
||||
Please always make sure a pull request has been:
|
||||
|
||||
- Unit tested with `make test`
|
||||
- Linted with `make lint`
|
||||
- Vetted with `make vet`
|
||||
- Formatted with `make fmt` or validated with `make check-fmt`
|
||||
|
||||
## Writing Tests
|
||||
|
||||
Tests should follow the [table driven test pattern](https://dave.cheney.net/2013/06/09/writing-table-driven-tests-in-go). See other tests in the code base for additional examples.
|
|
@ -0,0 +1,21 @@
|
|||
MIT License
|
||||
|
||||
Copyright (c) 2014, 辣椒面
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
|
@ -0,0 +1,13 @@
|
|||
.PHONY: fmt check-fmt lint vet test
|
||||
|
||||
GO_PKGS := $(shell go list -f {{.Dir}} ./...)
|
||||
|
||||
fmt:
|
||||
@go list -f {{.Dir}} ./... | xargs -I{} gofmt -w -s {}
|
||||
|
||||
lint:
|
||||
@grep "^func " example_test.go | sort -c
|
||||
@golangci-lint run
|
||||
|
||||
test:
|
||||
@go test -race -v $(GO_FLAGS) -count=1 $(GO_PKGS)
|
|
@ -0,0 +1,205 @@
|
|||
# gocron: A Golang Job Scheduling Package.
|
||||
|
||||
[](https://github.com/go-co-op/gocron/actions)
|
||||
 [](https://pkg.go.dev/github.com/go-co-op/gocron)
|
||||
|
||||
gocron is a job scheduling package which lets you run Go functions at pre-determined intervals
|
||||
using a simple, human-friendly syntax.
|
||||
|
||||
gocron is a Golang scheduler implementation similar to the Ruby module
|
||||
[clockwork](https://github.com/tomykaira/clockwork) and the Python job scheduling package [schedule](https://github.com/dbader/schedule).
|
||||
|
||||
See also these two great articles that were used for design input:
|
||||
|
||||
- [Rethinking Cron](http://adam.herokuapp.com/past/2010/4/13/rethinking_cron/)
|
||||
- [Replace Cron with Clockwork](http://adam.herokuapp.com/past/2010/6/30/replace_cron_with_clockwork/)
|
||||
|
||||
If you want to chat, you can find us at Slack!
|
||||
[<img src="https://img.shields.io/badge/gophers-gocron-brightgreen?logo=slack">](https://gophers.slack.com/archives/CQ7T0T1FW)
|
||||
|
||||
## Concepts
|
||||
|
||||
- **Scheduler**: The scheduler tracks all the jobs assigned to it and makes sure they are passed to the executor when
|
||||
ready to be run. The scheduler is able to manage overall aspects of job behavior like limiting how many jobs
|
||||
are running at one time.
|
||||
- **Job**: The job is simply aware of the task (go function) it's provided and is therefore only able to perform
|
||||
actions related to that task like preventing itself from overrunning a previous task that is taking a long time.
|
||||
- **Executor**: The executor, as its name suggests, is simply responsible for calling the task (go function) that
|
||||
the job hands to it when sent by the scheduler.
|
||||
|
||||
## Examples
|
||||
|
||||
```golang
|
||||
s := gocron.NewScheduler(time.UTC)
|
||||
|
||||
// Every starts the job immediately and then runs at the
|
||||
// specified interval
|
||||
job, err := s.Every(5).Seconds().Do(func(){ ... })
|
||||
if err != nil {
|
||||
// handle the error related to setting up the job
|
||||
}
|
||||
|
||||
// strings parse to duration
|
||||
s.Every("5m").Do(func(){ ... })
|
||||
|
||||
s.Every(5).Days().Do(func(){ ... })
|
||||
|
||||
s.Every(1).Month(1, 2, 3).Do(func(){ ... })
|
||||
|
||||
// set time
|
||||
s.Every(1).Day().At("10:30").Do(func(){ ... })
|
||||
|
||||
// set multiple times
|
||||
s.Every(1).Day().At("10:30;08:00").Do(func(){ ... })
|
||||
|
||||
s.Every(1).Day().At("10:30").At("08:00").Do(func(){ ... })
|
||||
|
||||
// Schedule each last day of the month
|
||||
s.Every(1).MonthLastDay().Do(func(){ ... })
|
||||
|
||||
// Or each last day of every other month
|
||||
s.Every(2).MonthLastDay().Do(func(){ ... })
|
||||
|
||||
// cron expressions supported
|
||||
s.Cron("*/1 * * * *").Do(task) // every minute
|
||||
|
||||
// cron second-level expressions supported
|
||||
s.CronWithSeconds("*/1 * * * * *").Do(task) // every second
|
||||
|
||||
// you can start running the scheduler in two different ways:
|
||||
// starts the scheduler asynchronously
|
||||
s.StartAsync()
|
||||
// starts the scheduler and blocks current execution path
|
||||
s.StartBlocking()
|
||||
```
|
||||
|
||||
For more examples, take a look in our [go docs](https://pkg.go.dev/github.com/go-co-op/gocron#pkg-examples)
|
||||
|
||||
## Options
|
||||
|
||||
| Interval | Supported schedule options |
|
||||
| ------------ | ------------------------------------------------------------------- |
|
||||
| sub-second | `StartAt()` |
|
||||
| milliseconds | `StartAt()` |
|
||||
| seconds | `StartAt()` |
|
||||
| minutes | `StartAt()` |
|
||||
| hours | `StartAt()` |
|
||||
| days | `StartAt()`, `At()` |
|
||||
| weeks | `StartAt()`, `At()`, `Weekday()` (and all week day named functions) |
|
||||
| months | `StartAt()`, `At()` |
|
||||
|
||||
There are several options available to restrict how jobs run:
|
||||
|
||||
| Mode | Function | Behavior |
|
||||
|---------------------|---------------------------|------------------------------------------------------------------------------------------------------|
|
||||
| Default | | jobs are rescheduled at every interval |
|
||||
| Job singleton | `SingletonMode()` | a long running job will not be rescheduled until the current run is completed |
|
||||
| Scheduler limit | `SetMaxConcurrentJobs()` | set a collective maximum number of concurrent jobs running across the scheduler |
|
||||
| Distributed locking | `WithDistributedLocker()` | prevents the same job from being run more than once when running multiple instances of the scheduler |
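A brief sketch (not lifted from the gocron docs) that combines two of these modes; the numbers and the sleeping job body are illustrative:

```golang
s := gocron.NewScheduler(time.UTC)

// at most 2 jobs run at once across the scheduler; others wait for a free slot
s.SetMaxConcurrentJobs(2, gocron.WaitMode)

// a long-running job that won't be rescheduled until its current run completes
_, _ = s.Every(10).Seconds().SingletonMode().Do(func() {
	time.Sleep(30 * time.Second)
})

s.StartAsync()
```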
|
||||
|
||||
## Distributed Locker Implementations
|
||||
|
||||
- Redis: [redislock](https://github.com/go-co-op/gocron-redis-lock) `go get github.com/go-co-op/gocron-redis-lock`
|
||||
|
||||
## Tags
|
||||
|
||||
Jobs may have arbitrary tags added which can be useful when tracking many jobs.
|
||||
The scheduler supports both enforcing tags to be unique and when not unique,
|
||||
running all jobs with a given tag.
|
||||
|
||||
```golang
|
||||
s := gocron.NewScheduler(time.UTC)
|
||||
s.TagsUnique()
|
||||
|
||||
_, _ = s.Every(1).Week().Tag("foo").Do(task)
|
||||
_, err := s.Every(1).Week().Tag("foo").Do(task)
|
||||
// error!!!
|
||||
|
||||
s := gocron.NewScheduler(time.UTC)
|
||||
|
||||
s.Every(2).Day().Tag("tag").At("10:00").Do(task)
|
||||
s.Every(1).Minute().Tag("tag").Do(task)
|
||||
s.RunByTag("tag")
|
||||
// both jobs will run
|
||||
```
|
||||
|
||||
## FAQ
|
||||
|
||||
- Q: I'm running multiple pods in a distributed environment. How can I keep a job from running once per pod, causing duplication?
|
||||
  - A: We recommend using your own lock solution within the jobs themselves (you could use [Redis](https://redis.io/topics/distlock), for example)
|
||||
- A2: Use the scheduler option `WithDistributedLocker` and either use an implemented [backend](#distributed-locker-implementations)
|
||||
or implement your own and contribute it back in a PR!
|
||||
|
||||
- Q: I've removed my job from the scheduler, but how can I stop a long-running job that has already been triggered?
|
||||
- A: We recommend using a means of canceling your job, e.g. a `context.WithCancel()`.
|
||||
- A2: You can listen to the job context Done channel to know when the job has been canceled
|
||||
```golang
|
||||
task := func(in string, job gocron.Job) {
|
||||
fmt.Printf("this job's last run: %s this job's next run: %s\n", job.LastRun(), job.NextRun())
|
||||
fmt.Printf("in argument is %s\n", in)
|
||||
|
||||
ticker := time.NewTicker(100 * time.Millisecond)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-job.Context().Done():
|
||||
fmt.Printf("function has been canceled, performing cleanup and exiting gracefully\n")
|
||||
return
|
||||
case <-ticker.C:
|
||||
fmt.Printf("performing a hard job that takes a long time that I want to kill whenever I want\n")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var err error
|
||||
s := gocron.NewScheduler(time.UTC)
|
||||
s.SingletonModeAll()
|
||||
j, err := s.Every(1).Hour().Tag("myJob").DoWithJobDetails(task, "foo")
|
||||
if err != nil {
|
||||
log.Fatalln("error scheduling job", err)
|
||||
}
|
||||
|
||||
s.StartAsync()
|
||||
|
||||
// Simulate some more work
|
||||
time.Sleep(time.Second)
|
||||
|
||||
// I want to stop the job, together with the underlying goroutine
|
||||
fmt.Printf("now I want to kill the job\n")
|
||||
err = s.RemoveByTag("myJob")
|
||||
if err != nil {
|
||||
log.Fatalln("error removing job by tag", err)
|
||||
}
|
||||
|
||||
// Wait a bit so that we can see that the job is exiting gracefully
|
||||
time.Sleep(time.Second)
|
||||
fmt.Printf("Job: %#v, Error: %#v", j, err)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
Looking to contribute? Try to follow these guidelines:
|
||||
|
||||
- Use issues for everything
|
||||
- For a small change, just send a PR!
|
||||
- For bigger changes, please open an issue for discussion before sending a PR.
|
||||
- PRs should have: tests, documentation and examples (if it makes sense)
|
||||
- You can also contribute by:
|
||||
- Reporting issues
|
||||
- Suggesting new features or enhancements
|
||||
- Improving/fixing documentation
|
||||
|
||||
---
|
||||
|
||||
## Design
|
||||
|
||||

|
||||
|
||||
[Jetbrains](https://www.jetbrains.com/?from=gocron) supports this project with GoLand licenses. We appreciate their support for free and open source software!
|
||||
|
||||
## Star History
|
||||
|
||||
[](https://star-history.com/#go-co-op/gocron&Date)
|
||||
|
||||
|
|
@ -0,0 +1,15 @@
|
|||
# Security Policy
|
||||
|
||||
## Supported Versions
|
||||
|
||||
The current plan is to maintain version 1 as long as possible incorporating any necessary security patches.
|
||||
|
||||
| Version | Supported |
|
||||
| ------- | ------------------ |
|
||||
| 1.x.x | :white_check_mark: |
|
||||
|
||||
## Reporting a Vulnerability
|
||||
|
||||
Vulnerabilities can be reported by [opening an issue](https://github.com/go-co-op/gocron/issues/new/choose) or reaching out on Slack: [<img src="https://img.shields.io/badge/gophers-gocron-brightgreen?logo=slack">](https://gophers.slack.com/archives/CQ7T0T1FW)
|
||||
|
||||
We will do our best to address any vulnerabilities in an expeditious manner.
|
|
@ -0,0 +1,273 @@
|
|||
package gocron
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"go.uber.org/atomic"
|
||||
)
|
||||
|
||||
const (
|
||||
// RescheduleMode - the default is that if a limit on maximum
|
||||
// concurrent jobs is set and the limit is reached, a job will
|
||||
// skip its run and try again on the next occurrence in the schedule
|
||||
RescheduleMode limitMode = iota
|
||||
|
||||
// WaitMode - if a limit on maximum concurrent jobs is set
|
||||
// and the limit is reached, a job will wait to try and run
|
||||
// until a spot in the limit is freed up.
|
||||
//
|
||||
// Note: this mode can produce unpredictable results as
|
||||
// job execution order isn't guaranteed. For example, a job that
|
||||
// executes frequently may pile up in the wait queue and be executed
|
||||
// many times back to back when the queue opens.
|
||||
//
|
||||
// Warning: do not use this mode if your jobs will continue to stack
|
||||
// up beyond the ability of the limit workers to keep up. An example of
|
||||
// what NOT to do:
|
||||
//
|
||||
// s.Every("1s").Do(func() {
|
||||
// // this will result in an ever-growing number of goroutines
|
||||
// // blocked trying to send to the buffered channel
|
||||
// time.Sleep(10 * time.Minute)
|
||||
// })
|
||||
|
||||
WaitMode
|
||||
)
|
||||
|
||||
type executor struct {
|
||||
jobFunctions chan jobFunction // the chan upon which the jobFunctions are passed in from the scheduler
|
||||
ctx context.Context // used to tell the executor to stop
|
||||
cancel context.CancelFunc // used to tell the executor to stop
|
||||
wg *sync.WaitGroup // used by the scheduler to wait for the executor to stop
|
||||
jobsWg *sync.WaitGroup // used by the executor to wait for all jobs to finish
|
||||
singletonWgs *sync.Map // used by the executor to wait for the singleton runners to complete
|
||||
skipExecution *atomic.Bool // used to pause the execution of jobs
|
||||
|
||||
limitMode limitMode // when SetMaxConcurrentJobs() is set upon the scheduler
|
||||
limitModeMaxRunningJobs int // stores the maximum number of concurrently running jobs
|
||||
limitModeFuncsRunning *atomic.Int64 // tracks the count of limited mode funcs running
|
||||
limitModeFuncWg *sync.WaitGroup // allow the executor to wait for limit mode functions to wrap up
|
||||
limitModeQueue chan jobFunction // pass job functions to the limit mode workers
|
||||
limitModeQueueMu *sync.Mutex // for protecting the limitModeQueue
|
||||
limitModeRunningJobs *atomic.Int64 // tracks the count of running jobs to check against the max
|
||||
stopped *atomic.Bool // allow workers to drain the buffered limitModeQueue
|
||||
|
||||
distributedLocker Locker // support running jobs across multiple instances
|
||||
}
|
||||
|
||||
func newExecutor() executor {
|
||||
e := executor{
|
||||
jobFunctions: make(chan jobFunction, 1),
|
||||
singletonWgs: &sync.Map{},
|
||||
limitModeFuncsRunning: atomic.NewInt64(0),
|
||||
limitModeFuncWg: &sync.WaitGroup{},
|
||||
limitModeRunningJobs: atomic.NewInt64(0),
|
||||
limitModeQueueMu: &sync.Mutex{},
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
func runJob(f jobFunction) {
|
||||
f.runStartCount.Add(1)
|
||||
f.isRunning.Store(true)
|
||||
callJobFunc(f.eventListeners.onBeforeJobExecution)
|
||||
_ = callJobFuncWithParams(f.eventListeners.beforeJobRuns, []interface{}{f.getName()})
|
||||
err := callJobFuncWithParams(f.function, f.parameters)
|
||||
if err != nil {
|
||||
_ = callJobFuncWithParams(f.eventListeners.onError, []interface{}{f.getName(), err})
|
||||
} else {
|
||||
_ = callJobFuncWithParams(f.eventListeners.noError, []interface{}{f.getName()})
|
||||
}
|
||||
_ = callJobFuncWithParams(f.eventListeners.afterJobRuns, []interface{}{f.getName()})
|
||||
callJobFunc(f.eventListeners.onAfterJobExecution)
|
||||
f.isRunning.Store(false)
|
||||
f.runFinishCount.Add(1)
|
||||
}
|
||||
|
||||
func (jf *jobFunction) singletonRunner() {
|
||||
jf.singletonRunnerOn.Store(true)
|
||||
jf.singletonWgMu.Lock()
|
||||
jf.singletonWg.Add(1)
|
||||
jf.singletonWgMu.Unlock()
|
||||
for {
|
||||
select {
|
||||
case <-jf.ctx.Done():
|
||||
jf.singletonWg.Done()
|
||||
jf.singletonRunnerOn.Store(false)
|
||||
jf.singletonQueueMu.Lock()
|
||||
jf.singletonQueue = make(chan struct{}, 1000)
|
||||
jf.singletonQueueMu.Unlock()
|
||||
jf.stopped.Store(false)
|
||||
return
|
||||
case <-jf.singletonQueue:
|
||||
if !jf.stopped.Load() {
|
||||
runJob(*jf)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (e *executor) limitModeRunner() {
|
||||
for {
|
||||
select {
|
||||
case <-e.ctx.Done():
|
||||
e.limitModeFuncsRunning.Inc()
|
||||
e.limitModeFuncWg.Done()
|
||||
return
|
||||
case jf := <-e.limitModeQueue:
|
||||
if !e.stopped.Load() {
|
||||
e.runJob(jf)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (e *executor) start() {
|
||||
e.wg = &sync.WaitGroup{}
|
||||
e.wg.Add(1)
|
||||
|
||||
stopCtx, cancel := context.WithCancel(context.Background())
|
||||
e.ctx = stopCtx
|
||||
e.cancel = cancel
|
||||
|
||||
e.jobsWg = &sync.WaitGroup{}
|
||||
|
||||
e.stopped = atomic.NewBool(false)
|
||||
e.skipExecution = atomic.NewBool(false)
|
||||
|
||||
e.limitModeQueueMu.Lock()
|
||||
e.limitModeQueue = make(chan jobFunction, 1000)
|
||||
e.limitModeQueueMu.Unlock()
|
||||
go e.run()
|
||||
}
|
||||
|
||||
func (e *executor) runJob(f jobFunction) {
|
||||
switch f.runConfig.mode {
|
||||
case defaultMode:
|
||||
lockKey := f.jobName
|
||||
if lockKey == "" {
|
||||
lockKey = f.funcName
|
||||
}
|
||||
if e.distributedLocker != nil {
|
||||
l, err := e.distributedLocker.Lock(f.ctx, lockKey)
|
||||
if err != nil || l == nil {
|
||||
return
|
||||
}
|
||||
defer func() {
|
||||
durationToNextRun := time.Until(f.jobFuncNextRun)
|
||||
if durationToNextRun > time.Second*5 {
|
||||
durationToNextRun = time.Second * 5
|
||||
}
|
||||
if durationToNextRun > time.Millisecond*100 {
|
||||
timer := time.NewTimer(time.Duration(float64(durationToNextRun) * 0.9))
|
||||
defer timer.Stop()
|
||||
|
||||
select {
|
||||
case <-e.ctx.Done():
|
||||
case <-timer.C:
|
||||
}
|
||||
}
|
||||
_ = l.Unlock(f.ctx)
|
||||
}()
|
||||
}
|
||||
runJob(f)
|
||||
case singletonMode:
|
||||
e.singletonWgs.Store(f.singletonWg, f.singletonWgMu)
|
||||
|
||||
if !f.singletonRunnerOn.Load() {
|
||||
go f.singletonRunner()
|
||||
}
|
||||
f.singletonQueueMu.Lock()
|
||||
f.singletonQueue <- struct{}{}
|
||||
f.singletonQueueMu.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
func (e *executor) run() {
|
||||
for {
|
||||
select {
|
||||
case f := <-e.jobFunctions:
|
||||
if e.stopped.Load() || e.skipExecution.Load() {
|
||||
continue
|
||||
}
|
||||
|
||||
if e.limitModeMaxRunningJobs > 0 {
|
||||
countRunning := e.limitModeFuncsRunning.Load()
|
||||
if countRunning < int64(e.limitModeMaxRunningJobs) {
|
||||
diff := int64(e.limitModeMaxRunningJobs) - countRunning
|
||||
for i := int64(0); i < diff; i++ {
|
||||
e.limitModeFuncWg.Add(1)
|
||||
go e.limitModeRunner()
|
||||
e.limitModeFuncsRunning.Inc()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
e.jobsWg.Add(1)
|
||||
go func() {
|
||||
defer e.jobsWg.Done()
|
||||
|
||||
panicHandlerMutex.RLock()
|
||||
defer panicHandlerMutex.RUnlock()
|
||||
|
||||
if panicHandler != nil {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
panicHandler(f.funcName, r)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
if e.limitModeMaxRunningJobs > 0 {
|
||||
switch e.limitMode {
|
||||
case RescheduleMode:
|
||||
if e.limitModeRunningJobs.Load() < int64(e.limitModeMaxRunningJobs) {
|
||||
select {
|
||||
case e.limitModeQueue <- f:
|
||||
case <-e.ctx.Done():
|
||||
}
|
||||
}
|
||||
case WaitMode:
|
||||
select {
|
||||
case e.limitModeQueue <- f:
|
||||
case <-e.ctx.Done():
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
e.runJob(f)
|
||||
}()
|
||||
case <-e.ctx.Done():
|
||||
e.jobsWg.Wait()
|
||||
e.wg.Done()
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (e *executor) stop() {
|
||||
e.stopped.Store(true)
|
||||
e.cancel()
|
||||
e.wg.Wait()
|
||||
if e.singletonWgs != nil {
|
||||
e.singletonWgs.Range(func(key, value interface{}) bool {
|
||||
wg, wgOk := key.(*sync.WaitGroup)
|
||||
mu, muOk := value.(*sync.Mutex)
|
||||
if wgOk && muOk {
|
||||
mu.Lock()
|
||||
wg.Wait()
|
||||
mu.Unlock()
|
||||
}
|
||||
return true
|
||||
})
|
||||
}
|
||||
if e.limitModeMaxRunningJobs > 0 {
|
||||
e.limitModeFuncWg.Wait()
|
||||
e.limitModeQueueMu.Lock()
|
||||
e.limitModeQueue = nil
|
||||
e.limitModeQueueMu.Unlock()
|
||||
}
|
||||
}
|
|
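The limit-mode machinery above (limitModeRunner, RescheduleMode, WaitMode) is driven from the scheduler, whose diff is not included in this excerpt. A minimal usage sketch, assuming the upstream go-co-op/gocron v1 scheduler API (NewScheduler, SetMaxConcurrentJobs, Every, Seconds, Do, StartAsync, Stop):

package main

import (
	"fmt"
	"time"

	"github.com/go-co-op/gocron"
)

func main() {
	s := gocron.NewScheduler(time.UTC)

	// Allow at most two jobs to run at once; extra runs wait in the
	// limit-mode queue (WaitMode) instead of being skipped.
	s.SetMaxConcurrentJobs(2, gocron.WaitMode)

	for i := 0; i < 5; i++ {
		i := i
		_, _ = s.Every(1).Seconds().Do(func() {
			fmt.Println("job", i, "started")
			time.Sleep(3 * time.Second)
		})
	}

	s.StartAsync()
	time.Sleep(10 * time.Second)
	s.Stop()
}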
@@ -0,0 +1,146 @@
|
|||
// Package gocron : A Golang Job Scheduling Package.
|
||||
//
|
||||
// An in-process scheduler for periodic jobs that uses the builder pattern
|
||||
// for configuration. gocron lets you run Golang functions periodically
|
||||
// at pre-determined intervals using a simple, human-friendly syntax.
|
||||
package gocron
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// PanicHandlerFunc represents a type that can be set to handle panics occurring
|
||||
// during job execution.
|
||||
type PanicHandlerFunc func(jobName string, recoverData interface{})
|
||||
|
||||
// The global panic handler
|
||||
var (
|
||||
panicHandler PanicHandlerFunc
|
||||
panicHandlerMutex = sync.RWMutex{}
|
||||
)
|
||||
|
||||
// SetPanicHandler sets the global panicHandler to the given function.
|
||||
// Leaving it nil or setting it to nil disables automatic panic handling.
|
||||
// If the panicHandler is not nil, any panic that occurs while executing a job will be recovered
|
||||
// and the panicHandlerFunc will be called with the job's funcName and the recover data.
|
||||
func SetPanicHandler(handler PanicHandlerFunc) {
|
||||
panicHandlerMutex.Lock()
|
||||
defer panicHandlerMutex.Unlock()
|
||||
panicHandler = handler
|
||||
}
|
||||
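A short usage sketch for the panic handler hook above. SetPanicHandler is defined in this file; the scheduler calls are assumed from the upstream go-co-op/gocron v1 API and are not part of this excerpt:

package main

import (
	"log"
	"time"

	"github.com/go-co-op/gocron"
)

func main() {
	// Recover panics raised by job functions instead of crashing the process.
	gocron.SetPanicHandler(func(jobName string, recoverData interface{}) {
		log.Printf("job %q panicked: %v", jobName, recoverData)
	})

	s := gocron.NewScheduler(time.UTC)
	_, _ = s.Every(5).Seconds().Do(func() { panic("boom") })
	s.StartBlocking()
}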
|
||||
// Error declarations for gocron related errors
|
||||
var (
|
||||
ErrNotAFunction = errors.New("gocron: only functions can be scheduled into the job queue")
|
||||
ErrNotScheduledWeekday = errors.New("gocron: job not scheduled weekly on a weekday")
|
||||
ErrJobNotFoundWithTag = errors.New("gocron: no jobs found with given tag")
|
||||
ErrUnsupportedTimeFormat = errors.New("gocron: the given time format is not supported")
|
||||
ErrInvalidInterval = errors.New("gocron: .Every() interval must be greater than 0")
|
||||
ErrInvalidIntervalType = errors.New("gocron: .Every() interval must be int, time.Duration, or string")
|
||||
ErrInvalidIntervalUnitsSelection = errors.New("gocron: .Every(time.Duration) and .Cron() cannot be used with units (e.g. .Seconds())")
|
||||
ErrInvalidFunctionParameters = errors.New("gocron: length of function parameters must match job function parameters")
|
||||
|
||||
ErrAtTimeNotSupported = errors.New("gocron: the At() method is not supported for this time unit")
|
||||
ErrWeekdayNotSupported = errors.New("gocron: weekday is not supported for time unit")
|
||||
ErrInvalidDayOfMonthEntry = errors.New("gocron: only days 1 through 28 are allowed for monthly schedules")
|
||||
ErrTagsUnique = func(tag string) error { return fmt.Errorf("gocron: a non-unique tag was set on the job: %s", tag) }
|
||||
ErrWrongParams = errors.New("gocron: wrong list of params")
|
||||
ErrDoWithJobDetails = errors.New("gocron: DoWithJobDetails expects a function whose last parameter is a gocron.Job")
|
||||
ErrUpdateCalledWithoutJob = errors.New("gocron: a call to Scheduler.Update() requires a call to Scheduler.Job() first")
|
||||
ErrCronParseFailure = errors.New("gocron: cron expression failed to be parsed")
|
||||
ErrInvalidDaysOfMonthDuplicateValue = errors.New("gocron: duplicate days of month is not allowed in Month() and Months() methods")
|
||||
)
|
||||
|
||||
func wrapOrError(toWrap error, err error) error {
|
||||
var returnErr error
|
||||
if toWrap != nil && !errors.Is(err, toWrap) {
|
||||
returnErr = fmt.Errorf("%s: %w", err, toWrap)
|
||||
} else {
|
||||
returnErr = err
|
||||
}
|
||||
return returnErr
|
||||
}
|
||||
|
||||
// regex patterns for supported time formats
|
||||
var (
|
||||
timeWithSeconds = regexp.MustCompile(`(?m)^\d{1,2}:\d\d:\d\d$`)
|
||||
timeWithoutSeconds = regexp.MustCompile(`(?m)^\d{1,2}:\d\d$`)
|
||||
)
|
||||
|
||||
type schedulingUnit int
|
||||
|
||||
const (
|
||||
// default unit is seconds
|
||||
milliseconds schedulingUnit = iota
|
||||
seconds
|
||||
minutes
|
||||
hours
|
||||
days
|
||||
weeks
|
||||
months
|
||||
duration
|
||||
crontab
|
||||
)
|
||||
|
||||
func callJobFunc(jobFunc interface{}) {
|
||||
if jobFunc == nil {
|
||||
return
|
||||
}
|
||||
f := reflect.ValueOf(jobFunc)
|
||||
if !f.IsZero() {
|
||||
f.Call([]reflect.Value{})
|
||||
}
|
||||
}
|
||||
|
||||
func callJobFuncWithParams(jobFunc interface{}, params []interface{}) error {
|
||||
if jobFunc == nil {
|
||||
return nil
|
||||
}
|
||||
f := reflect.ValueOf(jobFunc)
|
||||
if f.IsZero() {
|
||||
return nil
|
||||
}
|
||||
if len(params) != f.Type().NumIn() {
|
||||
return nil
|
||||
}
|
||||
in := make([]reflect.Value, len(params))
|
||||
for k, param := range params {
|
||||
in[k] = reflect.ValueOf(param)
|
||||
}
|
||||
vals := f.Call(in)
|
||||
for _, val := range vals {
|
||||
i := val.Interface()
|
||||
if err, ok := i.(error); ok {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
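callJobFuncWithParams dispatches arbitrary user functions via reflection, silently skipping calls whose parameter count does not match and surfacing a returned error if there is one. A small self-contained sketch of the same pattern (illustrative only, not part of the library):

package main

import (
	"errors"
	"fmt"
	"reflect"
)

// invoke mirrors the callJobFuncWithParams logic: build the argument list,
// call the function, and return the first error value it produces.
func invoke(fn interface{}, params ...interface{}) error {
	f := reflect.ValueOf(fn)
	if f.Kind() != reflect.Func || len(params) != f.Type().NumIn() {
		return nil // mismatched calls are silently ignored, as in gocron
	}
	in := make([]reflect.Value, len(params))
	for i, p := range params {
		in[i] = reflect.ValueOf(p)
	}
	for _, out := range f.Call(in) {
		if err, ok := out.Interface().(error); ok {
			return err
		}
	}
	return nil
}

func main() {
	err := invoke(func(name string) error {
		return fmt.Errorf("job %s: %w", name, errors.New("boom"))
	}, "demo")
	fmt.Println(err) // job demo: boom
}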
|
||||
func getFunctionName(fn interface{}) string {
|
||||
return runtime.FuncForPC(reflect.ValueOf(fn).Pointer()).Name()
|
||||
}
|
||||
|
||||
func parseTime(t string) (hour, min, sec int, err error) {
|
||||
var timeLayout string
|
||||
switch {
|
||||
case timeWithSeconds.Match([]byte(t)):
|
||||
timeLayout = "15:04:05"
|
||||
case timeWithoutSeconds.Match([]byte(t)):
|
||||
timeLayout = "15:04"
|
||||
default:
|
||||
return 0, 0, 0, ErrUnsupportedTimeFormat
|
||||
}
|
||||
|
||||
parsedTime, err := time.Parse(timeLayout, t)
|
||||
if err != nil {
|
||||
return 0, 0, 0, ErrUnsupportedTimeFormat
|
||||
}
|
||||
return parsedTime.Hour(), parsedTime.Minute(), parsedTime.Second(), nil
|
||||
}
|
|
@@ -0,0 +1,602 @@
|
|||
package gocron
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"sort"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/robfig/cron/v3"
|
||||
"go.uber.org/atomic"
|
||||
)
|
||||
|
||||
// Job struct stores the information necessary to run a Job
|
||||
type Job struct {
|
||||
mu *jobMutex
|
||||
jobFunction
|
||||
interval int // interval * unit between runs
|
||||
random // details for randomness
|
||||
duration time.Duration // time duration between runs
|
||||
unit schedulingUnit // time units, e.g. 'minutes', 'hours'...
|
||||
startsImmediately bool // if the Job should run upon scheduler start
|
||||
atTimes []time.Duration // optional time(s) at which this Job runs when interval is day
|
||||
startAtTime time.Time // optional time at which the Job starts
|
||||
error error // error related to Job
|
||||
|
||||
scheduledWeekdays []time.Weekday // Specific days of the week to start on
|
||||
daysOfTheMonth []int // Specific days of the month to run the job
|
||||
tags []string // allow the user to tag Jobs with certain labels
|
||||
timer *time.Timer // handles running tasks at specific time
|
||||
cronSchedule cron.Schedule // stores the schedule when a task uses cron
|
||||
runWithDetails bool // when true the job is passed as the last arg of the jobFunc
|
||||
}
|
||||
|
||||
type jobRunTimes struct {
|
||||
jobRunTimesMu *sync.Mutex
|
||||
previousRun time.Time // datetime of the run before last run
|
||||
lastRun time.Time // datetime of last run
|
||||
nextRun time.Time // datetime of next run
|
||||
}
|
||||
|
||||
type random struct {
|
||||
rand *rand.Rand
|
||||
randomizeInterval bool // whether the interval is random
|
||||
randomIntervalRange [2]int // random interval range
|
||||
}
|
||||
|
||||
type jobFunction struct {
|
||||
*jobRunTimes // tracking all the markers for job run times
|
||||
	eventListeners // additional listener functions to run at points during the job's execution
|
||||
function interface{} // task's function
|
||||
parameters []interface{} // task's function parameters
|
||||
parametersLen int // length of the passed parameters
|
||||
jobName string // key of the distributed lock
|
||||
funcName string // the name of the function - e.g. main.func1
|
||||
runConfig runConfig // configuration for how many times to run the job
|
||||
singletonQueueMu *sync.Mutex // mutex for singletonQueue
|
||||
singletonQueue chan struct{} // queues jobs for the singleton runner to handle
|
||||
singletonRunnerOn *atomic.Bool // whether the runner function for singleton is running
|
||||
ctx context.Context // for cancellation
|
||||
cancel context.CancelFunc // for cancellation
|
||||
isRunning *atomic.Bool // whether the job func is currently being run
|
||||
runStartCount *atomic.Int64 // number of times the job was started
|
||||
runFinishCount *atomic.Int64 // number of times the job was finished
|
||||
singletonWg *sync.WaitGroup // used by singleton runner
|
||||
	singletonWgMu *sync.Mutex // used to protect the singletonWg
|
||||
stopped *atomic.Bool // tracks whether the job is currently stopped
|
||||
jobFuncNextRun time.Time // the next time the job is scheduled to run
|
||||
}
|
||||
|
||||
type eventListeners struct {
|
||||
onAfterJobExecution interface{} // deprecated
|
||||
onBeforeJobExecution interface{} // deprecated
|
||||
beforeJobRuns func(jobName string) // called before the job executes
|
||||
afterJobRuns func(jobName string) // called after the job executes
|
||||
onError func(jobName string, err error) // called when the job returns an error
|
||||
noError func(jobName string) // called when no error is returned
|
||||
}
|
||||
|
||||
type jobMutex struct {
|
||||
sync.RWMutex
|
||||
}
|
||||
|
||||
func (jf *jobFunction) copy() jobFunction {
|
||||
cp := jobFunction{
|
||||
jobRunTimes: jf.jobRunTimes,
|
||||
eventListeners: jf.eventListeners,
|
||||
function: jf.function,
|
||||
parameters: nil,
|
||||
parametersLen: jf.parametersLen,
|
||||
funcName: jf.funcName,
|
||||
jobName: jf.jobName,
|
||||
runConfig: jf.runConfig,
|
||||
singletonQueue: jf.singletonQueue,
|
||||
singletonQueueMu: jf.singletonQueueMu,
|
||||
ctx: jf.ctx,
|
||||
cancel: jf.cancel,
|
||||
isRunning: jf.isRunning,
|
||||
runStartCount: jf.runStartCount,
|
||||
runFinishCount: jf.runFinishCount,
|
||||
singletonWg: jf.singletonWg,
|
||||
singletonWgMu: jf.singletonWgMu,
|
||||
singletonRunnerOn: jf.singletonRunnerOn,
|
||||
stopped: jf.stopped,
|
||||
jobFuncNextRun: jf.jobFuncNextRun,
|
||||
}
|
||||
cp.parameters = append(cp.parameters, jf.parameters...)
|
||||
return cp
|
||||
}
|
||||
|
||||
func (jf *jobFunction) getName() string {
|
||||
if jf.jobName != "" {
|
||||
return jf.jobName
|
||||
}
|
||||
return jf.funcName
|
||||
}
|
||||
|
||||
type runConfig struct {
|
||||
finiteRuns bool
|
||||
maxRuns int
|
||||
mode mode
|
||||
}
|
||||
|
||||
// mode is the Job's running mode
|
||||
type mode int8
|
||||
|
||||
const (
|
||||
	// defaultMode disables any special run mode
|
||||
defaultMode mode = iota
|
||||
|
||||
	// singletonMode switches to single job mode
|
||||
singletonMode
|
||||
)
|
||||
|
||||
// newJob creates a new Job with the provided interval
|
||||
func newJob(interval int, startImmediately bool, singletonMode bool) *Job {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
job := &Job{
|
||||
mu: &jobMutex{},
|
||||
interval: interval,
|
||||
unit: seconds,
|
||||
jobFunction: jobFunction{
|
||||
jobRunTimes: &jobRunTimes{
|
||||
jobRunTimesMu: &sync.Mutex{},
|
||||
lastRun: time.Time{},
|
||||
nextRun: time.Time{},
|
||||
},
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
isRunning: atomic.NewBool(false),
|
||||
runStartCount: atomic.NewInt64(0),
|
||||
runFinishCount: atomic.NewInt64(0),
|
||||
singletonRunnerOn: atomic.NewBool(false),
|
||||
stopped: atomic.NewBool(false),
|
||||
},
|
||||
tags: []string{},
|
||||
startsImmediately: startImmediately,
|
||||
}
|
||||
if singletonMode {
|
||||
job.SingletonMode()
|
||||
}
|
||||
return job
|
||||
}
|
||||
|
||||
// Name sets the name of the current job.
|
||||
//
|
||||
// If the scheduler is running using WithDistributedLocker(),
|
||||
// the job name is used as the distributed lock key.
|
||||
func (j *Job) Name(name string) {
|
||||
j.mu.Lock()
|
||||
defer j.mu.Unlock()
|
||||
j.jobName = name
|
||||
}
|
||||
|
||||
func (j *Job) setRandomInterval(a, b int) {
|
||||
j.random.rand = rand.New(rand.NewSource(time.Now().UnixNano())) // nolint
|
||||
|
||||
j.random.randomizeInterval = true
|
||||
if a < b {
|
||||
j.random.randomIntervalRange[0] = a
|
||||
j.random.randomIntervalRange[1] = b + 1
|
||||
} else {
|
||||
j.random.randomIntervalRange[0] = b
|
||||
j.random.randomIntervalRange[1] = a + 1
|
||||
}
|
||||
}
|
||||
|
||||
func (j *Job) getRandomInterval() int {
|
||||
randNum := j.rand.Intn(j.randomIntervalRange[1] - j.randomIntervalRange[0])
|
||||
return j.randomIntervalRange[0] + randNum
|
||||
}
|
||||
|
||||
func (j *Job) getInterval() int {
|
||||
if j.randomizeInterval {
|
||||
return j.getRandomInterval()
|
||||
}
|
||||
return j.interval
|
||||
}
|
||||
|
||||
func (j *Job) neverRan() bool {
|
||||
jobLastRun := j.LastRun()
|
||||
return jobLastRun.IsZero()
|
||||
}
|
||||
|
||||
func (j *Job) getStartsImmediately() bool {
|
||||
return j.startsImmediately
|
||||
}
|
||||
|
||||
func (j *Job) setStartsImmediately(b bool) {
|
||||
j.startsImmediately = b
|
||||
}
|
||||
|
||||
func (j *Job) setTimer(t *time.Timer) {
|
||||
j.mu.Lock()
|
||||
defer j.mu.Unlock()
|
||||
j.timer = t
|
||||
}
|
||||
|
||||
func (j *Job) getFirstAtTime() time.Duration {
|
||||
var t time.Duration
|
||||
if len(j.atTimes) > 0 {
|
||||
t = j.atTimes[0]
|
||||
}
|
||||
|
||||
return t
|
||||
}
|
||||
|
||||
func (j *Job) getAtTime(lastRun time.Time) time.Duration {
|
||||
var r time.Duration
|
||||
if len(j.atTimes) == 0 {
|
||||
return r
|
||||
}
|
||||
|
||||
if len(j.atTimes) == 1 {
|
||||
return j.atTimes[0]
|
||||
}
|
||||
|
||||
if lastRun.IsZero() {
|
||||
r = j.atTimes[0]
|
||||
} else {
|
||||
for _, d := range j.atTimes {
|
||||
nt := time.Date(lastRun.Year(), lastRun.Month(), lastRun.Day(), 0, 0, 0, 0, lastRun.Location()).Add(d)
|
||||
if nt.After(lastRun) {
|
||||
r = d
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return r
|
||||
}
|
||||
|
||||
func (j *Job) addAtTime(t time.Duration) {
|
||||
if len(j.atTimes) == 0 {
|
||||
j.atTimes = append(j.atTimes, t)
|
||||
return
|
||||
}
|
||||
exist := false
|
||||
index := sort.Search(len(j.atTimes), func(i int) bool {
|
||||
atTime := j.atTimes[i]
|
||||
b := atTime >= t
|
||||
if b {
|
||||
exist = atTime == t
|
||||
}
|
||||
return b
|
||||
})
|
||||
|
||||
// ignore if present
|
||||
if exist {
|
||||
return
|
||||
}
|
||||
|
||||
j.atTimes = append(j.atTimes, time.Duration(0))
|
||||
copy(j.atTimes[index+1:], j.atTimes[index:])
|
||||
j.atTimes[index] = t
|
||||
}
|
||||
|
||||
func (j *Job) getStartAtTime() time.Time {
|
||||
j.mu.RLock()
|
||||
defer j.mu.RUnlock()
|
||||
return j.startAtTime
|
||||
}
|
||||
|
||||
func (j *Job) setStartAtTime(t time.Time) {
|
||||
j.mu.Lock()
|
||||
defer j.mu.Unlock()
|
||||
j.startAtTime = t
|
||||
}
|
||||
|
||||
func (j *Job) getUnit() schedulingUnit {
|
||||
j.mu.RLock()
|
||||
defer j.mu.RUnlock()
|
||||
return j.unit
|
||||
}
|
||||
|
||||
func (j *Job) setUnit(t schedulingUnit) {
|
||||
j.mu.Lock()
|
||||
defer j.mu.Unlock()
|
||||
j.unit = t
|
||||
}
|
||||
|
||||
func (j *Job) getDuration() time.Duration {
|
||||
j.mu.RLock()
|
||||
defer j.mu.RUnlock()
|
||||
return j.duration
|
||||
}
|
||||
|
||||
func (j *Job) setDuration(t time.Duration) {
|
||||
j.mu.Lock()
|
||||
defer j.mu.Unlock()
|
||||
j.duration = t
|
||||
}
|
||||
|
||||
// hasTags returns true if all tags are matched on this Job
|
||||
func (j *Job) hasTags(tags ...string) bool {
|
||||
// Build map of all Job tags for easy comparison
|
||||
jobTags := map[string]int{}
|
||||
for _, tag := range j.tags {
|
||||
jobTags[tag] = 0
|
||||
}
|
||||
|
||||
// Loop through required tags and if one doesn't exist, return false
|
||||
for _, tag := range tags {
|
||||
_, ok := jobTags[tag]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Error returns an error if one occurred while creating the Job.
|
||||
// If multiple errors occurred, they will be wrapped and can be
|
||||
// checked using the standard unwrap options.
|
||||
func (j *Job) Error() error {
|
||||
return j.error
|
||||
}
|
||||
|
||||
// Context returns the job's context. The context controls cancellation.
|
||||
func (j *Job) Context() context.Context {
|
||||
return j.ctx
|
||||
}
|
||||
|
||||
// Tag allows you to add arbitrary labels to a Job that do not
|
||||
// impact the functionality of the Job
|
||||
func (j *Job) Tag(tags ...string) {
|
||||
j.tags = append(j.tags, tags...)
|
||||
}
|
||||
|
||||
// Untag removes a tag from a Job
|
||||
func (j *Job) Untag(t string) {
|
||||
var newTags []string
|
||||
for _, tag := range j.tags {
|
||||
if t != tag {
|
||||
newTags = append(newTags, tag)
|
||||
}
|
||||
}
|
||||
|
||||
j.tags = newTags
|
||||
}
|
||||
|
||||
// Tags returns the tags attached to the Job
|
||||
func (j *Job) Tags() []string {
|
||||
return j.tags
|
||||
}
|
||||
|
||||
// EventListener functions utilize the job's name and are triggered
|
||||
// at the point in the job's lifecycle that the name suggests
|
||||
type EventListener func(j *Job)
|
||||
|
||||
// BeforeJobRuns is called before the job is run
|
||||
func BeforeJobRuns(eventListenerFunc func(jobName string)) EventListener {
|
||||
return func(j *Job) {
|
||||
j.mu.Lock()
|
||||
defer j.mu.Unlock()
|
||||
j.eventListeners.beforeJobRuns = eventListenerFunc
|
||||
}
|
||||
}
|
||||
|
||||
// AfterJobRuns is called after the job is run
|
||||
// This is called even when an error is returned
|
||||
func AfterJobRuns(eventListenerFunc func(jobName string)) EventListener {
|
||||
return func(j *Job) {
|
||||
j.mu.Lock()
|
||||
defer j.mu.Unlock()
|
||||
j.eventListeners.afterJobRuns = eventListenerFunc
|
||||
}
|
||||
}
|
||||
|
||||
// WhenJobReturnsError is called when the job returns an error
|
||||
func WhenJobReturnsError(eventListenerFunc func(jobName string, err error)) EventListener {
|
||||
return func(j *Job) {
|
||||
j.mu.Lock()
|
||||
defer j.mu.Unlock()
|
||||
j.eventListeners.onError = eventListenerFunc
|
||||
}
|
||||
}
|
||||
|
||||
// WhenJobReturnsNoError is called when the job does not return an error
|
||||
// the listener function is passed the job's name
|
||||
func WhenJobReturnsNoError(eventListenerFunc func(jobName string)) EventListener {
|
||||
return func(j *Job) {
|
||||
j.mu.Lock()
|
||||
defer j.mu.Unlock()
|
||||
j.eventListeners.noError = eventListenerFunc
|
||||
}
|
||||
}
|
||||
|
||||
// RegisterEventListeners accepts EventListeners and registers them for the job
|
||||
// The event listeners are then called at the times described by each listener.
|
||||
func (j *Job) RegisterEventListeners(eventListeners ...EventListener) {
|
||||
for _, el := range eventListeners {
|
||||
el(j)
|
||||
}
|
||||
}
|
||||
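A hedged usage sketch for the listener constructors above. Job.Name and RegisterEventListeners are defined in this file; the scheduler calls (NewScheduler, Every, Seconds, Do, StartBlocking) are assumed from the upstream go-co-op/gocron v1 API:

package main

import (
	"errors"
	"log"
	"time"

	"github.com/go-co-op/gocron"
)

func main() {
	s := gocron.NewScheduler(time.UTC)

	job, _ := s.Every(10).Seconds().Do(func() error {
		return errors.New("transient failure")
	})
	job.Name("sync-metrics") // used as the jobName passed to each listener

	job.RegisterEventListeners(
		gocron.BeforeJobRuns(func(jobName string) { log.Println("starting", jobName) }),
		gocron.AfterJobRuns(func(jobName string) { log.Println("finished", jobName) }),
		gocron.WhenJobReturnsError(func(jobName string, err error) {
			log.Printf("%s failed: %v", jobName, err)
		}),
	)

	s.StartBlocking()
}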
|
||||
// Deprecated: SetEventListeners accepts two functions that will be called, one before and one after the job is run
|
||||
func (j *Job) SetEventListeners(onBeforeJobExecution interface{}, onAfterJobExecution interface{}) {
|
||||
j.eventListeners = eventListeners{
|
||||
onBeforeJobExecution: onBeforeJobExecution,
|
||||
onAfterJobExecution: onAfterJobExecution,
|
||||
}
|
||||
}
|
||||
|
||||
// ScheduledTime returns the time of the Job's next scheduled run
|
||||
func (j *Job) ScheduledTime() time.Time {
|
||||
j.mu.RLock()
|
||||
defer j.mu.RUnlock()
|
||||
return j.nextRun
|
||||
}
|
||||
|
||||
// ScheduledAtTime returns the specific time of day the Job will run at.
|
||||
// If multiple times are set, the earliest time will be returned.
|
||||
func (j *Job) ScheduledAtTime() string {
|
||||
if len(j.atTimes) == 0 {
|
||||
return "00:00"
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%02d:%02d", j.getFirstAtTime()/time.Hour, (j.getFirstAtTime()%time.Hour)/time.Minute)
|
||||
}
|
||||
|
||||
// ScheduledAtTimes returns the specific times of day the Job will run at
|
||||
func (j *Job) ScheduledAtTimes() []string {
|
||||
r := make([]string, len(j.atTimes))
|
||||
for i, t := range j.atTimes {
|
||||
r[i] = fmt.Sprintf("%02d:%02d", t/time.Hour, (t%time.Hour)/time.Minute)
|
||||
}
|
||||
|
||||
return r
|
||||
}
|
||||
|
||||
// Weekday returns which day of the week the Job will run on and
|
||||
// will return an error if the Job is not scheduled weekly
|
||||
func (j *Job) Weekday() (time.Weekday, error) {
|
||||
if len(j.scheduledWeekdays) == 0 {
|
||||
return time.Sunday, ErrNotScheduledWeekday
|
||||
}
|
||||
return j.scheduledWeekdays[0], nil
|
||||
}
|
||||
|
||||
// Weekdays returns a slice of time.Weekday that the Job will run in a week and
|
||||
// will return a slice containing only time.Sunday if the Job is not scheduled weekly
|
||||
func (j *Job) Weekdays() []time.Weekday {
|
||||
// appending on j.scheduledWeekdays may cause a side effect
|
||||
if len(j.scheduledWeekdays) == 0 {
|
||||
return []time.Weekday{time.Sunday}
|
||||
}
|
||||
|
||||
return j.scheduledWeekdays
|
||||
}
|
||||
|
||||
// LimitRunsTo limits the number of executions of this job to n.
|
||||
// Upon reaching the limit, the job is removed from the scheduler.
|
||||
//
|
||||
// Note: If a job is added to a running scheduler and this method is then used
|
||||
// you may see the job run more than the set limit, as the job is scheduled to run immediately
|
||||
// by default upon being added to the scheduler. It is recommended to use the
|
||||
// LimitRunsTo() func on the scheduler chain when scheduling the job.
|
||||
// For example: scheduler.LimitRunsTo(1).Do()
|
||||
func (j *Job) LimitRunsTo(n int) {
|
||||
j.mu.Lock()
|
||||
defer j.mu.Unlock()
|
||||
j.runConfig.finiteRuns = true
|
||||
j.runConfig.maxRuns = n
|
||||
}
|
||||
|
||||
// SingletonMode prevents a new job from starting if the prior job has not yet
|
||||
// completed its run.
|
||||
// Note: If a job is added to a running scheduler and this method is then used
|
||||
// you may see the job overrun itself, as the job is scheduled to run immediately
|
||||
// by default upon being added to the scheduler. It is recommended to use the
|
||||
// SingletonMode() func on the scheduler chain when scheduling the job.
|
||||
func (j *Job) SingletonMode() {
|
||||
j.mu.Lock()
|
||||
defer j.mu.Unlock()
|
||||
j.runConfig.mode = singletonMode
|
||||
|
||||
j.jobFunction.singletonWgMu = &sync.Mutex{}
|
||||
j.jobFunction.singletonWgMu.Lock()
|
||||
j.jobFunction.singletonWg = &sync.WaitGroup{}
|
||||
j.jobFunction.singletonWgMu.Unlock()
|
||||
|
||||
j.jobFunction.singletonQueueMu = &sync.Mutex{}
|
||||
j.jobFunction.singletonQueueMu.Lock()
|
||||
j.jobFunction.singletonQueue = make(chan struct{}, 100)
|
||||
j.jobFunction.singletonQueueMu.Unlock()
|
||||
}
|
||||
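As the comment above recommends, singleton mode is normally enabled on the scheduler chain rather than on the Job directly. A minimal sketch, assuming the upstream go-co-op/gocron v1 scheduler API:

package main

import (
	"fmt"
	"time"

	"github.com/go-co-op/gocron"
)

func main() {
	s := gocron.NewScheduler(time.UTC)

	// The task takes longer than the interval; singleton mode queues the
	// next run instead of letting executions overlap.
	_, _ = s.Every(2).Seconds().SingletonMode().Do(func() {
		fmt.Println("run started at", time.Now().Format("15:04:05"))
		time.Sleep(5 * time.Second)
	})

	s.StartBlocking()
}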
|
||||
// shouldRun evaluates if this job should run again
|
||||
// based on the runConfig
|
||||
func (j *Job) shouldRun() bool {
|
||||
j.mu.RLock()
|
||||
defer j.mu.RUnlock()
|
||||
return !j.runConfig.finiteRuns || j.runStartCount.Load() < int64(j.runConfig.maxRuns)
|
||||
}
|
||||
|
||||
// LastRun returns the time the job was run last
|
||||
func (j *Job) LastRun() time.Time {
|
||||
j.jobRunTimesMu.Lock()
|
||||
defer j.jobRunTimesMu.Unlock()
|
||||
return j.lastRun
|
||||
}
|
||||
|
||||
func (j *Job) setLastRun(t time.Time) {
|
||||
j.previousRun = j.lastRun
|
||||
j.lastRun = t
|
||||
}
|
||||
|
||||
// NextRun returns the time the job will run next
|
||||
func (j *Job) NextRun() time.Time {
|
||||
j.jobRunTimesMu.Lock()
|
||||
defer j.jobRunTimesMu.Unlock()
|
||||
return j.nextRun
|
||||
}
|
||||
|
||||
func (j *Job) setNextRun(t time.Time) {
|
||||
j.jobRunTimesMu.Lock()
|
||||
defer j.jobRunTimesMu.Unlock()
|
||||
j.nextRun = t
|
||||
j.jobFunction.jobFuncNextRun = t
|
||||
}
|
||||
|
||||
// PreviousRun returns the job run time previous to LastRun
|
||||
func (j *Job) PreviousRun() time.Time {
|
||||
j.jobRunTimesMu.Lock()
|
||||
defer j.jobRunTimesMu.Unlock()
|
||||
return j.previousRun
|
||||
}
|
||||
|
||||
// RunCount returns the number of times the job has been started
|
||||
func (j *Job) RunCount() int {
|
||||
j.mu.Lock()
|
||||
defer j.mu.Unlock()
|
||||
return int(j.runStartCount.Load())
|
||||
}
|
||||
|
||||
// FinishedRunCount returns the number of times the job has finished running
|
||||
func (j *Job) FinishedRunCount() int {
|
||||
j.mu.Lock()
|
||||
defer j.mu.Unlock()
|
||||
return int(j.runFinishCount.Load())
|
||||
}
|
||||
|
||||
func (j *Job) stop() {
|
||||
j.mu.Lock()
|
||||
defer j.mu.Unlock()
|
||||
if j.timer != nil {
|
||||
j.timer.Stop()
|
||||
}
|
||||
if j.cancel != nil {
|
||||
j.cancel()
|
||||
j.ctx, j.cancel = context.WithCancel(context.Background())
|
||||
}
|
||||
j.stopped.Store(true)
|
||||
}
|
||||
|
||||
// IsRunning reports whether any instances of the job function are currently running
|
||||
func (j *Job) IsRunning() bool {
|
||||
return j.isRunning.Load()
|
||||
}
|
||||
|
||||
// you must Lock the job before calling copy
|
||||
func (j *Job) copy() Job {
|
||||
return Job{
|
||||
mu: &jobMutex{},
|
||||
jobFunction: j.jobFunction,
|
||||
interval: j.interval,
|
||||
duration: j.duration,
|
||||
unit: j.unit,
|
||||
startsImmediately: j.startsImmediately,
|
||||
atTimes: j.atTimes,
|
||||
startAtTime: j.startAtTime,
|
||||
error: j.error,
|
||||
scheduledWeekdays: j.scheduledWeekdays,
|
||||
daysOfTheMonth: j.daysOfTheMonth,
|
||||
tags: j.tags,
|
||||
timer: j.timer,
|
||||
cronSchedule: j.cronSchedule,
|
||||
runWithDetails: j.runWithDetails,
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,23 @@
|
|||
package gocron
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrFailedToConnectToRedis = errors.New("gocron: failed to connect to redis")
|
||||
ErrFailedToObtainLock = errors.New("gocron: failed to obtain lock")
|
||||
ErrFailedToReleaseLock = errors.New("gocron: failed to release lock")
|
||||
)
|
||||
|
||||
// Locker represents the required interface to lock jobs when running multiple schedulers.
|
||||
type Locker interface {
|
||||
// Lock if an error is returned by lock, the job will not be scheduled.
|
||||
Lock(ctx context.Context, key string) (Lock, error)
|
||||
}
|
||||
|
||||
// Lock represents an obtained lock
|
||||
type Lock interface {
|
||||
Unlock(ctx context.Context) error
|
||||
}
|
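These two interfaces are all a distributed-lock integration has to satisfy. Below is a hedged, in-memory sketch of a Locker/Lock pair (a real deployment would back this with Redis, etcd, or similar); WithDistributedLocker is assumed from the upstream go-co-op/gocron v1 scheduler API, which is not part of this excerpt:

package main

import (
	"context"
	"sync"
	"time"

	"github.com/go-co-op/gocron"
)

type memoryLocker struct {
	mu   sync.Mutex
	held map[string]bool
}

type memoryLock struct {
	l   *memoryLocker
	key string
}

func (m *memoryLocker) Lock(_ context.Context, key string) (gocron.Lock, error) {
	m.mu.Lock()
	defer m.mu.Unlock()
	if m.held[key] {
		// another scheduler instance holds this job; skip the run
		return nil, gocron.ErrFailedToObtainLock
	}
	m.held[key] = true
	return &memoryLock{l: m, key: key}, nil
}

func (l *memoryLock) Unlock(_ context.Context) error {
	l.l.mu.Lock()
	defer l.l.mu.Unlock()
	delete(l.l.held, l.key)
	return nil
}

func main() {
	s := gocron.NewScheduler(time.UTC)
	s.WithDistributedLocker(&memoryLocker{held: map[string]bool{}})
	_, _ = s.Every(1).Minute().Do(func() { /* runs on only one instance at a time */ })
	s.StartBlocking()
}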
File diff suppressed because it is too large
|
@@ -0,0 +1,33 @@
|
|||
package gocron
|
||||
|
||||
import "time"
|
||||
|
||||
var _ TimeWrapper = (*trueTime)(nil)
|
||||
|
||||
// TimeWrapper is an interface that wraps the Now, Sleep, and Unix methods of the time package.
|
||||
// This allows the library and users to mock the time package for testing.
|
||||
type TimeWrapper interface {
|
||||
Now(*time.Location) time.Time
|
||||
Unix(int64, int64) time.Time
|
||||
Sleep(time.Duration)
|
||||
}
|
||||
|
||||
type trueTime struct{}
|
||||
|
||||
func (t *trueTime) Now(location *time.Location) time.Time {
|
||||
return time.Now().In(location)
|
||||
}
|
||||
|
||||
func (t *trueTime) Unix(sec int64, nsec int64) time.Time {
|
||||
return time.Unix(sec, nsec)
|
||||
}
|
||||
|
||||
func (t *trueTime) Sleep(d time.Duration) {
|
||||
time.Sleep(d)
|
||||
}
|
||||
|
||||
// afterFunc proxies the time.AfterFunc function.
|
||||
// This allows it to be mocked for testing.
|
||||
func afterFunc(d time.Duration, f func()) *time.Timer {
|
||||
return time.AfterFunc(d, f)
|
||||
}
|
|
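TimeWrapper exists so tests can substitute a controllable clock. A sketch of a conforming fake is below; how the scheduler consumes it is not shown in this excerpt, so only the implementation side is illustrated:

package gocron_test

import (
	"time"

	"github.com/go-co-op/gocron"
)

// fakeTime satisfies gocron.TimeWrapper with a fixed, advanceable clock and a
// non-blocking Sleep, which keeps scheduler tests deterministic and fast.
type fakeTime struct {
	now   time.Time
	slept []time.Duration
}

var _ gocron.TimeWrapper = (*fakeTime)(nil)

func (f *fakeTime) Now(loc *time.Location) time.Time { return f.now.In(loc) }

func (f *fakeTime) Unix(sec, nsec int64) time.Time { return time.Unix(sec, nsec) }

func (f *fakeTime) Sleep(d time.Duration) {
	f.slept = append(f.slept, d) // record the request instead of blocking
	f.now = f.now.Add(d)
}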
@@ -0,0 +1,22 @@
|
|||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
|
||||
# Folders
|
||||
_obj
|
||||
_test
|
||||
|
||||
# Architecture specific extensions/prefixes
|
||||
*.[568vq]
|
||||
[568vq].out
|
||||
|
||||
*.cgo1.go
|
||||
*.cgo2.c
|
||||
_cgo_defun.c
|
||||
_cgo_gotypes.go
|
||||
_cgo_export.*
|
||||
|
||||
_testmain.go
|
||||
|
||||
*.exe
|
|
@@ -0,0 +1 @@
|
|||
language: go
|
|
@@ -0,0 +1,21 @@
|
|||
Copyright (C) 2012 Rob Figueiredo
|
||||
All Rights Reserved.
|
||||
|
||||
MIT LICENSE
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
||||
the Software, and to permit persons to whom the Software is furnished to do so,
|
||||
subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
||||
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
||||
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
||||
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|
@@ -0,0 +1,125 @@
|
|||
[](http://godoc.org/github.com/robfig/cron)
|
||||
[](https://travis-ci.org/robfig/cron)
|
||||
|
||||
# cron
|
||||
|
||||
Cron V3 has been released!
|
||||
|
||||
To download the specific tagged release, run:
|
||||
|
||||
go get github.com/robfig/cron/v3@v3.0.0
|
||||
|
||||
Import it in your program as:
|
||||
|
||||
import "github.com/robfig/cron/v3"
|
||||
|
||||
It requires Go 1.11 or later due to usage of Go Modules.
|
||||
|
||||
Refer to the documentation here:
|
||||
http://godoc.org/github.com/robfig/cron
|
||||
|
||||
The rest of this document describes the advances in v3 and lists the
|
||||
breaking changes for users that wish to upgrade from an earlier version.
|
||||
|
||||
## Upgrading to v3 (June 2019)
|
||||
|
||||
cron v3 is a major upgrade to the library that addresses all outstanding bugs,
|
||||
feature requests, and rough edges. It is based on a merge of master which
|
||||
contains various fixes to issues found over the years and the v2 branch which
|
||||
contains some backwards-incompatible features like the ability to remove cron
|
||||
jobs. In addition, v3 adds support for Go Modules, cleans up rough edges like
|
||||
the timezone support, and fixes a number of bugs.
|
||||
|
||||
New features:
|
||||
|
||||
- Support for Go modules. Callers must now import this library as
|
||||
`github.com/robfig/cron/v3`, instead of `gopkg.in/...`
|
||||
|
||||
- Fixed bugs:
|
||||
- 0f01e6b parser: fix combining of Dow and Dom (#70)
|
||||
- dbf3220 adjust times when rolling the clock forward to handle non-existent midnight (#157)
|
||||
- eeecf15 spec_test.go: ensure an error is returned on 0 increment (#144)
|
||||
- 70971dc cron.Entries(): update request for snapshot to include a reply channel (#97)
|
||||
- 1cba5e6 cron: fix: removing a job causes the next scheduled job to run too late (#206)
|
||||
|
||||
- Standard cron spec parsing by default (first field is "minute"), with an easy
|
||||
way to opt into the seconds field (quartz-compatible). Although, note that the
|
||||
year field (optional in Quartz) is not supported.
|
||||
|
||||
- Extensible, key/value logging via an interface that complies with
|
||||
the https://github.com/go-logr/logr project.
|
||||
|
||||
- The new Chain & JobWrapper types allow you to install "interceptors" to add
|
||||
cross-cutting behavior like the following:
|
||||
- Recover any panics from jobs
|
||||
- Delay a job's execution if the previous run hasn't completed yet
|
||||
- Skip a job's execution if the previous run hasn't completed yet
|
||||
- Log each job's invocations
|
||||
- Notification when jobs are completed
|
||||
|
||||
It is backwards incompatible with both v1 and v2. These updates are required:
|
||||
|
||||
- The v1 branch accepted an optional seconds field at the beginning of the cron
|
||||
spec. This is non-standard and has led to a lot of confusion. The new default
|
||||
parser conforms to the standard as described by [the Cron wikipedia page].
|
||||
|
||||
UPDATING: To retain the old behavior, construct your Cron with a custom
|
||||
parser:
|
||||
|
||||
// Seconds field, required
|
||||
cron.New(cron.WithSeconds())
|
||||
|
||||
// Seconds field, optional
|
||||
cron.New(
|
||||
cron.WithParser(
|
||||
cron.SecondOptional | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow | cron.Descriptor))
|
||||
|
||||
- The Cron type now accepts functional options on construction rather than the
|
||||
previous ad-hoc behavior modification mechanisms (setting a field, calling a setter).
|
||||
|
||||
UPDATING: Code that sets Cron.ErrorLogger or calls Cron.SetLocation must be
|
||||
updated to provide those values on construction.
|
||||
|
||||
- CRON_TZ is now the recommended way to specify the timezone of a single
|
||||
schedule, which is sanctioned by the specification. The legacy "TZ=" prefix
|
||||
will continue to be supported, since it is unambiguous and easy to support.
|
||||
|
||||
UPDATING: No update is required.
|
||||
|
||||
- By default, cron will no longer recover panics in jobs that it runs.
|
||||
Recovering can be surprising (see issue #192) and seems to be at odds with
|
||||
typical behavior of libraries. Relatedly, the `cron.WithPanicLogger` option
|
||||
has been removed to accommodate the more general JobWrapper type.
|
||||
|
||||
UPDATING: To opt into panic recovery and configure the panic logger:
|
||||
|
||||
cron.New(cron.WithChain(
|
||||
cron.Recover(logger), // or use cron.DefaultLogger
|
||||
))
|
||||
|
||||
- In adding support for https://github.com/go-logr/logr, `cron.WithVerboseLogger` was
|
||||
removed, since it is duplicative with the leveled logging.
|
||||
|
||||
UPDATING: Callers should use `WithLogger` and specify a logger that does not
|
||||
discard `Info` logs. For convenience, one is provided that wraps `*log.Logger`:
|
||||
|
||||
cron.New(
|
||||
cron.WithLogger(cron.VerbosePrintfLogger(logger)))
|
||||
|
||||
|
||||
### Background - Cron spec format
|
||||
|
||||
There are two cron spec formats in common usage:
|
||||
|
||||
- The "standard" cron format, described on [the Cron wikipedia page] and used by
|
||||
the cron Linux system utility.
|
||||
|
||||
- The cron format used by [the Quartz Scheduler], commonly used for scheduled
|
||||
jobs in Java software.
|
||||
|
||||
[the Cron wikipedia page]: https://en.wikipedia.org/wiki/Cron
|
||||
[the Quartz Scheduler]: http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/tutorial-lesson-06.html
|
||||
|
||||
The original version of this package included an optional "seconds" field, which
|
||||
made it incompatible with both of these formats. Now, the "standard" format is
|
||||
the default format accepted, and the Quartz format is opt-in.
|
|
@@ -0,0 +1,92 @@
|
|||
package cron
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"runtime"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// JobWrapper decorates the given Job with some behavior.
|
||||
type JobWrapper func(Job) Job
|
||||
|
||||
// Chain is a sequence of JobWrappers that decorates submitted jobs with
|
||||
// cross-cutting behaviors like logging or synchronization.
|
||||
type Chain struct {
|
||||
wrappers []JobWrapper
|
||||
}
|
||||
|
||||
// NewChain returns a Chain consisting of the given JobWrappers.
|
||||
func NewChain(c ...JobWrapper) Chain {
|
||||
return Chain{c}
|
||||
}
|
||||
|
||||
// Then decorates the given job with all JobWrappers in the chain.
|
||||
//
|
||||
// This:
|
||||
// NewChain(m1, m2, m3).Then(job)
|
||||
// is equivalent to:
|
||||
// m1(m2(m3(job)))
|
||||
func (c Chain) Then(j Job) Job {
|
||||
for i := range c.wrappers {
|
||||
j = c.wrappers[len(c.wrappers)-i-1](j)
|
||||
}
|
||||
return j
|
||||
}
|
||||
|
||||
// Recover panics in wrapped jobs and log them with the provided logger.
|
||||
func Recover(logger Logger) JobWrapper {
|
||||
return func(j Job) Job {
|
||||
return FuncJob(func() {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
const size = 64 << 10
|
||||
buf := make([]byte, size)
|
||||
buf = buf[:runtime.Stack(buf, false)]
|
||||
err, ok := r.(error)
|
||||
if !ok {
|
||||
err = fmt.Errorf("%v", r)
|
||||
}
|
||||
logger.Error(err, "panic", "stack", "...\n"+string(buf))
|
||||
}
|
||||
}()
|
||||
j.Run()
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// DelayIfStillRunning serializes jobs, delaying subsequent runs until the
|
||||
// previous one is complete. Jobs running after a delay of more than a minute
|
||||
// have the delay logged at Info.
|
||||
func DelayIfStillRunning(logger Logger) JobWrapper {
|
||||
return func(j Job) Job {
|
||||
var mu sync.Mutex
|
||||
return FuncJob(func() {
|
||||
start := time.Now()
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
if dur := time.Since(start); dur > time.Minute {
|
||||
logger.Info("delay", "duration", dur)
|
||||
}
|
||||
j.Run()
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// SkipIfStillRunning skips an invocation of the Job if a previous invocation is
|
||||
// still running. It logs skips to the given logger at Info level.
|
||||
func SkipIfStillRunning(logger Logger) JobWrapper {
|
||||
return func(j Job) Job {
|
||||
var ch = make(chan struct{}, 1)
|
||||
ch <- struct{}{}
|
||||
return FuncJob(func() {
|
||||
select {
|
||||
case v := <-ch:
|
||||
j.Run()
|
||||
ch <- v
|
||||
default:
|
||||
logger.Info("skip")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
|
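A short usage sketch combining the wrappers above through the WithChain option; WithChain and VerbosePrintfLogger come from elsewhere in this vendored package (see the README and doc.go portions of this diff):

package main

import (
	"log"
	"os"
	"time"

	"github.com/robfig/cron/v3"
)

func main() {
	logger := cron.VerbosePrintfLogger(log.New(os.Stdout, "cron: ", log.LstdFlags))

	c := cron.New(cron.WithChain(
		cron.Recover(logger),            // re-add panic recovery (off by default in v3)
		cron.SkipIfStillRunning(logger), // drop a tick if the previous run is still going
	))

	_, _ = c.AddFunc("@every 1s", func() {
		time.Sleep(3 * time.Second) // longer than the interval, so ticks get skipped
	})

	c.Start()
	time.Sleep(10 * time.Second)
	<-c.Stop().Done()
}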
@@ -0,0 +1,27 @@
|
|||
package cron
|
||||
|
||||
import "time"
|
||||
|
||||
// ConstantDelaySchedule represents a simple recurring duty cycle, e.g. "Every 5 minutes".
|
||||
// It does not support jobs more frequent than once a second.
|
||||
type ConstantDelaySchedule struct {
|
||||
Delay time.Duration
|
||||
}
|
||||
|
||||
// Every returns a crontab Schedule that activates once every duration.
|
||||
// Delays of less than a second are not supported (will round up to 1 second).
|
||||
// Any fields less than a Second are truncated.
|
||||
func Every(duration time.Duration) ConstantDelaySchedule {
|
||||
if duration < time.Second {
|
||||
duration = time.Second
|
||||
}
|
||||
return ConstantDelaySchedule{
|
||||
Delay: duration - time.Duration(duration.Nanoseconds())%time.Second,
|
||||
}
|
||||
}
|
||||
|
||||
// Next returns the next time this should be run.
|
||||
// This rounds so that the next activation time will be on the second.
|
||||
func (schedule ConstantDelaySchedule) Next(t time.Time) time.Time {
|
||||
return t.Add(schedule.Delay - time.Duration(t.Nanosecond())*time.Nanosecond)
|
||||
}
|
|
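A minimal sketch pairing Every with Cron.Schedule and FuncJob (all defined in this vendored package) to run a job on a fixed delay instead of a cron expression:

package main

import (
	"fmt"
	"time"

	"github.com/robfig/cron/v3"
)

func main() {
	c := cron.New()

	// Sub-second precision is truncated by Every, so this fires every 90
	// seconds on whole-second boundaries.
	c.Schedule(cron.Every(90*time.Second+500*time.Millisecond), cron.FuncJob(func() {
		fmt.Println("tick", time.Now().Format(time.RFC3339))
	}))

	c.Start()
	select {} // block so the scheduler keeps running
}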
@@ -0,0 +1,355 @@
|
|||
package cron
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sort"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Cron keeps track of any number of entries, invoking the associated func as
|
||||
// specified by the schedule. It may be started, stopped, and the entries may
|
||||
// be inspected while running.
|
||||
type Cron struct {
|
||||
entries []*Entry
|
||||
chain Chain
|
||||
stop chan struct{}
|
||||
add chan *Entry
|
||||
remove chan EntryID
|
||||
snapshot chan chan []Entry
|
||||
running bool
|
||||
logger Logger
|
||||
runningMu sync.Mutex
|
||||
location *time.Location
|
||||
parser ScheduleParser
|
||||
nextID EntryID
|
||||
jobWaiter sync.WaitGroup
|
||||
}
|
||||
|
||||
// ScheduleParser is an interface for schedule spec parsers that return a Schedule
|
||||
type ScheduleParser interface {
|
||||
Parse(spec string) (Schedule, error)
|
||||
}
|
||||
|
||||
// Job is an interface for submitted cron jobs.
|
||||
type Job interface {
|
||||
Run()
|
||||
}
|
||||
|
||||
// Schedule describes a job's duty cycle.
|
||||
type Schedule interface {
|
||||
// Next returns the next activation time, later than the given time.
|
||||
// Next is invoked initially, and then each time the job is run.
|
||||
Next(time.Time) time.Time
|
||||
}
|
||||
|
||||
// EntryID identifies an entry within a Cron instance
|
||||
type EntryID int
|
||||
|
||||
// Entry consists of a schedule and the func to execute on that schedule.
|
||||
type Entry struct {
|
||||
// ID is the cron-assigned ID of this entry, which may be used to look up a
|
||||
// snapshot or remove it.
|
||||
ID EntryID
|
||||
|
||||
// Schedule on which this job should be run.
|
||||
Schedule Schedule
|
||||
|
||||
// Next time the job will run, or the zero time if Cron has not been
|
||||
// started or this entry's schedule is unsatisfiable
|
||||
Next time.Time
|
||||
|
||||
// Prev is the last time this job was run, or the zero time if never.
|
||||
Prev time.Time
|
||||
|
||||
// WrappedJob is the thing to run when the Schedule is activated.
|
||||
WrappedJob Job
|
||||
|
||||
// Job is the thing that was submitted to cron.
|
||||
// It is kept around so that user code that needs to get at the job later,
|
||||
// e.g. via Entries() can do so.
|
||||
Job Job
|
||||
}
|
||||
|
||||
// Valid returns true if this is not the zero entry.
|
||||
func (e Entry) Valid() bool { return e.ID != 0 }
|
||||
|
||||
// byTime is a wrapper for sorting the entry array by time
|
||||
// (with zero time at the end).
|
||||
type byTime []*Entry
|
||||
|
||||
func (s byTime) Len() int { return len(s) }
|
||||
func (s byTime) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||
func (s byTime) Less(i, j int) bool {
|
||||
// Two zero times should return false.
|
||||
// Otherwise, zero is "greater" than any other time.
|
||||
// (To sort it at the end of the list.)
|
||||
if s[i].Next.IsZero() {
|
||||
return false
|
||||
}
|
||||
if s[j].Next.IsZero() {
|
||||
return true
|
||||
}
|
||||
return s[i].Next.Before(s[j].Next)
|
||||
}
|
||||
|
||||
// New returns a new Cron job runner, modified by the given options.
|
||||
//
|
||||
// Available Settings
|
||||
//
|
||||
// Time Zone
|
||||
// Description: The time zone in which schedules are interpreted
|
||||
// Default: time.Local
|
||||
//
|
||||
// Parser
|
||||
// Description: Parser converts cron spec strings into cron.Schedules.
|
||||
// Default: Accepts this spec: https://en.wikipedia.org/wiki/Cron
|
||||
//
|
||||
// Chain
|
||||
// Description: Wrap submitted jobs to customize behavior.
|
||||
// Default: A chain that recovers panics and logs them to stderr.
|
||||
//
|
||||
// See "cron.With*" to modify the default behavior.
|
||||
func New(opts ...Option) *Cron {
|
||||
c := &Cron{
|
||||
entries: nil,
|
||||
chain: NewChain(),
|
||||
add: make(chan *Entry),
|
||||
stop: make(chan struct{}),
|
||||
snapshot: make(chan chan []Entry),
|
||||
remove: make(chan EntryID),
|
||||
running: false,
|
||||
runningMu: sync.Mutex{},
|
||||
logger: DefaultLogger,
|
||||
location: time.Local,
|
||||
parser: standardParser,
|
||||
}
|
||||
for _, opt := range opts {
|
||||
opt(c)
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
// FuncJob is a wrapper that turns a func() into a cron.Job
|
||||
type FuncJob func()
|
||||
|
||||
func (f FuncJob) Run() { f() }
|
||||
|
||||
// AddFunc adds a func to the Cron to be run on the given schedule.
|
||||
// The spec is parsed using the time zone of this Cron instance as the default.
|
||||
// An opaque ID is returned that can be used to later remove it.
|
||||
func (c *Cron) AddFunc(spec string, cmd func()) (EntryID, error) {
|
||||
return c.AddJob(spec, FuncJob(cmd))
|
||||
}
|
||||
|
||||
// AddJob adds a Job to the Cron to be run on the given schedule.
|
||||
// The spec is parsed using the time zone of this Cron instance as the default.
|
||||
// An opaque ID is returned that can be used to later remove it.
|
||||
func (c *Cron) AddJob(spec string, cmd Job) (EntryID, error) {
|
||||
schedule, err := c.parser.Parse(spec)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return c.Schedule(schedule, cmd), nil
|
||||
}
|
||||
|
||||
// Schedule adds a Job to the Cron to be run on the given schedule.
|
||||
// The job is wrapped with the configured Chain.
|
||||
func (c *Cron) Schedule(schedule Schedule, cmd Job) EntryID {
|
||||
c.runningMu.Lock()
|
||||
defer c.runningMu.Unlock()
|
||||
c.nextID++
|
||||
entry := &Entry{
|
||||
ID: c.nextID,
|
||||
Schedule: schedule,
|
||||
WrappedJob: c.chain.Then(cmd),
|
||||
Job: cmd,
|
||||
}
|
||||
if !c.running {
|
||||
c.entries = append(c.entries, entry)
|
||||
} else {
|
||||
c.add <- entry
|
||||
}
|
||||
return entry.ID
|
||||
}
|
||||
|
||||
// Entries returns a snapshot of the cron entries.
|
||||
func (c *Cron) Entries() []Entry {
|
||||
c.runningMu.Lock()
|
||||
defer c.runningMu.Unlock()
|
||||
if c.running {
|
||||
replyChan := make(chan []Entry, 1)
|
||||
c.snapshot <- replyChan
|
||||
return <-replyChan
|
||||
}
|
||||
return c.entrySnapshot()
|
||||
}
|
||||
|
||||
// Location gets the time zone location
|
||||
func (c *Cron) Location() *time.Location {
|
||||
return c.location
|
||||
}
|
||||
|
||||
// Entry returns a snapshot of the given entry, or a zero-valued Entry if it couldn't be found.
|
||||
func (c *Cron) Entry(id EntryID) Entry {
|
||||
for _, entry := range c.Entries() {
|
||||
if id == entry.ID {
|
||||
return entry
|
||||
}
|
||||
}
|
||||
return Entry{}
|
||||
}
|
||||
|
||||
// Remove an entry from being run in the future.
|
||||
func (c *Cron) Remove(id EntryID) {
|
||||
c.runningMu.Lock()
|
||||
defer c.runningMu.Unlock()
|
||||
if c.running {
|
||||
c.remove <- id
|
||||
} else {
|
||||
c.removeEntry(id)
|
||||
}
|
||||
}
|
||||
|
||||
// Start the cron scheduler in its own goroutine, or no-op if already started.
|
||||
func (c *Cron) Start() {
|
||||
c.runningMu.Lock()
|
||||
defer c.runningMu.Unlock()
|
||||
if c.running {
|
||||
return
|
||||
}
|
||||
c.running = true
|
||||
go c.run()
|
||||
}
|
||||
|
||||
// Run the cron scheduler, or no-op if already running.
|
||||
func (c *Cron) Run() {
|
||||
c.runningMu.Lock()
|
||||
if c.running {
|
||||
c.runningMu.Unlock()
|
||||
return
|
||||
}
|
||||
c.running = true
|
||||
c.runningMu.Unlock()
|
||||
c.run()
|
||||
}
|
||||
|
||||
// run the scheduler. This is private just due to the need to synchronize
|
||||
// access to the 'running' state variable.
|
||||
func (c *Cron) run() {
|
||||
c.logger.Info("start")
|
||||
|
||||
// Figure out the next activation times for each entry.
|
||||
now := c.now()
|
||||
for _, entry := range c.entries {
|
||||
entry.Next = entry.Schedule.Next(now)
|
||||
c.logger.Info("schedule", "now", now, "entry", entry.ID, "next", entry.Next)
|
||||
}
|
||||
|
||||
for {
|
||||
// Determine the next entry to run.
|
||||
sort.Sort(byTime(c.entries))
|
||||
|
||||
var timer *time.Timer
|
||||
if len(c.entries) == 0 || c.entries[0].Next.IsZero() {
|
||||
// If there are no entries yet, just sleep - it still handles new entries
|
||||
// and stop requests.
|
||||
timer = time.NewTimer(100000 * time.Hour)
|
||||
} else {
|
||||
timer = time.NewTimer(c.entries[0].Next.Sub(now))
|
||||
}
|
||||
|
||||
for {
|
||||
select {
|
||||
case now = <-timer.C:
|
||||
now = now.In(c.location)
|
||||
c.logger.Info("wake", "now", now)
|
||||
|
||||
// Run every entry whose next time was less than now
|
||||
for _, e := range c.entries {
|
||||
if e.Next.After(now) || e.Next.IsZero() {
|
||||
break
|
||||
}
|
||||
c.startJob(e.WrappedJob)
|
||||
e.Prev = e.Next
|
||||
e.Next = e.Schedule.Next(now)
|
||||
c.logger.Info("run", "now", now, "entry", e.ID, "next", e.Next)
|
||||
}
|
||||
|
||||
case newEntry := <-c.add:
|
||||
timer.Stop()
|
||||
now = c.now()
|
||||
newEntry.Next = newEntry.Schedule.Next(now)
|
||||
c.entries = append(c.entries, newEntry)
|
||||
c.logger.Info("added", "now", now, "entry", newEntry.ID, "next", newEntry.Next)
|
||||
|
||||
case replyChan := <-c.snapshot:
|
||||
replyChan <- c.entrySnapshot()
|
||||
continue
|
||||
|
||||
case <-c.stop:
|
||||
timer.Stop()
|
||||
c.logger.Info("stop")
|
||||
return
|
||||
|
||||
case id := <-c.remove:
|
||||
timer.Stop()
|
||||
now = c.now()
|
||||
c.removeEntry(id)
|
||||
c.logger.Info("removed", "entry", id)
|
||||
}
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// startJob runs the given job in a new goroutine.
|
||||
func (c *Cron) startJob(j Job) {
|
||||
c.jobWaiter.Add(1)
|
||||
go func() {
|
||||
defer c.jobWaiter.Done()
|
||||
j.Run()
|
||||
}()
|
||||
}
|
||||
|
||||
// now returns the current time in c's location
|
||||
func (c *Cron) now() time.Time {
|
||||
return time.Now().In(c.location)
|
||||
}
|
||||
|
||||
// Stop stops the cron scheduler if it is running; otherwise it does nothing.
|
||||
// A context is returned so the caller can wait for running jobs to complete.
|
||||
func (c *Cron) Stop() context.Context {
|
||||
c.runningMu.Lock()
|
||||
defer c.runningMu.Unlock()
|
||||
if c.running {
|
||||
c.stop <- struct{}{}
|
||||
c.running = false
|
||||
}
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
go func() {
|
||||
c.jobWaiter.Wait()
|
||||
cancel()
|
||||
}()
|
||||
return ctx
|
||||
}
|
||||
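A hedged sketch of the shutdown pattern Stop enables: stop scheduling new runs, then wait on the returned context until in-flight jobs drain:

package main

import (
	"fmt"
	"time"

	"github.com/robfig/cron/v3"
)

func main() {
	c := cron.New()
	_, _ = c.AddFunc("@every 1s", func() {
		time.Sleep(2 * time.Second) // simulate a slow job
		fmt.Println("job finished")
	})
	c.Start()

	time.Sleep(3 * time.Second)

	// Stop prevents new runs; the context is cancelled once running jobs return.
	ctx := c.Stop()
	<-ctx.Done()
	fmt.Println("all jobs drained, safe to exit")
}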
|
||||
// entrySnapshot returns a copy of the current cron entry list.
|
||||
func (c *Cron) entrySnapshot() []Entry {
|
||||
var entries = make([]Entry, len(c.entries))
|
||||
for i, e := range c.entries {
|
||||
entries[i] = *e
|
||||
}
|
||||
return entries
|
||||
}
|
||||
|
||||
func (c *Cron) removeEntry(id EntryID) {
|
||||
var entries []*Entry
|
||||
for _, e := range c.entries {
|
||||
if e.ID != id {
|
||||
entries = append(entries, e)
|
||||
}
|
||||
}
|
||||
c.entries = entries
|
||||
}
|
|
@@ -0,0 +1,231 @@
|
|||
/*
|
||||
Package cron implements a cron spec parser and job runner.
|
||||
|
||||
Installation
|
||||
|
||||
To download the specific tagged release, run:
|
||||
|
||||
go get github.com/robfig/cron/v3@v3.0.0
|
||||
|
||||
Import it in your program as:
|
||||
|
||||
import "github.com/robfig/cron/v3"
|
||||
|
||||
It requires Go 1.11 or later due to usage of Go Modules.
|
||||
|
||||
Usage
|
||||
|
||||
Callers may register Funcs to be invoked on a given schedule. Cron will run
|
||||
them in their own goroutines.
|
||||
|
||||
c := cron.New()
|
||||
c.AddFunc("30 * * * *", func() { fmt.Println("Every hour on the half hour") })
|
||||
c.AddFunc("30 3-6,20-23 * * *", func() { fmt.Println(".. in the range 3-6am, 8-11pm") })
|
||||
c.AddFunc("CRON_TZ=Asia/Tokyo 30 04 * * *", func() { fmt.Println("Runs at 04:30 Tokyo time every day") })
|
||||
c.AddFunc("@hourly", func() { fmt.Println("Every hour, starting an hour from now") })
|
||||
c.AddFunc("@every 1h30m", func() { fmt.Println("Every hour thirty, starting an hour thirty from now") })
|
||||
c.Start()
|
||||
..
|
||||
// Funcs are invoked in their own goroutine, asynchronously.
|
||||
...
|
||||
// Funcs may also be added to a running Cron
|
||||
c.AddFunc("@daily", func() { fmt.Println("Every day") })
|
||||
..
|
||||
// Inspect the cron job entries' next and previous run times.
|
||||
inspect(c.Entries())
|
||||
..
|
||||
c.Stop() // Stop the scheduler (does not stop any jobs already running).
|
||||
|
||||
CRON Expression Format
|
||||
|
||||
A cron expression represents a set of times, using 5 space-separated fields.
|
||||
|
||||
Field name | Mandatory? | Allowed values | Allowed special characters
|
||||
---------- | ---------- | -------------- | --------------------------
|
||||
Minutes | Yes | 0-59 | * / , -
|
||||
Hours | Yes | 0-23 | * / , -
|
||||
Day of month | Yes | 1-31 | * / , - ?
|
||||
Month | Yes | 1-12 or JAN-DEC | * / , -
|
||||
Day of week | Yes | 0-6 or SUN-SAT | * / , - ?
|
||||
|
||||
Month and Day-of-week field values are case insensitive. "SUN", "Sun", and
|
||||
"sun" are equally accepted.
|
||||
|
||||
The specific interpretation of the format is based on the Cron Wikipedia page:
|
||||
https://en.wikipedia.org/wiki/Cron
|
||||
|
||||
Alternative Formats
|
||||
|
||||
Alternative Cron expression formats support other fields like seconds. You can
|
||||
implement that by creating a custom Parser as follows.
|
||||
|
||||
cron.New(
|
||||
cron.WithParser(
|
||||
cron.NewParser(
|
||||
cron.SecondOptional | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow | cron.Descriptor)))
|
||||
|
||||
Since adding Seconds is the most common modification to the standard cron spec,
|
||||
cron provides a builtin function to do that, which is equivalent to the custom
|
||||
parser you saw earlier, except that its seconds field is REQUIRED:
|
||||
|
||||
cron.New(cron.WithSeconds())
|
||||
|
||||
That emulates Quartz, the most popular alternative Cron schedule format:
|
||||
http://www.quartz-scheduler.org/documentation/quartz-2.x/tutorials/crontrigger.html
|
||||
|
||||
Special Characters
|
||||
|
||||
Asterisk ( * )
|
||||
|
||||
The asterisk indicates that the cron expression will match for all values of the
|
||||
field; e.g., using an asterisk in the 5th field (month) would indicate every
|
||||
month.
|
||||
|
||||
Slash ( / )
|
||||
|
||||
Slashes are used to describe increments of ranges. For example 3-59/15 in the
|
||||
1st field (minutes) would indicate the 3rd minute of the hour and every 15
|
||||
minutes thereafter. The form "*\/..." is equivalent to the form "first-last/...",
|
||||
that is, an increment over the largest possible range of the field. The form
|
||||
"N/..." is accepted as meaning "N-MAX/...", that is, starting at N, use the
|
||||
increment until the end of that specific range. It does not wrap around.
|
||||
|
||||
Comma ( , )
|
||||
|
||||
Commas are used to separate items of a list. For example, using "MON,WED,FRI" in
|
||||
the 5th field (day of week) would mean Mondays, Wednesdays and Fridays.
|
||||
|
||||
Hyphen ( - )
|
||||
|
||||
Hyphens are used to define ranges. For example, 9-17 would indicate every
|
||||
hour between 9am and 5pm inclusive.
|
||||
|
||||
Question mark ( ? )
|
||||
|
||||
Question mark may be used instead of '*' for leaving either day-of-month or
|
||||
day-of-week blank.
|
||||
|
||||
Predefined schedules
|
||||
|
||||
You may use one of several pre-defined schedules in place of a cron expression.
|
||||
|
||||
Entry | Description | Equivalent To
|
||||
----- | ----------- | -------------
|
||||
@yearly (or @annually) | Run once a year, midnight, Jan. 1st | 0 0 1 1 *
|
||||
@monthly | Run once a month, midnight, first of month | 0 0 1 * *
|
||||
@weekly | Run once a week, midnight between Sat/Sun | 0 0 * * 0
|
||||
@daily (or @midnight) | Run once a day, midnight | 0 0 * * *
|
||||
@hourly | Run once an hour, beginning of hour | 0 * * * *
|
||||
|
||||
Intervals

You may also schedule a job to execute at fixed intervals, starting at the time it's added
or cron is run. This is supported by formatting the cron spec like this:

    @every <duration>

where "duration" is a string accepted by time.ParseDuration
(http://golang.org/pkg/time/#ParseDuration).

For example, "@every 1h30m10s" would indicate a schedule that activates after
1 hour, 30 minutes, 10 seconds, and then every interval after that.

Note: The interval does not take the job runtime into account. For example,
if a job takes 3 minutes to run, and it is scheduled to run every 5 minutes,
it will have only 2 minutes of idle time between each run.

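A short sketch combining a predefined schedule and an interval (backupFunc and
pollFunc are placeholders for any func()):

    c := cron.New()
    c.AddFunc("@daily", backupFunc)         // midnight, every day
    c.AddFunc("@every 1h30m10s", pollFunc)  // fixed interval from the start time
    c.Start()
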
Time zones

By default, all interpretation and scheduling is done in the machine's local
time zone (time.Local). You can specify a different time zone on construction:

    cron.New(
        cron.WithLocation(time.UTC))

Individual cron schedules may also override the time zone they are to be
interpreted in by providing an additional space-separated field at the beginning
of the cron spec, of the form "CRON_TZ=Asia/Tokyo".

For example:

    # Runs at 6am in time.Local
    cron.New().AddFunc("0 6 * * ?", ...)

    # Runs at 6am in America/New_York
    nyc, _ := time.LoadLocation("America/New_York")
    c := cron.New(cron.WithLocation(nyc))
    c.AddFunc("0 6 * * ?", ...)

    # Runs at 6am in Asia/Tokyo
    cron.New().AddFunc("CRON_TZ=Asia/Tokyo 0 6 * * ?", ...)

    # Runs at 6am in Asia/Tokyo
    c := cron.New(cron.WithLocation(nyc))
    c.SetLocation("America/New_York")
    c.AddFunc("CRON_TZ=Asia/Tokyo 0 6 * * ?", ...)

The prefix "TZ=(TIME ZONE)" is also supported for legacy compatibility.

Be aware that jobs scheduled during daylight-savings leap-ahead transitions will
not be run!

Job Wrappers

A Cron runner may be configured with a chain of job wrappers to add
cross-cutting functionality to all submitted jobs. For example, they may be used
to achieve the following effects:

- Recover any panics from jobs (activated by default)
- Delay a job's execution if the previous run hasn't completed yet
- Skip a job's execution if the previous run hasn't completed yet
- Log each job's invocations

Install wrappers for all jobs added to a cron using the `cron.WithChain` option:

    cron.New(cron.WithChain(
        cron.SkipIfStillRunning(logger),
    ))

Install wrappers for individual jobs by explicitly wrapping them:

    job = cron.NewChain(
        cron.SkipIfStillRunning(logger),
    ).Then(job)

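A custom wrapper is just a function from Job to Job. An illustrative sketch of a
wrapper that logs how long each run takes (Job, FuncJob, JobWrapper and
DefaultLogger are the types and variable defined by this package; the timing
logic itself is only an example):

    func logDuration(logger cron.Logger) cron.JobWrapper {
        return func(j cron.Job) cron.Job {
            return cron.FuncJob(func() {
                start := time.Now()
                j.Run()
                logger.Info("job finished", "took", time.Since(start))
            })
        }
    }

    c := cron.New(cron.WithChain(logDuration(cron.DefaultLogger)))
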
Thread safety

Since the Cron service runs concurrently with the calling code, some amount of
care must be taken to ensure proper synchronization.

All cron methods are designed to be correctly synchronized as long as the caller
ensures that invocations have a clear happens-before ordering between them.

Logging

Cron defines a Logger interface that is a subset of the one defined in
github.com/go-logr/logr. It has two logging levels (Info and Error), and
parameters are key/value pairs. This makes it possible for cron logging to plug
into structured logging systems. An adapter, [Verbose]PrintfLogger, is provided
to wrap the standard library *log.Logger.

For additional insight into Cron operations, verbose logging may be activated
which will record job runs, scheduling decisions, and added or removed jobs.
Activate it with a one-off logger as follows:

    cron.New(
        cron.WithLogger(
            cron.VerbosePrintfLogger(log.New(os.Stdout, "cron: ", log.LstdFlags))))

Implementation

Cron entries are stored in an array, sorted by their next activation time. Cron
sleeps until the next job is due to be run.

Upon waking:
- it runs each entry that is active on that second
- it calculates the next run times for the jobs that were run
- it re-sorts the array of entries by next activation time.
- it goes to sleep until the soonest job.
*/
package cron
|
|
@ -0,0 +1,86 @@
|
|||
package cron
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// DefaultLogger is used by Cron if none is specified.
|
||||
var DefaultLogger Logger = PrintfLogger(log.New(os.Stdout, "cron: ", log.LstdFlags))
|
||||
|
||||
// DiscardLogger can be used by callers to discard all log messages.
|
||||
var DiscardLogger Logger = PrintfLogger(log.New(ioutil.Discard, "", 0))
|
||||
|
||||
// Logger is the interface used in this package for logging, so that any backend
|
||||
// can be plugged in. It is a subset of the github.com/go-logr/logr interface.
|
||||
type Logger interface {
|
||||
// Info logs routine messages about cron's operation.
|
||||
Info(msg string, keysAndValues ...interface{})
|
||||
// Error logs an error condition.
|
||||
Error(err error, msg string, keysAndValues ...interface{})
|
||||
}
|
||||
|
||||
// PrintfLogger wraps a Printf-based logger (such as the standard library "log")
|
||||
// into an implementation of the Logger interface which logs errors only.
|
||||
func PrintfLogger(l interface{ Printf(string, ...interface{}) }) Logger {
|
||||
return printfLogger{l, false}
|
||||
}
|
||||
|
||||
// VerbosePrintfLogger wraps a Printf-based logger (such as the standard library
|
||||
// "log") into an implementation of the Logger interface which logs everything.
|
||||
func VerbosePrintfLogger(l interface{ Printf(string, ...interface{}) }) Logger {
|
||||
return printfLogger{l, true}
|
||||
}
|
||||
|
||||
type printfLogger struct {
|
||||
logger interface{ Printf(string, ...interface{}) }
|
||||
logInfo bool
|
||||
}
|
||||
|
||||
func (pl printfLogger) Info(msg string, keysAndValues ...interface{}) {
|
||||
if pl.logInfo {
|
||||
keysAndValues = formatTimes(keysAndValues)
|
||||
pl.logger.Printf(
|
||||
formatString(len(keysAndValues)),
|
||||
append([]interface{}{msg}, keysAndValues...)...)
|
||||
}
|
||||
}
|
||||
|
||||
func (pl printfLogger) Error(err error, msg string, keysAndValues ...interface{}) {
|
||||
keysAndValues = formatTimes(keysAndValues)
|
||||
pl.logger.Printf(
|
||||
formatString(len(keysAndValues)+2),
|
||||
append([]interface{}{msg, "error", err}, keysAndValues...)...)
|
||||
}
|
||||
|
||||
// formatString returns a logfmt-like format string for the number of
|
||||
// key/values.
|
||||
func formatString(numKeysAndValues int) string {
|
||||
var sb strings.Builder
|
||||
sb.WriteString("%s")
|
||||
if numKeysAndValues > 0 {
|
||||
sb.WriteString(", ")
|
||||
}
|
||||
for i := 0; i < numKeysAndValues/2; i++ {
|
||||
if i > 0 {
|
||||
sb.WriteString(", ")
|
||||
}
|
||||
sb.WriteString("%v=%v")
|
||||
}
|
||||
return sb.String()
|
||||
}
|
||||
|
||||
// formatTimes formats any time.Time values as RFC3339.
|
||||
func formatTimes(keysAndValues []interface{}) []interface{} {
|
||||
var formattedArgs []interface{}
|
||||
for _, arg := range keysAndValues {
|
||||
if t, ok := arg.(time.Time); ok {
|
||||
arg = t.Format(time.RFC3339)
|
||||
}
|
||||
formattedArgs = append(formattedArgs, arg)
|
||||
}
|
||||
return formattedArgs
|
||||
}
|
|
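// Usage sketch (illustrative, not part of the vendored file): wiring a verbose
// logger into a Cron instance so scheduling decisions are recorded. Only
// identifiers defined in this package and the standard library are used.
//
//	logger := VerbosePrintfLogger(log.New(os.Stdout, "cron: ", log.LstdFlags))
//	c := New(WithLogger(logger), WithChain(Recover(logger)))
//	c.Start()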
@ -0,0 +1,45 @@
|
|||
package cron
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
// Option represents a modification to the default behavior of a Cron.
|
||||
type Option func(*Cron)
|
||||
|
||||
// WithLocation overrides the timezone of the cron instance.
|
||||
func WithLocation(loc *time.Location) Option {
|
||||
return func(c *Cron) {
|
||||
c.location = loc
|
||||
}
|
||||
}
|
||||
|
||||
// WithSeconds overrides the parser used for interpreting job schedules to
|
||||
// include a seconds field as the first one.
|
||||
func WithSeconds() Option {
|
||||
return WithParser(NewParser(
|
||||
Second | Minute | Hour | Dom | Month | Dow | Descriptor,
|
||||
))
|
||||
}
|
||||
|
||||
// WithParser overrides the parser used for interpreting job schedules.
|
||||
func WithParser(p ScheduleParser) Option {
|
||||
return func(c *Cron) {
|
||||
c.parser = p
|
||||
}
|
||||
}
|
||||
|
||||
// WithChain specifies Job wrappers to apply to all jobs added to this cron.
|
||||
// Refer to the Chain* functions in this package for provided wrappers.
|
||||
func WithChain(wrappers ...JobWrapper) Option {
|
||||
return func(c *Cron) {
|
||||
c.chain = NewChain(wrappers...)
|
||||
}
|
||||
}
|
||||
|
||||
// WithLogger uses the provided logger.
|
||||
func WithLogger(logger Logger) Option {
|
||||
return func(c *Cron) {
|
||||
c.logger = logger
|
||||
}
|
||||
}
|
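// Usage sketch (illustrative, not part of the vendored file): options are plain
// functions over *Cron, so they compose freely at construction time.
//
//	c := New(
//		WithSeconds(),                     // accept six-field specs
//		WithLocation(time.UTC),            // schedule in UTC
//		WithChain(Recover(DefaultLogger)), // recover panics in jobs
//	)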
|
@ -0,0 +1,434 @@
|
|||
package cron
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Configuration options for creating a parser. Most options specify which
|
||||
// fields should be included, while others enable features. If a field is not
|
||||
// included the parser will assume a default value. These options do not change
|
||||
// the order fields are parsed in.
|
||||
type ParseOption int
|
||||
|
||||
const (
|
||||
Second ParseOption = 1 << iota // Seconds field, default 0
|
||||
SecondOptional // Optional seconds field, default 0
|
||||
Minute // Minutes field, default 0
|
||||
Hour // Hours field, default 0
|
||||
Dom // Day of month field, default *
|
||||
Month // Month field, default *
|
||||
Dow // Day of week field, default *
|
||||
DowOptional // Optional day of week field, default *
|
||||
Descriptor // Allow descriptors such as @monthly, @weekly, etc.
|
||||
)
|
||||
|
||||
var places = []ParseOption{
|
||||
Second,
|
||||
Minute,
|
||||
Hour,
|
||||
Dom,
|
||||
Month,
|
||||
Dow,
|
||||
}
|
||||
|
||||
var defaults = []string{
|
||||
"0",
|
||||
"0",
|
||||
"0",
|
||||
"*",
|
||||
"*",
|
||||
"*",
|
||||
}
|
||||
|
||||
// A custom Parser that can be configured.
|
||||
type Parser struct {
|
||||
options ParseOption
|
||||
}
|
||||
|
||||
// NewParser creates a Parser with custom options.
|
||||
//
|
||||
// It panics if more than one Optional is given, since it would be impossible to
|
||||
// correctly infer which optional is provided or missing in general.
|
||||
//
|
||||
// Examples
|
||||
//
|
||||
// // Standard parser without descriptors
|
||||
// specParser := NewParser(Minute | Hour | Dom | Month | Dow)
|
||||
// sched, err := specParser.Parse("0 0 15 */3 *")
|
||||
//
|
||||
// // Same as above, just excludes time fields
|
||||
// subsParser := NewParser(Dom | Month | Dow)
|
||||
// sched, err := specParser.Parse("15 */3 *")
|
||||
//
|
||||
// // Same as above, just makes Dow optional
|
||||
// subsParser := NewParser(Dom | Month | DowOptional)
|
||||
// sched, err := specParser.Parse("15 */3")
|
||||
//
|
||||
func NewParser(options ParseOption) Parser {
|
||||
optionals := 0
|
||||
if options&DowOptional > 0 {
|
||||
optionals++
|
||||
}
|
||||
if options&SecondOptional > 0 {
|
||||
optionals++
|
||||
}
|
||||
if optionals > 1 {
|
||||
panic("multiple optionals may not be configured")
|
||||
}
|
||||
return Parser{options}
|
||||
}
|
||||
|
||||
// Parse returns a new crontab schedule representing the given spec.
|
||||
// It returns a descriptive error if the spec is not valid.
|
||||
// It accepts crontab specs and features configured by NewParser.
|
||||
func (p Parser) Parse(spec string) (Schedule, error) {
|
||||
if len(spec) == 0 {
|
||||
return nil, fmt.Errorf("empty spec string")
|
||||
}
|
||||
|
||||
// Extract timezone if present
|
||||
var loc = time.Local
|
||||
if strings.HasPrefix(spec, "TZ=") || strings.HasPrefix(spec, "CRON_TZ=") {
|
||||
var err error
|
||||
i := strings.Index(spec, " ")
|
||||
eq := strings.Index(spec, "=")
|
||||
if loc, err = time.LoadLocation(spec[eq+1 : i]); err != nil {
|
||||
return nil, fmt.Errorf("provided bad location %s: %v", spec[eq+1:i], err)
|
||||
}
|
||||
spec = strings.TrimSpace(spec[i:])
|
||||
}
|
||||
|
||||
// Handle named schedules (descriptors), if configured
|
||||
if strings.HasPrefix(spec, "@") {
|
||||
if p.options&Descriptor == 0 {
|
||||
return nil, fmt.Errorf("parser does not accept descriptors: %v", spec)
|
||||
}
|
||||
return parseDescriptor(spec, loc)
|
||||
}
|
||||
|
||||
// Split on whitespace.
|
||||
fields := strings.Fields(spec)
|
||||
|
||||
// Validate & fill in any omitted or optional fields
|
||||
var err error
|
||||
fields, err = normalizeFields(fields, p.options)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
field := func(field string, r bounds) uint64 {
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
var bits uint64
|
||||
bits, err = getField(field, r)
|
||||
return bits
|
||||
}
|
||||
|
||||
var (
|
||||
second = field(fields[0], seconds)
|
||||
minute = field(fields[1], minutes)
|
||||
hour = field(fields[2], hours)
|
||||
dayofmonth = field(fields[3], dom)
|
||||
month = field(fields[4], months)
|
||||
dayofweek = field(fields[5], dow)
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &SpecSchedule{
|
||||
Second: second,
|
||||
Minute: minute,
|
||||
Hour: hour,
|
||||
Dom: dayofmonth,
|
||||
Month: month,
|
||||
Dow: dayofweek,
|
||||
Location: loc,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// normalizeFields takes a subset of the time fields and returns the full set
|
||||
// with defaults (zeroes) populated for unset fields.
|
||||
//
|
||||
// As part of performing this function, it also validates that the provided
|
||||
// fields are compatible with the configured options.
|
||||
func normalizeFields(fields []string, options ParseOption) ([]string, error) {
|
||||
// Validate optionals & add their field to options
|
||||
optionals := 0
|
||||
if options&SecondOptional > 0 {
|
||||
options |= Second
|
||||
optionals++
|
||||
}
|
||||
if options&DowOptional > 0 {
|
||||
options |= Dow
|
||||
optionals++
|
||||
}
|
||||
if optionals > 1 {
|
||||
return nil, fmt.Errorf("multiple optionals may not be configured")
|
||||
}
|
||||
|
||||
// Figure out how many fields we need
|
||||
max := 0
|
||||
for _, place := range places {
|
||||
if options&place > 0 {
|
||||
max++
|
||||
}
|
||||
}
|
||||
min := max - optionals
|
||||
|
||||
// Validate number of fields
|
||||
if count := len(fields); count < min || count > max {
|
||||
if min == max {
|
||||
return nil, fmt.Errorf("expected exactly %d fields, found %d: %s", min, count, fields)
|
||||
}
|
||||
return nil, fmt.Errorf("expected %d to %d fields, found %d: %s", min, max, count, fields)
|
||||
}
|
||||
|
||||
// Populate the optional field if not provided
|
||||
if min < max && len(fields) == min {
|
||||
switch {
|
||||
case options&DowOptional > 0:
|
||||
fields = append(fields, defaults[5]) // TODO: improve access to default
|
||||
case options&SecondOptional > 0:
|
||||
fields = append([]string{defaults[0]}, fields...)
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown optional field")
|
||||
}
|
||||
}
|
||||
|
||||
// Populate all fields not part of options with their defaults
|
||||
n := 0
|
||||
expandedFields := make([]string, len(places))
|
||||
copy(expandedFields, defaults)
|
||||
for i, place := range places {
|
||||
if options&place > 0 {
|
||||
expandedFields[i] = fields[n]
|
||||
n++
|
||||
}
|
||||
}
|
||||
return expandedFields, nil
|
||||
}
|
||||
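// Worked example (illustrative note): with SecondOptional|Minute|Hour|Dom|Month|Dow
// and the five-field input ["0-59/5", "*", "*", "*", "*"], min is 5 and max is 6,
// so the seconds default "0" is prepended, yielding six fields:
// ["0", "0-59/5", "*", "*", "*", "*"].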
|
||||
var standardParser = NewParser(
|
||||
Minute | Hour | Dom | Month | Dow | Descriptor,
|
||||
)
|
||||
|
||||
// ParseStandard returns a new crontab schedule representing the given
|
||||
// standardSpec (https://en.wikipedia.org/wiki/Cron). It requires 5 entries
|
||||
// representing: minute, hour, day of month, month and day of week, in that
|
||||
// order. It returns a descriptive error if the spec is not valid.
|
||||
//
|
||||
// It accepts
|
||||
// - Standard crontab specs, e.g. "* * * * ?"
|
||||
// - Descriptors, e.g. "@midnight", "@every 1h30m"
|
||||
func ParseStandard(standardSpec string) (Schedule, error) {
|
||||
return standardParser.Parse(standardSpec)
|
||||
}
|
||||
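// Usage sketch (illustrative, not part of the vendored file): a Schedule
// returned by ParseStandard can be stepped forward with Next.
//
//	sched, err := ParseStandard("30 8 * * MON-FRI")
//	if err != nil {
//		// handle the invalid spec
//	}
//	first := sched.Next(time.Now())  // next weekday at 08:30
//	second := sched.Next(first)      // the occurrence after that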
|
||||
// getField returns an Int with the bits set representing all of the times that
|
||||
// the field represents or error parsing field value. A "field" is a comma-separated
|
||||
// list of "ranges".
|
||||
func getField(field string, r bounds) (uint64, error) {
|
||||
var bits uint64
|
||||
ranges := strings.FieldsFunc(field, func(r rune) bool { return r == ',' })
|
||||
for _, expr := range ranges {
|
||||
bit, err := getRange(expr, r)
|
||||
if err != nil {
|
||||
return bits, err
|
||||
}
|
||||
bits |= bit
|
||||
}
|
||||
return bits, nil
|
||||
}
|
||||
|
||||
// getRange returns the bits indicated by the given expression:
|
||||
// number | number "-" number [ "/" number ]
|
||||
// or error parsing range.
|
||||
func getRange(expr string, r bounds) (uint64, error) {
|
||||
var (
|
||||
start, end, step uint
|
||||
rangeAndStep = strings.Split(expr, "/")
|
||||
lowAndHigh = strings.Split(rangeAndStep[0], "-")
|
||||
singleDigit = len(lowAndHigh) == 1
|
||||
err error
|
||||
)
|
||||
|
||||
var extra uint64
|
||||
if lowAndHigh[0] == "*" || lowAndHigh[0] == "?" {
|
||||
start = r.min
|
||||
end = r.max
|
||||
extra = starBit
|
||||
} else {
|
||||
start, err = parseIntOrName(lowAndHigh[0], r.names)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
switch len(lowAndHigh) {
|
||||
case 1:
|
||||
end = start
|
||||
case 2:
|
||||
end, err = parseIntOrName(lowAndHigh[1], r.names)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
default:
|
||||
return 0, fmt.Errorf("too many hyphens: %s", expr)
|
||||
}
|
||||
}
|
||||
|
||||
switch len(rangeAndStep) {
|
||||
case 1:
|
||||
step = 1
|
||||
case 2:
|
||||
step, err = mustParseInt(rangeAndStep[1])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// Special handling: "N/step" means "N-max/step".
|
||||
if singleDigit {
|
||||
end = r.max
|
||||
}
|
||||
if step > 1 {
|
||||
extra = 0
|
||||
}
|
||||
default:
|
||||
return 0, fmt.Errorf("too many slashes: %s", expr)
|
||||
}
|
||||
|
||||
if start < r.min {
|
||||
return 0, fmt.Errorf("beginning of range (%d) below minimum (%d): %s", start, r.min, expr)
|
||||
}
|
||||
if end > r.max {
|
||||
return 0, fmt.Errorf("end of range (%d) above maximum (%d): %s", end, r.max, expr)
|
||||
}
|
||||
if start > end {
|
||||
return 0, fmt.Errorf("beginning of range (%d) beyond end of range (%d): %s", start, end, expr)
|
||||
}
|
||||
if step == 0 {
|
||||
return 0, fmt.Errorf("step of range should be a positive number: %s", expr)
|
||||
}
|
||||
|
||||
return getBits(start, end, step) | extra, nil
|
||||
}
|
||||
|
||||
// parseIntOrName returns the (possibly-named) integer contained in expr.
|
||||
func parseIntOrName(expr string, names map[string]uint) (uint, error) {
|
||||
if names != nil {
|
||||
if namedInt, ok := names[strings.ToLower(expr)]; ok {
|
||||
return namedInt, nil
|
||||
}
|
||||
}
|
||||
return mustParseInt(expr)
|
||||
}
|
||||
|
||||
// mustParseInt parses the given expression as an int or returns an error.
|
||||
func mustParseInt(expr string) (uint, error) {
|
||||
num, err := strconv.Atoi(expr)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("failed to parse int from %s: %s", expr, err)
|
||||
}
|
||||
if num < 0 {
|
||||
return 0, fmt.Errorf("negative number (%d) not allowed: %s", num, expr)
|
||||
}
|
||||
|
||||
return uint(num), nil
|
||||
}
|
||||
|
||||
// getBits sets all bits in the range [min, max], modulo the given step size.
|
||||
func getBits(min, max, step uint) uint64 {
|
||||
var bits uint64
|
||||
|
||||
// If step is 1, use shifts.
|
||||
if step == 1 {
|
||||
return ^(math.MaxUint64 << (max + 1)) & (math.MaxUint64 << min)
|
||||
}
|
||||
|
||||
// Else, use a simple loop.
|
||||
for i := min; i <= max; i += step {
|
||||
bits |= 1 << i
|
||||
}
|
||||
return bits
|
||||
}
|
||||
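// Worked example (illustrative note): getBits(0, 59, 15) sets bits 0, 15, 30 and
// 45, which are exactly the minutes selected by the step expression "0-59/15".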
|
||||
// all returns all bits within the given bounds. (plus the star bit)
|
||||
func all(r bounds) uint64 {
|
||||
return getBits(r.min, r.max, 1) | starBit
|
||||
}
|
||||
|
||||
// parseDescriptor returns a predefined schedule for the expression, or error if none matches.
|
||||
func parseDescriptor(descriptor string, loc *time.Location) (Schedule, error) {
|
||||
switch descriptor {
|
||||
case "@yearly", "@annually":
|
||||
return &SpecSchedule{
|
||||
Second: 1 << seconds.min,
|
||||
Minute: 1 << minutes.min,
|
||||
Hour: 1 << hours.min,
|
||||
Dom: 1 << dom.min,
|
||||
Month: 1 << months.min,
|
||||
Dow: all(dow),
|
||||
Location: loc,
|
||||
}, nil
|
||||
|
||||
case "@monthly":
|
||||
return &SpecSchedule{
|
||||
Second: 1 << seconds.min,
|
||||
Minute: 1 << minutes.min,
|
||||
Hour: 1 << hours.min,
|
||||
Dom: 1 << dom.min,
|
||||
Month: all(months),
|
||||
Dow: all(dow),
|
||||
Location: loc,
|
||||
}, nil
|
||||
|
||||
case "@weekly":
|
||||
return &SpecSchedule{
|
||||
Second: 1 << seconds.min,
|
||||
Minute: 1 << minutes.min,
|
||||
Hour: 1 << hours.min,
|
||||
Dom: all(dom),
|
||||
Month: all(months),
|
||||
Dow: 1 << dow.min,
|
||||
Location: loc,
|
||||
}, nil
|
||||
|
||||
case "@daily", "@midnight":
|
||||
return &SpecSchedule{
|
||||
Second: 1 << seconds.min,
|
||||
Minute: 1 << minutes.min,
|
||||
Hour: 1 << hours.min,
|
||||
Dom: all(dom),
|
||||
Month: all(months),
|
||||
Dow: all(dow),
|
||||
Location: loc,
|
||||
}, nil
|
||||
|
||||
case "@hourly":
|
||||
return &SpecSchedule{
|
||||
Second: 1 << seconds.min,
|
||||
Minute: 1 << minutes.min,
|
||||
Hour: all(hours),
|
||||
Dom: all(dom),
|
||||
Month: all(months),
|
||||
Dow: all(dow),
|
||||
Location: loc,
|
||||
}, nil
|
||||
|
||||
}
|
||||
|
||||
const every = "@every "
|
||||
if strings.HasPrefix(descriptor, every) {
|
||||
duration, err := time.ParseDuration(descriptor[len(every):])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse duration %s: %s", descriptor, err)
|
||||
}
|
||||
return Every(duration), nil
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("unrecognized descriptor: %s", descriptor)
|
||||
}
|
|
@ -0,0 +1,188 @@
|
|||
package cron
|
||||
|
||||
import "time"
|
||||
|
||||
// SpecSchedule specifies a duty cycle (to the second granularity), based on a
|
||||
// traditional crontab specification. It is computed initially and stored as bit sets.
|
||||
type SpecSchedule struct {
|
||||
Second, Minute, Hour, Dom, Month, Dow uint64
|
||||
|
||||
// Override location for this schedule.
|
||||
Location *time.Location
|
||||
}
|
||||
|
||||
// bounds provides a range of acceptable values (plus a map of name to value).
|
||||
type bounds struct {
|
||||
min, max uint
|
||||
names map[string]uint
|
||||
}
|
||||
|
||||
// The bounds for each field.
|
||||
var (
|
||||
seconds = bounds{0, 59, nil}
|
||||
minutes = bounds{0, 59, nil}
|
||||
hours = bounds{0, 23, nil}
|
||||
dom = bounds{1, 31, nil}
|
||||
months = bounds{1, 12, map[string]uint{
|
||||
"jan": 1,
|
||||
"feb": 2,
|
||||
"mar": 3,
|
||||
"apr": 4,
|
||||
"may": 5,
|
||||
"jun": 6,
|
||||
"jul": 7,
|
||||
"aug": 8,
|
||||
"sep": 9,
|
||||
"oct": 10,
|
||||
"nov": 11,
|
||||
"dec": 12,
|
||||
}}
|
||||
dow = bounds{0, 6, map[string]uint{
|
||||
"sun": 0,
|
||||
"mon": 1,
|
||||
"tue": 2,
|
||||
"wed": 3,
|
||||
"thu": 4,
|
||||
"fri": 5,
|
||||
"sat": 6,
|
||||
}}
|
||||
)
|
||||
|
||||
const (
|
||||
// Set the top bit if a star was included in the expression.
|
||||
starBit = 1 << 63
|
||||
)
|
||||
|
||||
// Next returns the next time this schedule is activated, greater than the given
|
||||
// time. If no time can be found to satisfy the schedule, return the zero time.
|
||||
func (s *SpecSchedule) Next(t time.Time) time.Time {
|
||||
// General approach
|
||||
//
|
||||
// For Month, Day, Hour, Minute, Second:
|
||||
// Check if the time value matches. If yes, continue to the next field.
|
||||
// If the field doesn't match the schedule, then increment the field until it matches.
|
||||
// While incrementing the field, a wrap-around brings it back to the beginning
|
||||
// of the field list (since it is necessary to re-verify previous field
|
||||
// values)
|
||||
|
||||
// Convert the given time into the schedule's timezone, if one is specified.
|
||||
// Save the original timezone so we can convert back after we find a time.
|
||||
// Note that schedules without a time zone specified (time.Local) are treated
|
||||
// as local to the time provided.
|
||||
origLocation := t.Location()
|
||||
loc := s.Location
|
||||
if loc == time.Local {
|
||||
loc = t.Location()
|
||||
}
|
||||
if s.Location != time.Local {
|
||||
t = t.In(s.Location)
|
||||
}
|
||||
|
||||
// Start at the earliest possible time (the upcoming second).
|
||||
t = t.Add(1*time.Second - time.Duration(t.Nanosecond())*time.Nanosecond)
|
||||
|
||||
// This flag indicates whether a field has been incremented.
|
||||
added := false
|
||||
|
||||
// If no time is found within five years, return zero.
|
||||
yearLimit := t.Year() + 5
|
||||
|
||||
WRAP:
|
||||
if t.Year() > yearLimit {
|
||||
return time.Time{}
|
||||
}
|
||||
|
||||
// Find the first applicable month.
|
||||
// If it's this month, then do nothing.
|
||||
for 1<<uint(t.Month())&s.Month == 0 {
|
||||
// If we have to add a month, reset the other parts to 0.
|
||||
if !added {
|
||||
added = true
|
||||
// Otherwise, set the date at the beginning (since the current time is irrelevant).
|
||||
t = time.Date(t.Year(), t.Month(), 1, 0, 0, 0, 0, loc)
|
||||
}
|
||||
t = t.AddDate(0, 1, 0)
|
||||
|
||||
// Wrapped around.
|
||||
if t.Month() == time.January {
|
||||
goto WRAP
|
||||
}
|
||||
}
|
||||
|
||||
// Now get a day in that month.
|
||||
//
|
||||
// NOTE: This causes issues for daylight savings regimes where midnight does
|
||||
// not exist. For example: Sao Paulo has DST that transforms midnight on
|
||||
// 11/3 into 1am. Handle that by noticing when the Hour ends up != 0.
|
||||
for !dayMatches(s, t) {
|
||||
if !added {
|
||||
added = true
|
||||
t = time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, loc)
|
||||
}
|
||||
t = t.AddDate(0, 0, 1)
|
||||
// Notice if the hour is no longer midnight due to DST.
|
||||
// Add an hour if it's 23, subtract an hour if it's 1.
|
||||
if t.Hour() != 0 {
|
||||
if t.Hour() > 12 {
|
||||
t = t.Add(time.Duration(24-t.Hour()) * time.Hour)
|
||||
} else {
|
||||
t = t.Add(time.Duration(-t.Hour()) * time.Hour)
|
||||
}
|
||||
}
|
||||
|
||||
if t.Day() == 1 {
|
||||
goto WRAP
|
||||
}
|
||||
}
|
||||
|
||||
for 1<<uint(t.Hour())&s.Hour == 0 {
|
||||
if !added {
|
||||
added = true
|
||||
t = time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), 0, 0, 0, loc)
|
||||
}
|
||||
t = t.Add(1 * time.Hour)
|
||||
|
||||
if t.Hour() == 0 {
|
||||
goto WRAP
|
||||
}
|
||||
}
|
||||
|
||||
for 1<<uint(t.Minute())&s.Minute == 0 {
|
||||
if !added {
|
||||
added = true
|
||||
t = t.Truncate(time.Minute)
|
||||
}
|
||||
t = t.Add(1 * time.Minute)
|
||||
|
||||
if t.Minute() == 0 {
|
||||
goto WRAP
|
||||
}
|
||||
}
|
||||
|
||||
for 1<<uint(t.Second())&s.Second == 0 {
|
||||
if !added {
|
||||
added = true
|
||||
t = t.Truncate(time.Second)
|
||||
}
|
||||
t = t.Add(1 * time.Second)
|
||||
|
||||
if t.Second() == 0 {
|
||||
goto WRAP
|
||||
}
|
||||
}
|
||||
|
||||
return t.In(origLocation)
|
||||
}
|
||||
|
||||
// dayMatches returns true if the schedule's day-of-week and day-of-month
|
||||
// restrictions are satisfied by the given time.
|
||||
func dayMatches(s *SpecSchedule, t time.Time) bool {
|
||||
var (
|
||||
domMatch bool = 1<<uint(t.Day())&s.Dom > 0
|
||||
dowMatch bool = 1<<uint(t.Weekday())&s.Dow > 0
|
||||
)
|
||||
if s.Dom&starBit > 0 || s.Dow&starBit > 0 {
|
||||
return domMatch && dowMatch
|
||||
}
|
||||
return domMatch || dowMatch
|
||||
}
|
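// Usage sketch (illustrative, not part of the vendored file): when both the
// day-of-month and day-of-week fields are restricted, dayMatches treats them as
// a union, matching classic cron behaviour.
//
//	sched, _ := ParseStandard("0 0 1,15 * FRI")
//	// Fires at midnight on the 1st, on the 15th, and on every Friday.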
|
@ -1,3 +1,4 @@
|
|||
//go:build go1.12
|
||||
// +build go1.12
|
||||
|
||||
package fmtsort
|
||||
|
@ -7,12 +8,16 @@ import "reflect"
|
|||
const brokenNaNs = false
|
||||
|
||||
func mapElems(mapValue reflect.Value) ([]reflect.Value, []reflect.Value) {
|
||||
key := make([]reflect.Value, mapValue.Len())
|
||||
value := make([]reflect.Value, len(key))
|
||||
// Note: this code is arranged to not panic even in the presence
|
||||
// of a concurrent map update. The runtime is responsible for
|
||||
// yelling loudly if that happens. See issue 33275.
|
||||
n := mapValue.Len()
|
||||
key := make([]reflect.Value, 0, n)
|
||||
value := make([]reflect.Value, 0, n)
|
||||
iter := mapValue.MapRange()
|
||||
for i := 0; iter.Next(); i++ {
|
||||
key[i] = iter.Key()
|
||||
value[i] = iter.Value()
|
||||
for iter.Next() {
|
||||
key = append(key, iter.Key())
|
||||
value = append(value, iter.Value())
|
||||
}
|
||||
return key, value
|
||||
}
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
//go:build !go1.12
|
||||
// +build !go1.12
|
||||
|
||||
package fmtsort
|
||||
|
@ -8,8 +9,8 @@ const brokenNaNs = true
|
|||
|
||||
func mapElems(mapValue reflect.Value) ([]reflect.Value, []reflect.Value) {
|
||||
key := mapValue.MapKeys()
|
||||
value := make([]reflect.Value, len(key))
|
||||
for i, k := range key {
|
||||
value := make([]reflect.Value, 0, len(key))
|
||||
for _, k := range key {
|
||||
v := mapValue.MapIndex(k)
|
||||
if !v.IsValid() {
|
||||
// Note: we can't retrieve the value, probably because
|
||||
|
@ -17,7 +18,7 @@ func mapElems(mapValue reflect.Value) ([]reflect.Value, []reflect.Value) {
|
|||
// add a zero value of the correct type in that case.
|
||||
v = reflect.Zero(mapValue.Type().Elem())
|
||||
}
|
||||
value[i] = v
|
||||
value = append(value, v)
|
||||
}
|
||||
return key, value
|
||||
}
|
||||
|
|
|
@ -12,6 +12,9 @@ github.com/MakeNowJust/heredoc
|
|||
# github.com/NYTimes/gziphandler v1.1.1
|
||||
## explicit; go 1.11
|
||||
github.com/NYTimes/gziphandler
|
||||
# github.com/adhocore/gronx v1.6.3
|
||||
## explicit; go 1.13
|
||||
github.com/adhocore/gronx
|
||||
# github.com/alessio/shellescape v1.4.1
|
||||
## explicit; go 1.14
|
||||
github.com/alessio/shellescape
|
||||
|
@ -87,6 +90,9 @@ github.com/fsnotify/fsnotify
|
|||
# github.com/fvbommel/sortorder v1.0.1
|
||||
## explicit; go 1.13
|
||||
github.com/fvbommel/sortorder
|
||||
# github.com/go-co-op/gocron v1.30.1
|
||||
## explicit; go 1.16
|
||||
github.com/go-co-op/gocron
|
||||
# github.com/go-errors/errors v1.0.1
|
||||
## explicit
|
||||
github.com/go-errors/errors
|
||||
|
@ -398,8 +404,11 @@ github.com/prometheus/procfs/internal/util
|
|||
# github.com/rivo/uniseg v0.4.2
|
||||
## explicit; go 1.18
|
||||
github.com/rivo/uniseg
|
||||
# github.com/rogpeppe/go-internal v1.6.1
|
||||
## explicit; go 1.11
|
||||
# github.com/robfig/cron/v3 v3.0.1
|
||||
## explicit; go 1.12
|
||||
github.com/robfig/cron/v3
|
||||
# github.com/rogpeppe/go-internal v1.8.1
|
||||
## explicit; go 1.16
|
||||
github.com/rogpeppe/go-internal/fmtsort
|
||||
# github.com/rs/zerolog v1.26.1
|
||||
## explicit; go 1.15
|
||||
|
|