Update `fluxcd/pkg/runtime` to v0.58.0
Signed-off-by: Matheus Pimenta <matheuscscp@gmail.com>
parent dc0e5853c0
commit 42b9036bf3
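Besides the version bumps, this commit adapts the controller to two API changes visible in the hunks below: `runtimeClient.NewImpersonator` now takes variadic `ImpersonatorOption` values instead of a fixed positional argument list, and `cel.PollerWithCustomHealthChecks` now returns status-reader factories instead of mutating a `polling.Options` value. The hand-written Job status reader in package `statusreaders`, along with its wiring in `main.go`, is removed.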
go.mod | 4 ++--

@@ -24,7 +24,7 @@ require (
 	github.com/fluxcd/pkg/apis/meta v1.10.0
 	github.com/fluxcd/pkg/http/fetch v0.15.0
 	github.com/fluxcd/pkg/kustomize v1.16.0
-	github.com/fluxcd/pkg/runtime v0.54.0
+	github.com/fluxcd/pkg/runtime v0.58.0
 	github.com/fluxcd/pkg/ssa v0.45.1
 	github.com/fluxcd/pkg/tar v0.11.0
 	github.com/fluxcd/pkg/testserver v0.10.0
@@ -40,7 +40,7 @@ require (
 	k8s.io/apimachinery v0.32.2
 	k8s.io/client-go v0.32.2
 	k8s.io/utils v0.0.0-20241210054802-24370beab758
-	sigs.k8s.io/controller-runtime v0.20.2
+	sigs.k8s.io/controller-runtime v0.20.4
 	sigs.k8s.io/kustomize/api v0.19.0
 	sigs.k8s.io/yaml v1.4.0
 )
go.sum | 8 ++++----

@@ -187,8 +187,8 @@ github.com/fluxcd/pkg/http/fetch v0.15.0 h1:AJ1JuE2asuK4QMfbHjxctFURke5FvZtyljjI
 github.com/fluxcd/pkg/http/fetch v0.15.0/go.mod h1:feTESfETKU14jq+e/Ce8QnMBTCh9O79bLMSMe5t55fQ=
 github.com/fluxcd/pkg/kustomize v1.16.0 h1:UBOeIvkrC6y4owYs7vZwG5PUVFeqnRoDFN9eaNhuNPI=
 github.com/fluxcd/pkg/kustomize v1.16.0/go.mod h1:6yQkAZaG+w3nXY30LbyWRYHimjRcLRwlYkrwG0ygMSI=
-github.com/fluxcd/pkg/runtime v0.54.0 h1:H7zSW8mTIZIkXaOdxzvi+oK0cH3jccyLoCBbXDPIGjg=
-github.com/fluxcd/pkg/runtime v0.54.0/go.mod h1:PC73Yn/AaBQXnd2YYq0cnQqF3RmQKoM265crrjFJnKI=
+github.com/fluxcd/pkg/runtime v0.58.0 h1:aic88k/PUqEOzq62nxav9XEyUicAbT+fiDcJ7dzWhqc=
+github.com/fluxcd/pkg/runtime v0.58.0/go.mod h1:ZRlEHAHhlP3gPl7/+kZ8i8nimZ+/mSnpURlexBJULnI=
 github.com/fluxcd/pkg/sourceignore v0.11.0 h1:xzpYmc5/t/Ck+/DkJSX3r+VbahDRIAn5kbv04fynWUo=
 github.com/fluxcd/pkg/sourceignore v0.11.0/go.mod h1:ri2FvlzX8ep2iszOK5gF/riYq2TNgpVvsfJ2QY0dLWI=
 github.com/fluxcd/pkg/ssa v0.45.1 h1:ISl84TJwRP/GuZXrKiR9Tf8JOnG5XFgtjcYoR4XQYf4=
@@ -580,8 +580,8 @@ k8s.io/kubectl v0.32.2 h1:TAkag6+XfSBgkqK9I7ZvwtF0WVtUAvK8ZqTt+5zi1Us=
 k8s.io/kubectl v0.32.2/go.mod h1:+h/NQFSPxiDZYX/WZaWw9fwYezGLISP0ud8nQKg+3g8=
 k8s.io/utils v0.0.0-20241210054802-24370beab758 h1:sdbE21q2nlQtFh65saZY+rRM6x6aJJI8IUa1AmH/qa0=
 k8s.io/utils v0.0.0-20241210054802-24370beab758/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
-sigs.k8s.io/controller-runtime v0.20.2 h1:/439OZVxoEc02psi1h4QO3bHzTgu49bb347Xp4gW1pc=
-sigs.k8s.io/controller-runtime v0.20.2/go.mod h1:xg2XB0K5ShQzAgsoujxuKN4LNXR2LfwwHsPj7Iaw+XY=
+sigs.k8s.io/controller-runtime v0.20.4 h1:X3c+Odnxz+iPTRobG4tp092+CvBU9UK0t/bRf+n0DGU=
+sigs.k8s.io/controller-runtime v0.20.4/go.mod h1:xg2XB0K5ShQzAgsoujxuKN4LNXR2LfwwHsPj7Iaw+XY=
 sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE=
 sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
 sigs.k8s.io/kustomize/api v0.19.0 h1:F+2HB2mU1MSiR9Hp1NEgoU2q9ItNOaBJl0I4Dlus5SQ=
@@ -119,6 +119,6 @@ data: {}
 	for _, cond := range []string{meta.ReadyCondition, meta.StalledCondition} {
 		g.Expect(conditions.GetReason(resultK, cond)).To(Equal(meta.InvalidCELExpressionReason))
 		g.Expect(conditions.GetMessage(resultK, cond)).To(ContainSubstring(
-			"failed to create custom status reader for healthchecks[0]: failed to parse the expression InProgress: failed to parse the CEL expression 'foo.': ERROR: <input>:1:5: Syntax error: no viable alternative at input '.'"))
+			"failed to create custom status evaluator for healthchecks[0]: failed to parse the expression InProgress: failed to parse the CEL expression 'foo.': ERROR: <input>:1:5: Syntax error: no viable alternative at input '.'"))
 	}
 }
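(The expected substring changes from "custom status reader" to "custom status evaluator"; this message text presumably originates in the upgraded `cel` package of `fluxcd/pkg/runtime`.)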
@@ -48,7 +48,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/predicate"
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"

-	"github.com/fluxcd/cli-utils/pkg/kstatus/polling"
+	"github.com/fluxcd/cli-utils/pkg/kstatus/polling/engine"
 	"github.com/fluxcd/cli-utils/pkg/object"
 	apiacl "github.com/fluxcd/pkg/apis/acl"
 	eventv1 "github.com/fluxcd/pkg/apis/event/v1beta1"
@@ -92,7 +92,7 @@ type KustomizationReconciler struct {

 	Mapper               apimeta.RESTMapper
 	APIReader            client.Reader
-	PollingOpts          polling.Options
+	ClusterReader        engine.ClusterReaderFactory
 	ControllerName       string
 	statusManager        string
 	NoCrossNamespaceRefs bool
@@ -223,7 +223,7 @@ func (r *KustomizationReconciler) Reconcile(ctx context.Context, req ctrl.Reques
 	}

 	// Configure custom health checks.
-	pollingOpts, err := r.getPollerOptions(ctx, obj)
+	statusReaders, err := cel.PollerWithCustomHealthChecks(ctx, obj.Spec.HealthCheckExprs)
 	if err != nil {
 		const msg = "Reconciliation failed terminally due to configuration error"
 		errMsg := fmt.Sprintf("%s: %v", msg, err)
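For orientation, the new API separates compiling the CEL expressions from binding them to a cluster: `PollerWithCustomHealthChecks` returns factories, and each factory produces an `engine.StatusReader` once a RESTMapper for the target cluster is known. A minimal sketch, assuming the `cel` package lives at `github.com/fluxcd/pkg/runtime/cel` and using a hypothetical `buildStatusReaders` helper (the signature itself follows the hunks in this commit):

package controller

import (
	"context"
	"fmt"

	apimeta "k8s.io/apimachinery/pkg/api/meta"

	"github.com/fluxcd/cli-utils/pkg/kstatus/polling/engine"
	"github.com/fluxcd/pkg/runtime/cel"

	kustomizev1 "github.com/fluxcd/kustomize-controller/api/v1"
)

// buildStatusReaders is a hypothetical helper: it compiles the CEL
// health-check expressions once, then binds the resulting factories
// to the given RESTMapper.
func buildStatusReaders(ctx context.Context, obj *kustomizev1.Kustomization,
	mapper apimeta.RESTMapper) ([]engine.StatusReader, error) {
	// v0.58.0 shape: no polling.Options in, reader factories out.
	factories, err := cel.PollerWithCustomHealthChecks(ctx, obj.Spec.HealthCheckExprs)
	if err != nil {
		// A bad expression is a terminal configuration error.
		return nil, fmt.Errorf("invalid healthCheckExprs: %w", err)
	}
	readers := make([]engine.StatusReader, 0, len(factories))
	for _, newReader := range factories {
		readers = append(readers, newReader(mapper))
	}
	return readers, nil
}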
@@ -280,7 +280,7 @@ func (r *KustomizationReconciler) Reconcile(ctx context.Context, req ctrl.Reques
 	}

 	// Reconcile the latest revision.
-	reconcileErr := r.reconcile(ctx, obj, artifactSource, patcher, pollingOpts)
+	reconcileErr := r.reconcile(ctx, obj, artifactSource, patcher, statusReaders)

 	// Requeue at the specified retry interval if the artifact tarball is not found.
 	if errors.Is(reconcileErr, fetch.ErrFileNotFound) {
@@ -311,7 +311,7 @@ func (r *KustomizationReconciler) reconcile(
 	obj *kustomizev1.Kustomization,
 	src sourcev1.Source,
 	patcher *patch.SerialPatcher,
-	pollingOpts polling.Options) error {
+	statusReaders []func(apimeta.RESTMapper) engine.StatusReader) error {
 	log := ctrl.LoggerFrom(ctx)

 	// Update status with the reconciliation progress.
@@ -378,15 +378,20 @@ func (r *KustomizationReconciler) reconcile(
 	}

 	// Configure the Kubernetes client for impersonation.
-	impersonation := runtimeClient.NewImpersonator(
-		r.Client,
-		pollingOpts,
-		obj.Spec.KubeConfig,
-		r.KubeConfigOpts,
-		r.DefaultServiceAccount,
-		obj.Spec.ServiceAccountName,
-		obj.GetNamespace(),
-	)
+	var impersonatorOpts []runtimeClient.ImpersonatorOption
+	if r.DefaultServiceAccount != "" || obj.Spec.ServiceAccountName != "" {
+		impersonatorOpts = append(impersonatorOpts,
+			runtimeClient.WithServiceAccount(r.DefaultServiceAccount, obj.Spec.ServiceAccountName, obj.GetNamespace()))
+	}
+	if obj.Spec.KubeConfig != nil {
+		impersonatorOpts = append(impersonatorOpts,
+			runtimeClient.WithKubeConfig(obj.Spec.KubeConfig, r.KubeConfigOpts, obj.GetNamespace()))
+	}
+	if r.ClusterReader != nil || len(statusReaders) > 0 {
+		impersonatorOpts = append(impersonatorOpts,
+			runtimeClient.WithPolling(r.ClusterReader, statusReaders...))
+	}
+	impersonation := runtimeClient.NewImpersonator(r.Client, impersonatorOpts...)

 	// Create the Kubernetes client that runs under impersonation.
 	kubeClient, statusPoller, err := impersonation.GetClient(ctx)
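The move from a fixed seven-argument constructor to variadic `ImpersonatorOption` values means each concern (service-account impersonation, remote kubeconfig, status polling) is passed only when actually configured, and the constructor no longer needs placeholder arguments for unused features.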
@@ -1007,15 +1012,19 @@ func (r *KustomizationReconciler) finalize(ctx context.Context,
 		obj.Status.Inventory.Entries != nil {
 		objects, _ := inventory.List(obj.Status.Inventory)

-		impersonation := runtimeClient.NewImpersonator(
-			r.Client,
-			r.PollingOpts,
-			obj.Spec.KubeConfig,
-			r.KubeConfigOpts,
-			r.DefaultServiceAccount,
-			obj.Spec.ServiceAccountName,
-			obj.GetNamespace(),
-		)
+		var impersonatorOpts []runtimeClient.ImpersonatorOption
+		if r.DefaultServiceAccount != "" || obj.Spec.ServiceAccountName != "" {
+			impersonatorOpts = append(impersonatorOpts,
+				runtimeClient.WithServiceAccount(r.DefaultServiceAccount, obj.Spec.ServiceAccountName, obj.GetNamespace()))
+		}
+		if obj.Spec.KubeConfig != nil {
+			impersonatorOpts = append(impersonatorOpts,
+				runtimeClient.WithKubeConfig(obj.Spec.KubeConfig, r.KubeConfigOpts, obj.GetNamespace()))
+		}
+		if r.ClusterReader != nil {
+			impersonatorOpts = append(impersonatorOpts, runtimeClient.WithPolling(r.ClusterReader))
+		}
+		impersonation := runtimeClient.NewImpersonator(r.Client, impersonatorOpts...)
 		if impersonation.CanImpersonate(ctx) {
 			kubeClient, _, err := impersonation.GetClient(ctx)
 			if err != nil {
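Note that in `finalize` only `WithPolling(r.ClusterReader)` is passed, with no status readers: finalization deletes the inventoried objects and apparently has no need to evaluate custom health checks, so the CEL readers built in `Reconcile` are not constructed here.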
@@ -1156,21 +1165,3 @@ func getOriginRevision(src sourcev1.Source) string {
 	}
 	return a.Metadata[OCIArtifactOriginRevisionAnnotation]
 }
-
-// getPollerOptions returns the status poller options
-// based on the healthcheck expressions defined in the Kustomization
-// object spec.
-func (r *KustomizationReconciler) getPollerOptions(ctx context.Context,
-	obj *kustomizev1.Kustomization) (polling.Options, error) {
-	opts := r.PollingOpts
-
-	if hc := obj.Spec.HealthCheckExprs; len(hc) > 0 {
-		var err error
-		opts, err = cel.PollerWithCustomHealthChecks(ctx, opts, hc, r.Mapper)
-		if err != nil {
-			return polling.Options{}, err
-		}
-	}
-
-	return opts, nil
-}
@@ -1,118 +0,0 @@
-/*
-Copyright 2022 The Flux authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package statusreaders
-
-import (
-	"context"
-	"fmt"
-
-	batchv1 "k8s.io/api/batch/v1"
-	corev1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/meta"
-	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
-	"k8s.io/apimachinery/pkg/runtime/schema"
-
-	"github.com/fluxcd/cli-utils/pkg/kstatus/polling/engine"
-	"github.com/fluxcd/cli-utils/pkg/kstatus/polling/event"
-	kstatusreaders "github.com/fluxcd/cli-utils/pkg/kstatus/polling/statusreaders"
-	"github.com/fluxcd/cli-utils/pkg/kstatus/status"
-	"github.com/fluxcd/cli-utils/pkg/object"
-)
-
-type customJobStatusReader struct {
-	genericStatusReader engine.StatusReader
-}
-
-func NewCustomJobStatusReader(mapper meta.RESTMapper) engine.StatusReader {
-	genericStatusReader := kstatusreaders.NewGenericStatusReader(mapper, jobConditions)
-	return &customJobStatusReader{
-		genericStatusReader: genericStatusReader,
-	}
-}
-
-func (j *customJobStatusReader) Supports(gk schema.GroupKind) bool {
-	return gk == batchv1.SchemeGroupVersion.WithKind("Job").GroupKind()
-}
-
-func (j *customJobStatusReader) ReadStatus(ctx context.Context, reader engine.ClusterReader, resource object.ObjMetadata) (*event.ResourceStatus, error) {
-	return j.genericStatusReader.ReadStatus(ctx, reader, resource)
-}
-
-func (j *customJobStatusReader) ReadStatusForObject(ctx context.Context, reader engine.ClusterReader, resource *unstructured.Unstructured) (*event.ResourceStatus, error) {
-	return j.genericStatusReader.ReadStatusForObject(ctx, reader, resource)
-}
-
-// Ref: https://github.com/kubernetes-sigs/cli-utils/blob/v0.29.4/pkg/kstatus/status/core.go
-// Modified to return Current status only when the Job has completed as opposed to when it's in progress.
-func jobConditions(u *unstructured.Unstructured) (*status.Result, error) {
-	obj := u.UnstructuredContent()
-
-	parallelism := status.GetIntField(obj, ".spec.parallelism", 1)
-	completions := status.GetIntField(obj, ".spec.completions", parallelism)
-	succeeded := status.GetIntField(obj, ".status.succeeded", 0)
-	failed := status.GetIntField(obj, ".status.failed", 0)
-
-	// Conditions
-	// https://github.com/kubernetes/kubernetes/blob/master/pkg/controller/job/utils.go#L24
-	objc, err := status.GetObjectWithConditions(obj)
-	if err != nil {
-		return nil, err
-	}
-	for _, c := range objc.Status.Conditions {
-		switch c.Type {
-		case "Complete":
-			if c.Status == corev1.ConditionTrue {
-				message := fmt.Sprintf("Job Completed. succeeded: %d/%d", succeeded, completions)
-				return &status.Result{
-					Status:     status.CurrentStatus,
-					Message:    message,
-					Conditions: []status.Condition{},
-				}, nil
-			}
-		case "Failed":
-			message := fmt.Sprintf("Job Failed. failed: %d/%d", failed, completions)
-			if c.Status == corev1.ConditionTrue {
-				return &status.Result{
-					Status:  status.FailedStatus,
-					Message: message,
-					Conditions: []status.Condition{
-						{
-							Type:    status.ConditionStalled,
-							Status:  corev1.ConditionTrue,
-							Reason:  "JobFailed",
-							Message: message,
-						},
-					},
-				}, nil
-			}
-		}
-	}
-
-	message := "Job in progress"
-	return &status.Result{
-		Status:  status.InProgressStatus,
-		Message: message,
-		Conditions: []status.Condition{
-			{
-				Type:    status.ConditionReconciling,
-				Status:  corev1.ConditionTrue,
-				Reason:  "JobInProgress",
-				Message: message,
-			},
-		},
-	}, nil
-}
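The deleted reader reported a Job as Current only once its Complete condition turned True, and as Failed when the Failed condition turned True. With the CEL-based health checks that this commit wires in, roughly equivalent behavior can be declared per Kustomization via `healthCheckExprs`. A hypothetical sketch, expressed as a Go constant holding the manifest fragment (the `apiVersion`/`kind`/`current`/`failed` field names follow the healthCheckExprs API referenced above; the CEL expressions themselves are illustrative and not part of this commit):

package controller

// jobHealthCheckYAML approximates the deleted custom Job reader with
// CEL health-check expressions. Illustrative only; the expressions are
// assumptions, not taken from this commit.
const jobHealthCheckYAML = `
spec:
  healthCheckExprs:
    - apiVersion: batch/v1
      kind: Job
      current: status.conditions.exists(c, c.type == 'Complete' && c.status == 'True')
      failed: status.conditions.exists(c, c.type == 'Failed' && c.status == 'True')
`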
@@ -1,65 +0,0 @@
-/*
-Copyright 2022 The Flux authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package statusreaders
-
-import (
-	"testing"
-
-	. "github.com/onsi/gomega"
-	batchv1 "k8s.io/api/batch/v1"
-	corev1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
-	"github.com/fluxcd/cli-utils/pkg/kstatus/status"
-	"github.com/fluxcd/pkg/runtime/patch"
-)
-
-func Test_jobConditions(t *testing.T) {
-	job := &batchv1.Job{
-		ObjectMeta: metav1.ObjectMeta{
-			Name: "job",
-		},
-		Spec:   batchv1.JobSpec{},
-		Status: batchv1.JobStatus{},
-	}
-
-	t.Run("job without Complete condition returns InProgress status", func(t *testing.T) {
-		g := NewWithT(t)
-		us, err := patch.ToUnstructured(job)
-		g.Expect(err).ToNot(HaveOccurred())
-		result, err := jobConditions(us)
-		g.Expect(err).ToNot(HaveOccurred())
-		g.Expect(result.Status).To(Equal(status.InProgressStatus))
-	})
-
-	t.Run("job with Complete condition as True returns Current status", func(t *testing.T) {
-		g := NewWithT(t)
-		job.Status = batchv1.JobStatus{
-			Conditions: []batchv1.JobCondition{
-				{
-					Type:   batchv1.JobComplete,
-					Status: corev1.ConditionTrue,
-				},
-			},
-		}
-		us, err := patch.ToUnstructured(job)
-		g.Expect(err).ToNot(HaveOccurred())
-		result, err := jobConditions(us)
-		g.Expect(err).ToNot(HaveOccurred())
-		g.Expect(result.Status).To(Equal(status.CurrentStatus))
-	})
-}
main.go | 12 +++---------

@@ -34,7 +34,6 @@ import (
 	ctrlcfg "sigs.k8s.io/controller-runtime/pkg/config"
 	metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"

-	"github.com/fluxcd/cli-utils/pkg/kstatus/polling"
 	"github.com/fluxcd/cli-utils/pkg/kstatus/polling/clusterreader"
 	"github.com/fluxcd/cli-utils/pkg/kstatus/polling/engine"
 	"github.com/fluxcd/pkg/runtime/acl"
@@ -54,7 +53,6 @@ import (
 	kustomizev1 "github.com/fluxcd/kustomize-controller/api/v1"
 	"github.com/fluxcd/kustomize-controller/internal/controller"
 	"github.com/fluxcd/kustomize-controller/internal/features"
-	"github.com/fluxcd/kustomize-controller/internal/statusreaders"
 	// +kubebuilder:scaffold:imports
 )

@@ -220,13 +218,9 @@ func main() {
 		os.Exit(1)
 	}

-	jobStatusReader := statusreaders.NewCustomJobStatusReader(restMapper)
-	pollingOpts := polling.Options{
-		CustomStatusReaders: []engine.StatusReader{jobStatusReader},
-	}
-
+	var clusterReader engine.ClusterReaderFactory
 	if ok, _ := features.Enabled(features.DisableStatusPollerCache); ok {
-		pollingOpts.ClusterReaderFactory = engine.ClusterReaderFactoryFunc(clusterreader.NewDirectClusterReader)
+		clusterReader = engine.ClusterReaderFactoryFunc(clusterreader.NewDirectClusterReader)
 	}

 	failFast := true
@@ -259,7 +253,7 @@ func main() {
 		FailFast:                failFast,
 		ConcurrentSSA:           concurrentSSA,
 		KubeConfigOpts:          kubeConfigOpts,
-		PollingOpts:             pollingOpts,
+		ClusterReader:           clusterReader,
 		DisallowedFieldManagers: disallowedFieldManagers,
 		StrictSubstitutions:     strictSubstitutions,
 		GroupChangeLog:          groupChangeLog,
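After these changes `main.go` no longer assembles a `polling.Options` value at startup: the only polling concern it still owns is the optional uncached cluster reader, handed to the reconciler as an `engine.ClusterReaderFactory`, while custom status readers are now built per object from `healthCheckExprs` inside `Reconcile`.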