Merge pull request #745 from fluxcd/status-refactoring
Refactor: Adopt Flux runtime conditions and status standards
commit 0f131a0361
@@ -20,8 +20,6 @@ import (
	"time"

-	apimeta "k8s.io/apimachinery/pkg/api/meta"
	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
@@ -231,78 +229,6 @@ type KustomizationStatus struct {
	Snapshot *Snapshot `json:"snapshot,omitempty"`
}

-// KustomizationProgressing resets the conditions of the given Kustomization to a single
-// ReadyCondition with status ConditionUnknown.
-func KustomizationProgressing(k Kustomization) Kustomization {
-	newCondition := metav1.Condition{
-		Type:    meta.ReadyCondition,
-		Status:  metav1.ConditionUnknown,
-		Reason:  meta.ProgressingReason,
-		Message: "reconciliation in progress",
-	}
-	apimeta.SetStatusCondition(k.GetStatusConditions(), newCondition)
-	return k
-}
-
-// SetKustomizationHealthiness sets the HealthyCondition status for a Kustomization.
-func SetKustomizationHealthiness(k *Kustomization, status metav1.ConditionStatus, reason, message string) {
-	switch len(k.Spec.HealthChecks) {
-	case 0:
-		apimeta.RemoveStatusCondition(k.GetStatusConditions(), HealthyCondition)
-	default:
-		newCondition := metav1.Condition{
-			Type:    HealthyCondition,
-			Status:  status,
-			Reason:  reason,
-			Message: trimString(message, MaxConditionMessageLength),
-		}
-		apimeta.SetStatusCondition(k.GetStatusConditions(), newCondition)
-	}
-}
-
-// SetKustomizationReadiness sets the ReadyCondition, ObservedGeneration, and LastAttemptedRevision
-// on the Kustomization.
-func SetKustomizationReadiness(k *Kustomization, status metav1.ConditionStatus, reason, message string, revision string) {
-	newCondition := metav1.Condition{
-		Type:    meta.ReadyCondition,
-		Status:  status,
-		Reason:  reason,
-		Message: trimString(message, MaxConditionMessageLength),
-	}
-	apimeta.SetStatusCondition(k.GetStatusConditions(), newCondition)
-
-	k.Status.ObservedGeneration = k.Generation
-	k.Status.LastAttemptedRevision = revision
-}
-
-// KustomizationNotReady registers a failed apply attempt of the given Kustomization.
-func KustomizationNotReady(k Kustomization, revision, reason, message string) Kustomization {
-	SetKustomizationReadiness(&k, metav1.ConditionFalse, reason, trimString(message, MaxConditionMessageLength), revision)
-	if revision != "" {
-		k.Status.LastAttemptedRevision = revision
-	}
-	return k
-}
-
-// KustomizationNotReadySnapshot registers a failed apply attempt of the given Kustomization,
-// including a Snapshot.
-func KustomizationNotReadySnapshot(k Kustomization, snapshot *Snapshot, revision, reason, message string) Kustomization {
-	SetKustomizationReadiness(&k, metav1.ConditionFalse, reason, trimString(message, MaxConditionMessageLength), revision)
-	SetKustomizationHealthiness(&k, metav1.ConditionFalse, reason, reason)
-	k.Status.Snapshot = snapshot
-	k.Status.LastAttemptedRevision = revision
-	return k
-}
-
-// KustomizationReady registers a successful apply attempt of the given Kustomization.
-func KustomizationReady(k Kustomization, snapshot *Snapshot, revision, reason, message string) Kustomization {
-	SetKustomizationReadiness(&k, metav1.ConditionTrue, reason, trimString(message, MaxConditionMessageLength), revision)
-	SetKustomizationHealthiness(&k, metav1.ConditionTrue, reason, reason)
-	k.Status.Snapshot = snapshot
-	k.Status.LastAppliedRevision = revision
-	return k
-}
-
// GetTimeout returns the timeout with default.
func (in Kustomization) GetTimeout() time.Duration {
	duration := in.Spec.Interval.Duration
@@ -48,4 +48,8 @@ const (
	// ReconciliationFailedReason represents the fact that
	// the reconciliation failed.
	ReconciliationFailedReason string = "ReconciliationFailed"
+
+	// ProgressingWithRetryReason represents the fact that
+	// the reconciliation encountered an error that will be retried.
+	ProgressingWithRetryReason string = "ProgressingWithRetry"
)
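The new reason lets callers distinguish a first-time reconciliation from one that is being retried after a failure. A minimal sketch of such a check, assuming a populated `*kustomizev1.Kustomization`; the function name is illustrative, but the pattern mirrors the test helpers later in this diff:

```go
import (
	"github.com/fluxcd/pkg/apis/meta"
	"github.com/fluxcd/pkg/runtime/conditions"

	kustomizev1 "github.com/fluxcd/kustomize-controller/api/v1beta2"
)

// isRetrying reports whether the object is reconciling because a previous
// attempt failed, as signalled by the Reconciling condition carrying
// ProgressingWithRetryReason instead of the plain Progressing reason.
func isRetrying(k *kustomizev1.Kustomization) bool {
	return conditions.IsReconciling(k) &&
		conditions.GetReason(k, meta.ReconcilingCondition) == kustomizev1.ProgressingWithRetryReason
}
```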
@@ -22,7 +22,6 @@ import (
	"github.com/fluxcd/pkg/apis/kustomize"
	"github.com/fluxcd/pkg/apis/meta"
	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
-	apimeta "k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@@ -236,78 +235,6 @@ type KustomizationStatus struct {
	Inventory *ResourceInventory `json:"inventory,omitempty"`
}

-// KustomizationProgressing resets the conditions of the given Kustomization to a single
-// ReadyCondition with status ConditionUnknown.
-func KustomizationProgressing(k Kustomization, message string) Kustomization {
-	newCondition := metav1.Condition{
-		Type:    meta.ReadyCondition,
-		Status:  metav1.ConditionUnknown,
-		Reason:  meta.ProgressingReason,
-		Message: trimString(message, MaxConditionMessageLength),
-	}
-	apimeta.SetStatusCondition(k.GetStatusConditions(), newCondition)
-	return k
-}
-
-// SetKustomizationHealthiness sets the HealthyCondition status for a Kustomization.
-func SetKustomizationHealthiness(k *Kustomization, status metav1.ConditionStatus, reason, message string) {
-	if !k.Spec.Wait && len(k.Spec.HealthChecks) == 0 {
-		apimeta.RemoveStatusCondition(k.GetStatusConditions(), HealthyCondition)
-	} else {
-		newCondition := metav1.Condition{
-			Type:    HealthyCondition,
-			Status:  status,
-			Reason:  reason,
-			Message: trimString(message, MaxConditionMessageLength),
-		}
-		apimeta.SetStatusCondition(k.GetStatusConditions(), newCondition)
-	}
-}
-
-// SetKustomizationReadiness sets the ReadyCondition, ObservedGeneration, and LastAttemptedRevision on the Kustomization.
-func SetKustomizationReadiness(k *Kustomization, status metav1.ConditionStatus, reason, message string, revision string) {
-	newCondition := metav1.Condition{
-		Type:    meta.ReadyCondition,
-		Status:  status,
-		Reason:  reason,
-		Message: trimString(message, MaxConditionMessageLength),
-	}
-	apimeta.SetStatusCondition(k.GetStatusConditions(), newCondition)
-
-	k.Status.ObservedGeneration = k.Generation
-	k.Status.LastAttemptedRevision = revision
-}
-
-// KustomizationNotReady registers a failed apply attempt of the given Kustomization.
-func KustomizationNotReady(k Kustomization, revision, reason, message string) Kustomization {
-	SetKustomizationReadiness(&k, metav1.ConditionFalse, reason, trimString(message, MaxConditionMessageLength), revision)
-	if revision != "" {
-		k.Status.LastAttemptedRevision = revision
-	}
-	return k
-}
-
-// KustomizationNotReadyInventory registers a failed apply attempt of the given Kustomization.
-func KustomizationNotReadyInventory(k Kustomization, inventory *ResourceInventory, revision, reason, message string) Kustomization {
-	SetKustomizationReadiness(&k, metav1.ConditionFalse, reason, trimString(message, MaxConditionMessageLength), revision)
-	SetKustomizationHealthiness(&k, metav1.ConditionFalse, reason, reason)
-	if revision != "" {
-		k.Status.LastAttemptedRevision = revision
-	}
-	k.Status.Inventory = inventory
-	return k
-}
-
-// KustomizationReadyInventory registers a successful apply attempt of the given Kustomization.
-func KustomizationReadyInventory(k Kustomization, inventory *ResourceInventory, revision, reason, message string) Kustomization {
-	SetKustomizationReadiness(&k, metav1.ConditionTrue, reason, trimString(message, MaxConditionMessageLength), revision)
-	SetKustomizationHealthiness(&k, metav1.ConditionTrue, reason, reason)
-	k.Status.Inventory = inventory
-	k.Status.LastAppliedRevision = revision
-	return k
-}
-
// GetTimeout returns the timeout with default.
func (in Kustomization) GetTimeout() time.Duration {
	duration := in.Spec.Interval.Duration - 30*time.Second
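These hand-rolled status helpers are what the PR removes; the controller switches to the shared getters and setters in `github.com/fluxcd/pkg/runtime/conditions`. A minimal sketch of reading status the new way, using only the condition functions exercised by the tests later in this diff (the `report` function itself is illustrative):

```go
import (
	"fmt"

	"github.com/fluxcd/pkg/apis/meta"
	"github.com/fluxcd/pkg/runtime/conditions"

	kustomizev1 "github.com/fluxcd/kustomize-controller/api/v1beta2"
)

// report prints a one-line summary of the standardized conditions,
// replacing direct fiddling with k.Status.Conditions.
func report(k *kustomizev1.Kustomization) {
	fmt.Printf("ready=%t reason=%s message=%q\n",
		conditions.IsReady(k),
		conditions.GetReason(k, meta.ReadyCondition),
		conditions.GetMessage(k, meta.ReadyCondition))
	if conditions.IsReconciling(k) {
		fmt.Println("still reconciling:", conditions.GetMessage(k, meta.ReconcilingCondition))
	}
}
```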
File diff suppressed because it is too large
@@ -218,7 +218,7 @@ func TestKustomizationReconciler_Decryptor(t *testing.T) {

	g.Eventually(func() bool {
		_ = k8sClient.Get(context.Background(), client.ObjectKeyFromObject(kustomization), resultK)
-		return resultK.Status.LastAttemptedRevision == revision
+		return resultK.Status.LastAppliedRevision == revision
	}, timeout, time.Second).Should(BeTrue())

	events := getEvents(resultK.GetName(), map[string]string{"kustomize.toolkit.fluxcd.io/revision": revision})
@@ -113,28 +113,31 @@ stringData:
	resultK := &kustomizev1.Kustomization{}
	resultSecret := &corev1.Secret{}

-	g.Eventually(func() bool {
-		_ = k8sClient.Get(context.Background(), client.ObjectKeyFromObject(kustomization), resultK)
-		return resultK.Status.LastAppliedRevision == revision
-	}, timeout, time.Second).Should(BeTrue())

	t.Run("creates immutable secret", func(t *testing.T) {
+		g.Eventually(func() bool {
+			_ = k8sClient.Get(context.Background(), client.ObjectKeyFromObject(kustomization), resultK)
+			return resultK.Status.LastAppliedRevision == revision
+		}, timeout, time.Second).Should(BeTrue())
+		logStatus(t, resultK)
+
+		kstatusCheck.CheckErr(ctx, resultK)
		g.Expect(k8sClient.Get(context.Background(), types.NamespacedName{Name: id, Namespace: id}, resultSecret)).Should(Succeed())
	})

	t.Run("fails to update immutable secret", func(t *testing.T) {
-		artifact, err := testServer.ArtifactFromFiles(manifests(id, randStringRunes(5)))
+		artifact, err = testServer.ArtifactFromFiles(manifests(id, randStringRunes(5)))
		g.Expect(err).NotTo(HaveOccurred())
-		revision := "v2.0.0"
+		revision = "v2.0.0"
		err = applyGitRepository(repositoryName, artifact, revision)
		g.Expect(err).NotTo(HaveOccurred())

		g.Eventually(func() bool {
			_ = k8sClient.Get(context.Background(), client.ObjectKeyFromObject(kustomization), resultK)
-			return resultK.Status.LastAttemptedRevision == revision
+			return isReconcileFailure(resultK)
		}, timeout, time.Second).Should(BeTrue())
+		logStatus(t, resultK)

		g.Expect(apimeta.IsStatusConditionTrue(resultK.Status.Conditions, meta.ReadyCondition)).To(BeFalse())
		//kstatusCheck.CheckErr(ctx, resultK)

		t.Run("emits validation error event", func(t *testing.T) {
			events := getEvents(resultK.GetName(), map[string]string{"kustomize.toolkit.fluxcd.io/revision": revision})
@@ -145,9 +148,9 @@ stringData:
	})

	t.Run("recreates immutable secret", func(t *testing.T) {
-		artifact, err := testServer.ArtifactFromFiles(manifests(id, randStringRunes(5)))
+		artifact, err = testServer.ArtifactFromFiles(manifests(id, randStringRunes(5)))
		g.Expect(err).NotTo(HaveOccurred())
-		revision := "v3.0.0"
+		revision = "v3.0.0"
		err = applyGitRepository(repositoryName, artifact, revision)
		g.Expect(err).NotTo(HaveOccurred())
@@ -159,10 +162,12 @@ stringData:

		g.Eventually(func() bool {
			_ = k8sClient.Get(context.Background(), client.ObjectKeyFromObject(kustomization), resultK)
-			return resultK.Status.LastAppliedRevision == revision
+			return isReconcileSuccess(resultK)
		}, timeout, time.Second).Should(BeTrue())
+		logStatus(t, resultK)

+		//kstatusCheck.CheckErr(ctx, resultK)

		g.Expect(apimeta.IsStatusConditionTrue(resultK.Status.Conditions, meta.ReadyCondition)).To(BeTrue())
		g.Expect(apimeta.IsStatusConditionTrue(resultK.Status.Conditions, kustomizev1.HealthyCondition)).To(BeTrue())
	})
}
@@ -130,10 +130,9 @@ data:
	g.Eventually(func() bool {
		_ = k8sClient.Get(context.Background(), client.ObjectKeyFromObject(kustomization), resultK)
		readyCondition = apimeta.FindStatusCondition(resultK.Status.Conditions, meta.ReadyCondition)
-		return apimeta.IsStatusConditionFalse(resultK.Status.Conditions, meta.ReadyCondition)
+		return readyCondition.Reason == kustomizev1.ReconciliationFailedReason
	}, timeout, time.Second).Should(BeTrue())

-	g.Expect(readyCondition.Reason).To(Equal(kustomizev1.ReconciliationFailedReason))
	g.Expect(readyCondition.Message).To(ContainSubstring("system:serviceaccount:%s:default", id))
})
@@ -306,6 +306,20 @@ kind: Kustomization

		g.Expect(len(resultK.Status.Inventory.Entries)).Should(BeIdenticalTo(2))
	})

+	t.Run("deletes suspended", func(t *testing.T) {
+		g.Eventually(func() error {
+			_ = k8sClient.Get(context.Background(), client.ObjectKeyFromObject(kustomization), resultK)
+			resultK.Spec.Suspend = true
+			return k8sClient.Update(context.Background(), resultK)
+		}, timeout, time.Second).Should(BeNil())
+
+		g.Expect(k8sClient.Delete(context.Background(), kustomization)).To(Succeed())
+		g.Eventually(func() bool {
+			err = k8sClient.Get(context.Background(), client.ObjectKeyFromObject(kustomization), kustomization)
+			return apierrors.IsNotFound(err)
+		}, timeout, time.Second).Should(BeTrue())
+	})
}

func TestKustomizationReconciler_PruneSkipNotOwned(t *testing.T) {
@@ -22,22 +22,27 @@ import (
	"testing"
	"time"

-	"github.com/fluxcd/pkg/apis/meta"
-	"github.com/fluxcd/pkg/testserver"
-	sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
	. "github.com/onsi/gomega"
-	apimeta "k8s.io/apimachinery/pkg/api/meta"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+
+	"github.com/fluxcd/pkg/apis/meta"
+	"github.com/fluxcd/pkg/runtime/conditions"
+	"github.com/fluxcd/pkg/testserver"
+	sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
+
+	kustomizev1 "github.com/fluxcd/kustomize-controller/api/v1beta2"
)

-func TestKustomizationReconciler_HealthCheck(t *testing.T) {
+func TestKustomizationReconciler_WaitConditions(t *testing.T) {
	g := NewWithT(t)
	id := "wait-" + randStringRunes(5)
	revision := "v1.0.0"
+	resultK := &kustomizev1.Kustomization{}
+	reconcileRequestAt := metav1.Now().String()

	err := createNamespace(id)
	g.Expect(err).NotTo(HaveOccurred(), "failed to create test namespace")
@@ -114,24 +119,22 @@ parameters:

	g.Expect(k8sClient.Create(context.Background(), kustomization)).To(Succeed())

-	resultK := &kustomizev1.Kustomization{}
-
-	g.Eventually(func() bool {
-		_ = k8sClient.Get(context.Background(), client.ObjectKeyFromObject(kustomization), resultK)
-		return resultK.Status.LastAppliedRevision == revision
-	}, timeout, time.Second).Should(BeTrue())
-
	t.Run("reports healthy status", func(t *testing.T) {
		g.Eventually(func() bool {
			_ = k8sClient.Get(context.Background(), client.ObjectKeyFromObject(kustomization), resultK)
-			ready := apimeta.IsStatusConditionTrue(resultK.Status.Conditions, meta.ReadyCondition)
-			healthy := apimeta.IsStatusConditionTrue(resultK.Status.Conditions, kustomizev1.HealthyCondition)
-			return ready && healthy
+			return isReconcileSuccess(resultK)
		}, timeout, time.Second).Should(BeTrue())
+		logStatus(t, resultK)
+
+		g.Expect(conditions.IsTrue(resultK, kustomizev1.HealthyCondition)).To(BeTrue())
+		g.Expect(conditions.GetReason(resultK, kustomizev1.HealthyCondition)).To(BeIdenticalTo(meta.SucceededReason))
+
+		g.Expect(resultK.Status.ObservedGeneration).To(BeIdenticalTo(resultK.Generation))
+
+		//kstatusCheck.CheckErr(ctx, resultK)
	})

-	t.Run("reports unhealthy status", func(t *testing.T) {
-		reconcileRequestAt := metav1.Now().String()
+	t.Run("reports progressing status", func(t *testing.T) {
		g.Eventually(func() error {
			_ = k8sClient.Get(context.Background(), client.ObjectKeyFromObject(kustomization), resultK)
			resultK.SetAnnotations(map[string]string{
@@ -149,32 +152,44 @@ parameters:
			return k8sClient.Update(context.Background(), resultK)
		}, timeout, time.Second).Should(BeNil())

-		readyCondition := &metav1.Condition{}
-		healthyCondition := &metav1.Condition{}
		g.Eventually(func() bool {
			_ = k8sClient.Get(context.Background(), client.ObjectKeyFromObject(kustomization), resultK)
-			readyCondition = apimeta.FindStatusCondition(resultK.Status.Conditions, meta.ReadyCondition)
-			healthyCondition = apimeta.FindStatusCondition(resultK.Status.Conditions, kustomizev1.HealthyCondition)
-			return healthyCondition.Reason == meta.ProgressingReason
+			return isReconcileRunning(resultK)
		}, timeout, time.Second).Should(BeTrue())
+		logStatus(t, resultK)

-		expectedMessage := "running health checks"
-		g.Expect(readyCondition.Status).To(BeIdenticalTo(metav1.ConditionUnknown))
-		g.Expect(readyCondition.Message).To(ContainSubstring(expectedMessage))
-		g.Expect(healthyCondition.Status).To(BeIdenticalTo(metav1.ConditionUnknown))
-		g.Expect(healthyCondition.Message).To(ContainSubstring(expectedMessage))
+		expectedMessage := "Running health checks"
+		g.Expect(conditions.IsUnknown(resultK, kustomizev1.HealthyCondition)).To(BeTrue())
+		g.Expect(conditions.IsUnknown(resultK, meta.ReadyCondition)).To(BeTrue())
+
+		for _, c := range []string{meta.ReconcilingCondition, kustomizev1.HealthyCondition} {
+			g.Expect(conditions.GetReason(resultK, c)).To(BeIdenticalTo(meta.ProgressingReason))
+			g.Expect(conditions.GetMessage(resultK, c)).To(ContainSubstring(expectedMessage))
+			g.Expect(conditions.GetObservedGeneration(resultK, c)).To(BeIdenticalTo(resultK.Generation))
+		}
	})

+	t.Run("reports unhealthy status", func(t *testing.T) {
		g.Eventually(func() bool {
			_ = k8sClient.Get(context.Background(), client.ObjectKeyFromObject(kustomization), resultK)
-			readyCondition = apimeta.FindStatusCondition(resultK.Status.Conditions, meta.ReadyCondition)
-			healthyCondition = apimeta.FindStatusCondition(resultK.Status.Conditions, kustomizev1.HealthyCondition)
-			return healthyCondition.Reason == kustomizev1.HealthCheckFailedReason
-		}, time.Minute, time.Second).Should(BeTrue())
+			return isReconcileFailure(resultK)
+		}, timeout, time.Second).Should(BeTrue())
+		logStatus(t, resultK)
+
+		for _, c := range []string{kustomizev1.HealthyCondition, meta.ReadyCondition} {
+			g.Expect(conditions.IsFalse(resultK, c)).To(BeTrue())
+			g.Expect(conditions.GetReason(resultK, c)).To(BeIdenticalTo(kustomizev1.HealthCheckFailedReason))
+			g.Expect(conditions.GetObservedGeneration(resultK, c)).To(BeIdenticalTo(resultK.Generation))
+		}
+
+		expectedMessage := "Running health checks"
+		g.Expect(conditions.GetReason(resultK, meta.ReconcilingCondition)).To(BeIdenticalTo(kustomizev1.ProgressingWithRetryReason))
+		g.Expect(conditions.GetMessage(resultK, meta.ReconcilingCondition)).To(ContainSubstring(expectedMessage))

		g.Expect(resultK.Status.LastHandledReconcileAt).To(BeIdenticalTo(reconcileRequestAt))
-		g.Expect(readyCondition.Status).To(BeIdenticalTo(metav1.ConditionFalse))
-		g.Expect(healthyCondition.Status).To(BeIdenticalTo(metav1.ConditionFalse))
-		g.Expect(healthyCondition.Message).To(BeIdenticalTo(kustomizev1.HealthCheckFailedReason))
-		g.Expect(resultK.Status.ObservedGeneration).To(BeIdenticalTo(resultK.Generation - 1))

+		//kstatusCheck.CheckErr(ctx, resultK)
	})

	t.Run("emits unhealthy event", func(t *testing.T) {
@@ -193,10 +208,24 @@ parameters:

		g.Eventually(func() bool {
			_ = k8sClient.Get(context.Background(), client.ObjectKeyFromObject(kustomization), resultK)
-			ready := apimeta.IsStatusConditionTrue(resultK.Status.Conditions, meta.ReadyCondition)
-			healthy := apimeta.IsStatusConditionTrue(resultK.Status.Conditions, kustomizev1.HealthyCondition)
-			return ready && healthy
+			return isReconcileSuccess(resultK)
		}, timeout, time.Second).Should(BeTrue())
+		logStatus(t, resultK)
+
+		expectedMessage := "Health check passed"
+		g.Expect(conditions.IsTrue(resultK, kustomizev1.HealthyCondition)).To(BeTrue())
+		g.Expect(conditions.GetReason(resultK, kustomizev1.HealthyCondition)).To(BeIdenticalTo(meta.SucceededReason))
+		g.Expect(conditions.GetObservedGeneration(resultK, kustomizev1.HealthyCondition)).To(BeIdenticalTo(resultK.Generation))
+		g.Expect(conditions.GetMessage(resultK, kustomizev1.HealthyCondition)).To(ContainSubstring(expectedMessage))
+
+		g.Expect(conditions.IsTrue(resultK, meta.ReadyCondition)).To(BeTrue())
+		g.Expect(conditions.GetReason(resultK, meta.ReadyCondition)).To(BeIdenticalTo(kustomizev1.ReconciliationSucceededReason))
+		g.Expect(conditions.GetObservedGeneration(resultK, meta.ReadyCondition)).To(BeIdenticalTo(resultK.Generation))
+		g.Expect(conditions.GetMessage(resultK, meta.ReadyCondition)).To(BeIdenticalTo(fmt.Sprintf("Applied revision: %s", revision)))
+
+		g.Expect(resultK.Status.ObservedGeneration).To(BeIdenticalTo(resultK.Generation))
+
+		//kstatusCheck.CheckErr(ctx, resultK)
	})

	t.Run("emits recovery event", func(t *testing.T) {
@@ -206,4 +235,44 @@ parameters:
		g.Expect(events[len(events)-2].Type).To(BeIdenticalTo("Normal"))
		g.Expect(events[len(events)-2].Message).To(ContainSubstring(expectedMessage))
	})
+
+	t.Run("reports new revision healthy status", func(t *testing.T) {
+		revision = "v2.0.0"
+		artifact, err = testServer.ArtifactFromFiles(manifests(id, revision))
+		g.Expect(err).NotTo(HaveOccurred())
+		err = applyGitRepository(repositoryName, artifact, revision)
+		g.Expect(err).NotTo(HaveOccurred())
+
+		g.Eventually(func() bool {
+			_ = k8sClient.Get(context.Background(), client.ObjectKeyFromObject(kustomization), resultK)
+			return resultK.Status.LastAppliedRevision == revision
+		}, timeout, time.Second).Should(BeTrue())
+		logStatus(t, resultK)
+
+		g.Expect(isReconcileSuccess(resultK)).To(BeTrue())
+		g.Expect(conditions.IsTrue(resultK, kustomizev1.HealthyCondition)).To(BeTrue())
+		g.Expect(conditions.GetMessage(resultK, meta.ReadyCondition)).To(BeIdenticalTo(fmt.Sprintf("Applied revision: %s", revision)))
+
+		g.Expect(resultK.Status.LastAttemptedRevision).To(BeIdenticalTo(resultK.Status.LastAppliedRevision))
+
+		//kstatusCheck.CheckErr(ctx, resultK)
+	})
+
+	t.Run("emits event for the new revision", func(t *testing.T) {
+		expectedMessage := "Health check passed"
+		events := getEvents(resultK.GetName(), map[string]string{"kustomize.toolkit.fluxcd.io/revision": revision})
+		g.Expect(len(events) > 1).To(BeTrue())
+		g.Expect(events[len(events)-2].Type).To(BeIdenticalTo("Normal"))
+		g.Expect(events[len(events)-2].Message).To(ContainSubstring(expectedMessage))
+	})
+
+	t.Run("finalizes object", func(t *testing.T) {
+		g.Expect(controllerutil.ContainsFinalizer(resultK, kustomizev1.KustomizationFinalizer)).To(BeTrue())
+		g.Expect(k8sClient.Delete(context.Background(), resultK)).To(Succeed())
+
+		g.Eventually(func() bool {
+			err = k8sClient.Get(context.Background(), client.ObjectKeyFromObject(kustomization), resultK)
+			return apierrors.IsNotFound(err)
+		}, timeout, time.Second).Should(BeTrue())
+	})
}
@@ -23,15 +23,10 @@ import (
	"math/rand"
	"os"
	"path/filepath"
+	"sigs.k8s.io/yaml"
	"testing"
	"time"

-	kustomizev1 "github.com/fluxcd/kustomize-controller/api/v1beta2"
-	"github.com/fluxcd/pkg/apis/meta"
-	"github.com/fluxcd/pkg/runtime/controller"
-	"github.com/fluxcd/pkg/runtime/testenv"
-	"github.com/fluxcd/pkg/testserver"
-	sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
	"github.com/hashicorp/vault/api"
	"github.com/ory/dockertest"
	corev1 "k8s.io/api/core/v1"
@@ -43,6 +38,16 @@ import (
	"sigs.k8s.io/controller-runtime/pkg/envtest"
	controllerLog "sigs.k8s.io/controller-runtime/pkg/log"
	"sigs.k8s.io/controller-runtime/pkg/log/zap"
+
+	"github.com/fluxcd/pkg/apis/meta"
+	"github.com/fluxcd/pkg/runtime/conditions"
+	kcheck "github.com/fluxcd/pkg/runtime/conditions/check"
+	"github.com/fluxcd/pkg/runtime/controller"
+	"github.com/fluxcd/pkg/runtime/testenv"
+	"github.com/fluxcd/pkg/testserver"
+	sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
+
+	kustomizev1 "github.com/fluxcd/kustomize-controller/api/v1beta2"
)

func init() {
@@ -65,6 +70,7 @@ var (
	testMetricsH controller.Metrics
	ctx          = ctrl.SetupSignalHandler()
	kubeConfig   []byte
+	kstatusCheck *kcheck.Checker
	debugMode    = os.Getenv("DEBUG_TEST") != ""
)
@@ -156,11 +162,15 @@ func TestMain(m *testing.M) {
	runInContext(func(testEnv *testenv.Environment) {
		controllerName := "kustomize-controller"
		testMetricsH = controller.MustMakeMetrics(testEnv)
+		kstatusCheck = kcheck.NewChecker(testEnv.Client,
+			&kcheck.Conditions{
+				NegativePolarity: []string{meta.StalledCondition, meta.ReconcilingCondition},
+			})
		reconciler = &KustomizationReconciler{
-			ControllerName:  controllerName,
-			Client:          testEnv,
-			EventRecorder:   testEnv.GetEventRecorderFor(controllerName),
-			MetricsRecorder: testMetricsH.MetricsRecorder,
+			ControllerName: controllerName,
+			Client:         testEnv,
+			EventRecorder:  testEnv.GetEventRecorderFor(controllerName),
+			Metrics:        testMetricsH,
		}
		if err := (reconciler).SetupWithManager(testEnv, KustomizationReconcilerOptions{
			MaxConcurrentReconciles: 4,
@@ -186,6 +196,39 @@ func randStringRunes(n int) string {
	return string(b)
}

+func isReconcileRunning(k *kustomizev1.Kustomization) bool {
+	return conditions.IsReconciling(k) &&
+		conditions.GetReason(k, meta.ReconcilingCondition) != kustomizev1.ProgressingWithRetryReason
+}
+
+func isReconcileSuccess(k *kustomizev1.Kustomization) bool {
+	return conditions.IsReady(k) &&
+		conditions.GetObservedGeneration(k, meta.ReadyCondition) == k.Generation &&
+		k.Status.ObservedGeneration == k.Generation &&
+		k.Status.LastAppliedRevision == k.Status.LastAttemptedRevision
+}
+
+func isReconcileFailure(k *kustomizev1.Kustomization) bool {
+	if conditions.IsStalled(k) {
+		return true
+	}
+
+	isHandled := true
+	if v, ok := meta.ReconcileAnnotationValue(k.GetAnnotations()); ok {
+		isHandled = k.Status.LastHandledReconcileAt == v
+	}
+
+	return isHandled && conditions.IsReconciling(k) &&
+		conditions.IsFalse(k, meta.ReadyCondition) &&
+		conditions.GetObservedGeneration(k, meta.ReadyCondition) == k.Generation &&
+		conditions.GetReason(k, meta.ReconcilingCondition) == kustomizev1.ProgressingWithRetryReason
+}
+
+func logStatus(t *testing.T, k *kustomizev1.Kustomization) {
+	sts, _ := yaml.Marshal(k.Status)
+	t.Log(string(sts))
+}
+
func getEvents(objName string, annotations map[string]string) []corev1.Event {
	var result []corev1.Event
	events := &corev1.EventList{}
@@ -1345,20 +1345,38 @@ secretGenerator:

## Status

-When the controller completes a Kustomization reconciliation, reports the result in the `status` sub-resource.
+Every time the controller starts reconciling a `Kustomization`, it adds the `Reconciling` condition to `status` and
+updates its message to report the action performed during a reconciliation run:

-A successful reconciliation sets the ready condition to `true` and updates the revision field:
```yaml
+conditions:
+- lastTransitionTime: "2022-10-17T13:40:21Z"
+  message: Detecting drift for revision main/a1afe267b54f38b46b487f6e938a6fd508278c07 with a timeout of 50s
+  observedGeneration: 2
+  reason: Progressing
+  status: "True"
+  type: Reconciling
+- lastTransitionTime: "2022-10-17T13:40:21Z"
+  message: Reconciliation in progress
+  observedGeneration: 2
+  reason: Progressing
+  status: Unknown
+  type: Ready
+```
+
+If the reconciliation finishes successfully, the `Reconciling` condition is removed from `status`
+and the `Ready` condition is set to `True`:
+
+```yaml
status:
  conditions:
-  - lastTransitionTime: "2020-09-17T19:28:48Z"
-    message: "Applied revision: master/a1afe267b54f38b46b487f6e938a6fd508278c07"
+  - lastTransitionTime: "2022-10-17T13:40:21Z"
+    message: "Applied revision: main/a1afe267b54f38b46b487f6e938a6fd508278c07"
    reason: ReconciliationSucceeded
    status: "True"
    type: Ready
-  lastAppliedRevision: master/a1afe267b54f38b46b487f6e938a6fd508278c07
-  lastAttemptedRevision: master/a1afe267b54f38b46b487f6e938a6fd508278c07
+  lastAppliedRevision: main/a1afe267b54f38b46b487f6e938a6fd508278c07
+  lastAttemptedRevision: main/a1afe267b54f38b46b487f6e938a6fd508278c07
```
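The conditions documented above are also convenient to consume programmatically. A minimal sketch that polls until the `Ready` condition becomes `True`, assuming a controller-runtime client `c` and a Kustomization named `backend` in `default` (the names and function are illustrative, not part of this commit):

```go
import (
	"context"
	"time"

	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/wait"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/fluxcd/pkg/runtime/conditions"

	kustomizev1 "github.com/fluxcd/kustomize-controller/api/v1beta2"
)

// waitForReady polls the status sub-resource until the Ready condition
// is True, or the timeout expires.
func waitForReady(ctx context.Context, c client.Client) error {
	k := &kustomizev1.Kustomization{}
	return wait.PollImmediate(time.Second, 5*time.Minute, func() (bool, error) {
		if err := c.Get(ctx, types.NamespacedName{Name: "backend", Namespace: "default"}, k); err != nil {
			return false, err
		}
		return conditions.IsReady(k), nil
	})
}
```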

If `spec.wait` or `spec.healthChecks` is enabled, the health assessment result
@@ -1376,10 +1394,12 @@ The controller logs the Kubernetes objects:
```json
{
  "level": "info",
-  "ts": "2020-09-17T07:27:11.921Z",
-  "logger": "controllers.Kustomization",
-  "msg": "Kustomization applied in 1.436096591s",
-  "kustomization": "default/backend",
+  "ts": "2022-09-17T07:27:11.921Z",
+  "controllerGroup": "kustomize.toolkit.fluxcd.io",
+  "msg": "server-side apply completed",
+  "name": "backend",
+  "namespace": "default",
+  "revision": "main/a1afe267b54f38b46b487f6e938a6fd508278c07",
  "output": {
    "service/backend": "created",
    "deployment.apps/backend": "created",
@@ -1388,7 +1408,7 @@ The controller logs the Kubernetes objects:
}
```

-A failed reconciliation sets the ready condition to `false`:
+A failed reconciliation sets the `Ready` condition to `false`:

```yaml
status:
@@ -1409,9 +1429,12 @@ When a reconciliation fails, the controller logs the error and issues a Kubernetes event:
```json
{
  "level": "error",
-  "ts": "2020-09-17T07:27:11.921Z",
-  "logger": "controllers.Kustomization",
-  "kustomization": "default/backend",
+  "ts": "2022-09-17T07:27:11.921Z",
+  "controllerGroup": "kustomize.toolkit.fluxcd.io",
+  "msg": "server-side apply completed",
+  "name": "backend",
+  "namespace": "default",
+  "revision": "main/a1afe267b54f38b46b487f6e938a6fd508278c07",
  "error": "The Service 'backend' is invalid: spec.type: Unsupported value: 'Ingress'"
}
```
go.mod
@@ -26,7 +26,7 @@ require (
	github.com/fluxcd/pkg/apis/meta v0.17.0
	github.com/fluxcd/pkg/http/fetch v0.1.0
	github.com/fluxcd/pkg/kustomize v0.8.0
-	github.com/fluxcd/pkg/runtime v0.20.0
+	github.com/fluxcd/pkg/runtime v0.22.0
	github.com/fluxcd/pkg/ssa v0.21.0
	github.com/fluxcd/pkg/tar v0.1.0
	github.com/fluxcd/pkg/testserver v0.4.0
go.sum
@@ -294,8 +294,8 @@ github.com/fluxcd/pkg/http/fetch v0.1.0 h1:Ig/kZuM0+jHBJnwHn5UUseTKIYD5w8X4bInJy
github.com/fluxcd/pkg/http/fetch v0.1.0/go.mod h1:1CjOSfn7aOeHf2ZRA2+GTKHg442zN6X/fSys3a0KLC0=
github.com/fluxcd/pkg/kustomize v0.8.0 h1:8AdEvp6y38ISZzoi0H82Si5zkmLXClbeX10W7HevB00=
github.com/fluxcd/pkg/kustomize v0.8.0/go.mod h1:zGtCZF6V3hMWcf46SqrQc10fS9yUlKzi2UcFUeabDAE=
-github.com/fluxcd/pkg/runtime v0.20.0 h1:F9q9wap0BhjQszboUroJrYOB1C831zkQwTAk2tlMIQc=
-github.com/fluxcd/pkg/runtime v0.20.0/go.mod h1:KVHNQMhccuLTjMDFVCr/SF+4Z554bcMH1LncC4sQf8o=
+github.com/fluxcd/pkg/runtime v0.22.0 h1:4YV/An41b+OGdSWDogwFfHr22CEE/in+lBLEK0fr1yc=
+github.com/fluxcd/pkg/runtime v0.22.0/go.mod h1:Cm6jIhltzXIM3CRRY6SFASDn+z2m/1yPqOWwD73c3io=
github.com/fluxcd/pkg/ssa v0.21.0 h1:aeoTohPNf5x7jQjHidyLJAOHw3EyHOQoQN3mN2i+4cc=
github.com/fluxcd/pkg/ssa v0.21.0/go.mod h1:jumyhUbEMDnduN7anSlKfxl2fEoyeyv+Ta5hWCbxI5Q=
github.com/fluxcd/pkg/tar v0.1.0 h1:ObyUml8NJtGQtz/cRgexd7HU2mQsTmgjz2dtX4xdnng=
@@ -117,7 +117,7 @@ type Decryptor struct {
	client client.Client
	// kustomization is the v1beta2.Kustomization we are decrypting for.
	// The v1beta2.Decryption of the object is used to ImportKeys().
-	kustomization kustomizev1.Kustomization
+	kustomization *kustomizev1.Kustomization
	// maxFileSize is the max size in bytes a file is allowed to have to be
	// decrypted. Defaults to maxEncryptedFileSize.
	maxFileSize int64
@@ -154,7 +154,7 @@ type Decryptor struct {

// NewDecryptor creates a new Decryptor for the given kustomization.
// gnuPGHome can be empty, in which case the systems' keyring is used.
-func NewDecryptor(root string, client client.Client, kustomization kustomizev1.Kustomization, maxFileSize int64, gnuPGHome string) *Decryptor {
+func NewDecryptor(root string, client client.Client, kustomization *kustomizev1.Kustomization, maxFileSize int64, gnuPGHome string) *Decryptor {
	return &Decryptor{
		root:   root,
		client: client,
@@ -166,7 +166,7 @@ func NewDecryptor(root string, client client.Client, kustomization kustomizev1.K

// NewTempDecryptor creates a new Decryptor, with a temporary GnuPG
// home directory to Decryptor.ImportKeys() into.
-func NewTempDecryptor(root string, client client.Client, kustomization kustomizev1.Kustomization) (*Decryptor, func(), error) {
+func NewTempDecryptor(root string, client client.Client, kustomization *kustomizev1.Kustomization) (*Decryptor, func(), error) {
	gnuPGHome, err := pgp.NewGnuPGHome()
	if err != nil {
		return nil, nil, fmt.Errorf("cannot create decryptor: %w", err)
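With the constructor now taking a pointer, call sites pass the reconciled object directly instead of copying it, as the updated tests below show. A minimal usage sketch, written as if in the same package; the wrapper name is illustrative, and the `ImportKeys(ctx)` call is assumed from the doc comments above:

```go
// newDecryptorFor builds a throwaway SOPS decryptor for one reconciliation.
// c is a controller-runtime client, k the *kustomizev1.Kustomization being
// reconciled, and root the directory holding the fetched source.
func newDecryptorFor(ctx context.Context, c client.Client, k *kustomizev1.Kustomization, root string) (*Decryptor, func(), error) {
	d, cleanup, err := NewTempDecryptor(root, c, k)
	if err != nil {
		return nil, nil, err
	}
	// Import the keys referenced by k.Spec.Decryption into the
	// temporary GnuPG home created by NewTempDecryptor.
	if err := d.ImportKeys(ctx); err != nil {
		cleanup()
		return nil, nil, err
	}
	return d, cleanup, nil
}
```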
@@ -375,7 +375,7 @@ clientSecret: some-client-secret`),
		},
	}

-	d, cleanup, err := NewTempDecryptor("", cb.Build(), kustomization)
+	d, cleanup, err := NewTempDecryptor("", cb.Build(), &kustomization)
	g.Expect(err).ToNot(HaveOccurred())
	t.Cleanup(cleanup)
@@ -442,7 +442,6 @@ func TestDecryptor_SopsDecryptWithFormat(t *testing.T) {
			g.Expect(bytes.Contains(encData, sopsFormatToMarkerBytes[inputFormat])).To(BeTrue())

			out, err := kd.SopsDecryptWithFormat(encData, inputFormat, outputFormat)
-			t.Logf("%s", out)
			g.Expect(err).ToNot(HaveOccurred())
			g.Expect(out).To(Equal([]byte("key: value\n")))
		})
@@ -551,7 +550,7 @@ func TestDecryptor_DecryptResource(t *testing.T) {
			Provider: DecryptionProviderSOPS,
		}

-		d, cleanup, err := NewTempDecryptor("", fake.NewClientBuilder().Build(), *kus)
+		d, cleanup, err := NewTempDecryptor("", fake.NewClientBuilder().Build(), kus)
		g.Expect(err).ToNot(HaveOccurred())
		t.Cleanup(cleanup)
@@ -592,7 +591,7 @@ func TestDecryptor_DecryptResource(t *testing.T) {
			Provider: DecryptionProviderSOPS,
		}

-		d, cleanup, err := NewTempDecryptor("", fake.NewClientBuilder().Build(), *kus)
+		d, cleanup, err := NewTempDecryptor("", fake.NewClientBuilder().Build(), kus)
		g.Expect(err).ToNot(HaveOccurred())
		t.Cleanup(cleanup)
@@ -627,7 +626,7 @@ func TestDecryptor_DecryptResource(t *testing.T) {
			Provider: DecryptionProviderSOPS,
		}

-		d, cleanup, err := NewTempDecryptor("", fake.NewClientBuilder().Build(), *kus)
+		d, cleanup, err := NewTempDecryptor("", fake.NewClientBuilder().Build(), kus)
		g.Expect(err).ToNot(HaveOccurred())
		t.Cleanup(cleanup)
@@ -662,7 +661,7 @@ func TestDecryptor_DecryptResource(t *testing.T) {
			Provider: DecryptionProviderSOPS,
		}

-		d, cleanup, err := NewTempDecryptor("", fake.NewClientBuilder().Build(), *kus)
+		d, cleanup, err := NewTempDecryptor("", fake.NewClientBuilder().Build(), kus)
		g.Expect(err).ToNot(HaveOccurred())
		t.Cleanup(cleanup)
@@ -710,7 +709,7 @@ func TestDecryptor_DecryptResource(t *testing.T) {
	t.Run("nil resource", func(t *testing.T) {
		g := NewWithT(t)

-		d, cleanup, err := NewTempDecryptor("", fake.NewClientBuilder().Build(), *kustomization.DeepCopy())
+		d, cleanup, err := NewTempDecryptor("", fake.NewClientBuilder().Build(), kustomization.DeepCopy())
		g.Expect(err).ToNot(HaveOccurred())
		t.Cleanup(cleanup)
@@ -722,7 +721,7 @@ func TestDecryptor_DecryptResource(t *testing.T) {
	t.Run("no decryption spec", func(t *testing.T) {
		g := NewWithT(t)

-		d, cleanup, err := NewTempDecryptor("", fake.NewClientBuilder().Build(), *kustomization.DeepCopy())
+		d, cleanup, err := NewTempDecryptor("", fake.NewClientBuilder().Build(), kustomization.DeepCopy())
		g.Expect(err).ToNot(HaveOccurred())
		t.Cleanup(cleanup)
@@ -738,7 +737,7 @@ func TestDecryptor_DecryptResource(t *testing.T) {
		kus.Spec.Decryption = &kustomizev1.Decryption{
			Provider: "not-supported",
		}
-		d, cleanup, err := NewTempDecryptor("", fake.NewClientBuilder().Build(), *kus)
+		d, cleanup, err := NewTempDecryptor("", fake.NewClientBuilder().Build(), kus)
		g.Expect(err).ToNot(HaveOccurred())
		t.Cleanup(cleanup)
@@ -36,10 +36,10 @@ import (

type KustomizeGenerator struct {
	root          string
-	kustomization kustomizev1.Kustomization
+	kustomization *kustomizev1.Kustomization
}

-func NewGenerator(root string, kustomization kustomizev1.Kustomization) *KustomizeGenerator {
+func NewGenerator(root string, kustomization *kustomizev1.Kustomization) *KustomizeGenerator {
	return &KustomizeGenerator{
		root:          root,
		kustomization: kustomization,
@@ -80,7 +80,7 @@ func TestGenerator_WriteFile(t *testing.T) {
				},
			},
		}
-		kfile, err := NewGenerator(filepath.Join(tmpDir, tt.dir), ks).WriteFile(filepath.Join(tmpDir, tt.dir))
+		kfile, err := NewGenerator(filepath.Join(tmpDir, tt.dir), &ks).WriteFile(filepath.Join(tmpDir, tt.dir))
		g.Expect(err).ToNot(HaveOccurred())

		kfileYAML, err := os.ReadFile(kfile)
@@ -43,7 +43,7 @@ const varsubRegex = "^[_[:alpha:]][_[:alpha:][:digit:]]*$"
func SubstituteVariables(
	ctx context.Context,
	kubeClient client.Client,
-	kustomization kustomizev1.Kustomization,
+	kustomization *kustomizev1.Kustomization,
	res *resource.Resource) (*resource.Resource, error) {
	resData, err := res.AsYAML()
	if err != nil {
main.go
@@ -29,15 +29,13 @@ import (
	"sigs.k8s.io/cli-utils/pkg/kstatus/polling"
	"sigs.k8s.io/cli-utils/pkg/kstatus/polling/engine"
	ctrl "sigs.k8s.io/controller-runtime"
-	crtlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics"

	"github.com/fluxcd/pkg/runtime/acl"
-	"github.com/fluxcd/pkg/runtime/client"
-	helper "github.com/fluxcd/pkg/runtime/controller"
+	runtimeClient "github.com/fluxcd/pkg/runtime/client"
+	runtimeCtrl "github.com/fluxcd/pkg/runtime/controller"
	"github.com/fluxcd/pkg/runtime/events"
	"github.com/fluxcd/pkg/runtime/leaderelection"
	"github.com/fluxcd/pkg/runtime/logger"
-	"github.com/fluxcd/pkg/runtime/metrics"
	"github.com/fluxcd/pkg/runtime/pprof"
	"github.com/fluxcd/pkg/runtime/probes"
	sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
@@ -70,11 +68,11 @@ func main() {
		healthAddr            string
		concurrent            int
		requeueDependency     time.Duration
-		clientOptions         client.Options
-		kubeConfigOpts        client.KubeConfigOptions
+		clientOptions         runtimeClient.Options
+		kubeConfigOpts        runtimeClient.KubeConfigOptions
		logOptions            logger.Options
		leaderElectionOptions leaderelection.Options
-		rateLimiterOptions    helper.RateLimiterOptions
+		rateLimiterOptions    runtimeCtrl.RateLimiterOptions
		aclOptions            acl.Options
		watchAllNamespaces    bool
		noRemoteBases         bool
@@ -103,15 +101,12 @@ func main() {

	ctrl.SetLogger(logger.NewLogger(logOptions))

-	metricsRecorder := metrics.NewRecorder()
-	crtlmetrics.Registry.MustRegister(metricsRecorder.Collectors()...)
-
	watchNamespace := ""
	if !watchAllNamespaces {
		watchNamespace = os.Getenv("RUNTIME_NAMESPACE")
	}

-	restConfig := client.GetConfigOrDie(clientOptions)
+	restConfig := runtimeClient.GetConfigOrDie(clientOptions)
	mgr, err := ctrl.NewManager(restConfig, ctrl.Options{
		Scheme:             scheme,
		MetricsBindAddress: metricsAddr,
@@ -140,6 +135,8 @@ func main() {
		os.Exit(1)
	}

+	metricsH := runtimeCtrl.MustMakeMetrics(mgr)
+
	jobStatusReader := statusreaders.NewCustomJobStatusReader(mgr.GetRESTMapper())
	pollingOpts := polling.Options{
		CustomStatusReaders: []engine.StatusReader{jobStatusReader},
@@ -148,9 +145,8 @@ func main() {
		ControllerName:        controllerName,
		DefaultServiceAccount: defaultServiceAccount,
		Client:                mgr.GetClient(),
-		Scheme:                mgr.GetScheme(),
+		Metrics:               metricsH,
		EventRecorder:         eventRecorder,
-		MetricsRecorder:       metricsRecorder,
		NoCrossNamespaceRefs:  aclOptions.NoCrossNamespaceRefs,
		NoRemoteBases:         noRemoteBases,
		KubeConfigOpts:        kubeConfigOpts,
@@ -160,7 +156,7 @@ func main() {
		MaxConcurrentReconciles:   concurrent,
		DependencyRequeueInterval: requeueDependency,
		HTTPRetry:                 httpRetry,
-		RateLimiter:               helper.GetRateLimiter(rateLimiterOptions),
+		RateLimiter:               runtimeCtrl.GetRateLimiter(rateLimiterOptions),
	}); err != nil {
		setupLog.Error(err, "unable to create controller", "controller", controllerName)
		os.Exit(1)