fix(lint): enable cyclop
Signed-off-by: Luca Burgazzoli <lburgazzoli@gmail.com>
parent c545400093
commit 3e366c0714
@ -68,5 +68,4 @@ linters:
    - gochecknoinits
    - depguard
    # validate
    - cyclop
    - gocognit
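Note: with cyclop enabled, any function whose cyclomatic complexity exceeds the configured threshold now fails the lint run. The hunks below address the findings in two ways: extracting per-item logic into small helper methods, or annotating a function with a nolint directive where the complexity is accepted. A minimal illustrative sketch of both options, using hypothetical names that are not part of this commit:

package example

import "fmt"

type item struct{ name string }

type exampleAction struct{}

// Extracting the loop body into a helper keeps both functions below the
// cyclop threshold; this mirrors the apply helpers introduced below.
func (a *exampleAction) run(items []item) error {
    for i := range items {
        if err := a.applyOne(items[i]); err != nil {
            return err
        }
    }

    return nil
}

func (a *exampleAction) applyOne(it item) error {
    if it.name == "" {
        return fmt.Errorf("missing name")
    }

    return nil
}

// Where splitting is not practical, a per-function directive suppresses the
// finding, as done for a few functions later in this commit.
//
//nolint:cyclop
func classify(n int) int {
    switch {
    case n < 0:
        return -1
    case n == 0:
        return 0
    default:
        return 1
    }
}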
@ -5,6 +5,8 @@ import (
    "fmt"
    "strconv"

    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

    k8serrors "k8s.io/apimachinery/pkg/api/errors"

    "github.com/dapr/kubernetes-operator/pkg/controller"
@ -50,11 +52,6 @@ func (a *ApplyCRDsAction) Run(ctx context.Context, rc *ReconciliationRequest) error {
    }

    for _, crd := range crds {
        dc, err := rc.Client.Dynamic(rc.Resource.Namespace, &crd)
        if err != nil {
            return fmt.Errorf("cannot create dynamic client: %w", err)
        }

        resources.Labels(&crd, map[string]string{
            helm.ReleaseGeneration: strconv.FormatInt(rc.Resource.Generation, 10),
            helm.ReleaseName:       rc.Resource.Name,
@ -62,41 +59,10 @@ func (a *ApplyCRDsAction) Run(ctx context.Context, rc *ReconciliationRequest) error {
            helm.ReleaseVersion: c.Version(),
        })

        apply := rc.Resource.Generation != rc.Resource.Status.ObservedGeneration

        _, err = dc.Get(ctx, crd.GetName(), metav1.GetOptions{})
        if err != nil && !k8serrors.IsNotFound(err) {
            return fmt.Errorf("cannot determine if CRD %s exists: %w", resources.Ref(&crd), err)
        }

        if err != nil && k8serrors.IsNotFound(err) {
            apply = true
        }

        if !apply {
            a.l.Info("run",
                "apply", "false",
                "gen", rc.Resource.Generation,
                "ref", resources.Ref(&crd),
                "generation-changed", rc.Resource.Generation != rc.Resource.Status.ObservedGeneration,
                "not-found", k8serrors.IsNotFound(err))

            continue
        }

        _, err = dc.Apply(ctx, crd.GetName(), &crd, metav1.ApplyOptions{
            FieldManager: controller.FieldManager,
            Force:        true,
        })

        err = a.apply(ctx, rc, &crd)
        if err != nil {
            return fmt.Errorf("cannot apply CRD %s: %w", resources.Ref(&crd), err)
            return err
        }

        a.l.Info("run",
            "apply", "true",
            "gen", rc.Resource.Generation,
            "ref", resources.Ref(&crd))
    }

    // invalidate the client so it gets aware of the new CRDs
@ -108,3 +74,48 @@ func (a *ApplyCRDsAction) Run(ctx context.Context, rc *ReconciliationRequest) error {
func (a *ApplyCRDsAction) Cleanup(_ context.Context, _ *ReconciliationRequest) error {
    return nil
}

func (a *ApplyCRDsAction) apply(ctx context.Context, rc *ReconciliationRequest, crd *unstructured.Unstructured) error {
    dc, err := rc.Client.Dynamic(rc.Resource.Namespace, crd)
    if err != nil {
        return fmt.Errorf("cannot create dynamic client: %w", err)
    }

    apply := rc.Resource.Generation != rc.Resource.Status.ObservedGeneration

    _, err = dc.Get(ctx, crd.GetName(), metav1.GetOptions{})
    if err != nil && !k8serrors.IsNotFound(err) {
        return fmt.Errorf("cannot determine if CRD %s exists: %w", resources.Ref(crd), err)
    }

    if err != nil && k8serrors.IsNotFound(err) {
        apply = true
    }

    if !apply {
        a.l.Info("run",
            "apply", "false",
            "gen", rc.Resource.Generation,
            "ref", resources.Ref(crd),
            "generation-changed", rc.Resource.Generation != rc.Resource.Status.ObservedGeneration,
            "not-found", k8serrors.IsNotFound(err))

        return nil
    }

    _, err = dc.Apply(ctx, crd.GetName(), crd, metav1.ApplyOptions{
        FieldManager: controller.FieldManager,
        Force:        true,
    })

    if err != nil {
        return fmt.Errorf("cannot apply CRD %s: %w", resources.Ref(crd), err)
    }

    a.l.Info("run",
        "apply", "true",
        "gen", rc.Resource.Generation,
        "ref", resources.Ref(crd))

    return nil
}
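Note: reading the two hunks above together, the per-CRD logic now lives in the new apply helper and the Run loop reduces to roughly the following shape (a sketch reconstructed from the diff, not a verbatim excerpt; one label entry between ReleaseName and ReleaseVersion is not shown in the hunks and is elided here):

    for _, crd := range crds {
        resources.Labels(&crd, map[string]string{
            helm.ReleaseGeneration: strconv.FormatInt(rc.Resource.Generation, 10),
            helm.ReleaseName:       rc.Resource.Name,
            // ... one more release label elided ...
            helm.ReleaseVersion: c.Version(),
        })

        err = a.apply(ctx, rc, &crd)
        if err != nil {
            return err
        }
    }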
@ -6,6 +6,8 @@ import (
    "sort"
    "strconv"

    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

    "github.com/dapr/kubernetes-operator/pkg/controller/predicates"

    "github.com/dapr/kubernetes-operator/pkg/controller"
@ -66,9 +68,9 @@ func (a *ApplyResourcesAction) Run(ctx context.Context, rc *ReconciliationRequest) error {
        installedVersion = rc.Resource.Status.Chart.Version
    }

    reinstall := rc.Resource.Generation != rc.Resource.Status.ObservedGeneration || c.Version() != installedVersion
    force := rc.Resource.Generation != rc.Resource.Status.ObservedGeneration || c.Version() != installedVersion

    if reinstall {
    if force {
        rc.Reconciler.Event(
            rc.Resource,
            corev1.EventTypeNormal,
@ -81,20 +83,7 @@ func (a *ApplyResourcesAction) Run(ctx context.Context, rc *ReconciliationRequest) error {
        )
    }

    for i := range items {
        obj := items[i]
        gvk := obj.GroupVersionKind()
        installOnly := a.installOnly(gvk)

        if reinstall {
            installOnly = false
        }

        dc, err := rc.Client.Dynamic(rc.Resource.Namespace, &obj)
        if err != nil {
            return fmt.Errorf("cannot create dynamic client: %w", err)
        }

    for _, obj := range items {
        resources.Labels(&obj, map[string]string{
            helm.ReleaseGeneration: strconv.FormatInt(rc.Resource.Generation, 10),
            helm.ReleaseName:       rc.Resource.Name,
@ -102,117 +91,16 @@ func (a *ApplyResourcesAction) Run(ctx context.Context, rc *ReconciliationRequest) error {
            helm.ReleaseVersion: c.Version(),
        })

        switch dc.(type) {
        //
        // NamespacedResource: in this case, filtering with ownership can be implemented
        // as all the namespaced resources created by this controller have the Dapr CR as
        // an owner
        //
        case *client.NamespacedResource:
            obj.SetOwnerReferences(resources.OwnerReferences(rc.Resource))
            obj.SetNamespace(rc.Resource.Namespace)
            gvk := obj.GroupVersionKind()

            r := gvk.GroupVersion().String() + ":" + gvk.Kind

            if _, ok := a.subscriptions[r]; !ok {
                a.l.Info("watch", "ref", r)

                err = rc.Reconciler.Watch(
                    &obj,
                    rc.Reconciler.EnqueueRequestForOwner(&daprApi.DaprInstance{}, handler.OnlyControllerOwner()),
                    dependantWithLabels(
                        predicates.WithWatchUpdate(a.watchForUpdates(gvk)),
                        predicates.WithWatchDeleted(true),
                        predicates.WithWatchStatus(a.watchStatus(gvk)),
                    ),
                )

                if err != nil {
                    return err
                }

                a.subscriptions[r] = struct{}{}
            }

        //
        // ClusteredResource: in this case, ownership based filtering is not supported
        // as you cannot have a non namespaced owner. For such reason, the resource for
        // which a reconcile should be triggered can be identified by using the labels
        // added by the controller to all the generated resources
        //
        // helm.operator.dapr.io/resource.namespace = ${namespace}
        // helm.operator.dapr.io/resource.name = ${name}
        //
        case *client.ClusteredResource:
            r := gvk.GroupVersion().String() + ":" + gvk.Kind

            if _, ok := a.subscriptions[r]; !ok {
                a.l.Info("watch", "ref", r)

                err = rc.Reconciler.Watch(
                    &obj,
                    rc.Reconciler.EnqueueRequestsFromMapFunc(labelsToRequest),
                    dependantWithLabels(
                        predicates.WithWatchUpdate(a.watchForUpdates(gvk)),
                        predicates.WithWatchDeleted(true),
                        predicates.WithWatchStatus(a.watchStatus(gvk)),
                    ),
                )

                if err != nil {
                    return err
                }

                a.subscriptions[r] = struct{}{}
            }
        }
        if !force {
            force = !a.installOnly(gvk)
        }

        if installOnly {
            old, err := dc.Get(ctx, obj.GetName(), metav1.GetOptions{})
            if err != nil {
                if !k8serrors.IsNotFound(err) {
                    return fmt.Errorf("cannot get object %s: %w", resources.Ref(&obj), err)
                }
            }

            if old != nil {
                //
                // Every time the template is rendered, the helm function genSignedCert kicks in and
                // re-generates certs, which causes a number of side effects and makes the set-up quite
                // unstable. As a consequence, some resources are not meant to be watched and re-created
                // unless the Dapr CR generation changes (which means the Spec has changed) or the
                // resource impacted by the genSignedCert hook is deleted.
                //
                // Ideally on OpenShift it would be good to leverage the service serving certificates
                // capability.
                //
                // Related info:
                // - https://docs.openshift.com/container-platform/4.13/security/certificates/service-serving-certificate.html
                // - https://github.com/dapr/dapr/issues/3968
                // - https://github.com/dapr/dapr/issues/6500
                //
                a.l.Info("run",
                    "apply", "false",
                    "ref", resources.Ref(&obj),
                    "reason", "resource marked as install-only")

                continue
            }
        }

        _, err = dc.Apply(ctx, obj.GetName(), &obj, metav1.ApplyOptions{
            FieldManager: controller.FieldManager,
            Force:        true,
        })

        err = a.apply(ctx, rc, &obj, force)
        if err != nil {
            return fmt.Errorf("cannot patch object %s: %w", resources.Ref(&obj), err)
            return err
        }

        a.l.Info("run",
            "apply", "true",
            "gen", rc.Resource.Generation,
            "ref", resources.Ref(&obj))
    }

    return nil
@ -293,3 +181,152 @@ func (a *ApplyResourcesAction) installOnly(gvk schema.GroupVersionKind) bool {

    return false
}

//nolint:cyclop
func (a *ApplyResourcesAction) apply(ctx context.Context, rc *ReconciliationRequest, obj *unstructured.Unstructured, force bool) error {
    dc, err := rc.Client.Dynamic(rc.Resource.Namespace, obj)
    if err != nil {
        return fmt.Errorf("cannot create dynamic client: %w", err)
    }

    switch dc.(type) {
    //
    // NamespacedResource: in this case, filtering with ownership can be implemented
    // as all the namespaced resources created by this controller have the Dapr CR as
    // an owner
    //
    case *client.NamespacedResource:
        if err := a.watchNamespaceScopeResource(rc, obj); err != nil {
            return err
        }

    //
    // ClusteredResource: in this case, ownership based filtering is not supported
    // as you cannot have a non namespaced owner. For such reason, the resource for
    // which a reconcile should be triggered can be identified by using the labels
    // added by the controller to all the generated resources
    //
    // helm.operator.dapr.io/resource.namespace = ${namespace}
    // helm.operator.dapr.io/resource.name = ${name}
    //
    case *client.ClusteredResource:
        if err := a.watchClusterScopeResource(rc, obj); err != nil {
            return err
        }
    }

    if !force {
        old, err := dc.Get(ctx, obj.GetName(), metav1.GetOptions{})
        if err != nil && !k8serrors.IsNotFound(err) {
            return fmt.Errorf("cannot get object %s: %w", resources.Ref(obj), err)
        }

        if old != nil {
            //
            // Every time the template is rendered, the helm function genSignedCert kicks in and
            // re-generates certs, which causes a number of side effects and makes the set-up quite
            // unstable. As a consequence, some resources are not meant to be watched and re-created
            // unless the Dapr CR generation changes (which means the Spec has changed) or the
            // resource impacted by the genSignedCert hook is deleted.
            //
            // Ideally on OpenShift it would be good to leverage the service serving certificates
            // capability.
            //
            // Related info:
            // - https://docs.openshift.com/container-platform/4.13/security/certificates/service-serving-certificate.html
            // - https://github.com/dapr/dapr/issues/3968
            // - https://github.com/dapr/dapr/issues/6500
            //
            a.l.Info("run",
                "apply", "false",
                "gen", rc.Resource.Generation,
                "ref", resources.Ref(obj),
                "reason", "resource marked as install-only")

            return nil
        }
    }

    _, err = dc.Apply(ctx, obj.GetName(), obj, metav1.ApplyOptions{
        FieldManager: controller.FieldManager,
        Force:        true,
    })

    if err != nil {
        return fmt.Errorf("cannot patch object %s: %w", resources.Ref(obj), err)
    }

    a.l.Info("run",
        "apply", "true",
        "gen", rc.Resource.Generation,
        "ref", resources.Ref(obj))

    return nil
}

func (a *ApplyResourcesAction) watchNamespaceScopeResource(rc *ReconciliationRequest, obj *unstructured.Unstructured) error {
    gvk := obj.GroupVersionKind()

    obj.SetOwnerReferences(resources.OwnerReferences(rc.Resource))
    obj.SetNamespace(rc.Resource.Namespace)

    r := gvk.GroupVersion().String() + ":" + gvk.Kind

    if _, ok := a.subscriptions[r]; ok {
        return nil
    }

    if _, ok := a.subscriptions[r]; !ok {
        a.l.Info("watch", "scope", "namespace", "ref", r)

        err := rc.Reconciler.Watch(
            obj,
            rc.Reconciler.EnqueueRequestForOwner(&daprApi.DaprInstance{}, handler.OnlyControllerOwner()),
            dependantWithLabels(
                predicates.WithWatchUpdate(a.watchForUpdates(gvk)),
                predicates.WithWatchDeleted(true),
                predicates.WithWatchStatus(a.watchStatus(gvk)),
            ),
        )

        if err != nil {
            return err
        }

        a.subscriptions[r] = struct{}{}
    }

    return nil
}

func (a *ApplyResourcesAction) watchClusterScopeResource(rc *ReconciliationRequest, obj *unstructured.Unstructured) error {
    gvk := obj.GroupVersionKind()

    r := gvk.GroupVersion().String() + ":" + gvk.Kind

    if _, ok := a.subscriptions[r]; ok {
        return nil
    }

    if _, ok := a.subscriptions[r]; !ok {
        a.l.Info("watch", "scope", "cluster", "ref", r)

        err := rc.Reconciler.Watch(
            obj,
            rc.Reconciler.EnqueueRequestsFromMapFunc(labelsToRequest),
            dependantWithLabels(
                predicates.WithWatchUpdate(a.watchForUpdates(gvk)),
                predicates.WithWatchDeleted(true),
                predicates.WithWatchStatus(a.watchStatus(gvk)),
            ),
        )

        if err != nil {
            return err
        }

        a.subscriptions[r] = struct{}{}
    }

    return nil
}
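Note: the two watch helpers above share one dedup scheme: the subscriptions set is keyed by "group/version:Kind", so a controller watch is registered at most once per resource type. A standalone sketch of that pattern, with hypothetical names outside this codebase:

package example

import "k8s.io/apimachinery/pkg/runtime/schema"

type watcher struct {
    subscriptions map[string]struct{}
}

func newWatcher() *watcher {
    return &watcher{subscriptions: map[string]struct{}{}}
}

// register reports true the first time a GroupVersionKind is seen, mirroring
// how watchNamespaceScopeResource and watchClusterScopeResource guard the
// call to rc.Reconciler.Watch.
func (w *watcher) register(gvk schema.GroupVersionKind) bool {
    key := gvk.GroupVersion().String() + ":" + gvk.Kind

    if _, ok := w.subscriptions[key]; ok {
        return false
    }

    w.subscriptions[key] = struct{}{}

    return true
}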
@ -4,6 +4,8 @@ import (
    "context"
    "fmt"

    "k8s.io/apimachinery/pkg/labels"

    "github.com/dapr/kubernetes-operator/pkg/conditions"
    appsv1 "k8s.io/api/apps/v1"
    corev1 "k8s.io/api/core/v1"
@ -35,69 +37,35 @@ func (a *ConditionsAction) Run(ctx context.Context, rc *ReconciliationRequest) error {
        return fmt.Errorf("cannot compute current release selector: %w", err)
    }

    // Deployments

    deployments, err := rc.Client.AppsV1().Deployments(rc.Resource.Namespace).List(ctx, metav1.ListOptions{
        LabelSelector: crs.String(),
    })

    deployments, readyDeployments, err := a.deployments(ctx, rc, crs)
    if err != nil {
        return fmt.Errorf("cannot list deployments: %w", err)
        return fmt.Errorf("cannot count deployments: %w", err)
    }

    readyDeployments := 0

    for i := range deployments.Items {
        if conditions.ConditionStatus(deployments.Items[i], appsv1.DeploymentAvailable) == corev1.ConditionTrue {
            readyDeployments++
        }
    }

    // StatefulSets

    statefulSets, err := rc.Client.AppsV1().StatefulSets(rc.Resource.Namespace).List(ctx, metav1.ListOptions{
        LabelSelector: crs.String(),
    })

    statefulSets, readyReplicaSets, err := a.statefulSets(ctx, rc, crs)
    if err != nil {
        return fmt.Errorf("cannot list stateful sets: %w", err)
    }

    readyReplicaSets := 0

    for i := range statefulSets.Items {
        if statefulSets.Items[i].Status.Replicas == 0 {
            continue
        }

        if statefulSets.Items[i].Status.Replicas == statefulSets.Items[i].Status.ReadyReplicas {
            readyReplicaSets++
        }
        return fmt.Errorf("cannot count stateful sets: %w", err)
    }

    var readyCondition metav1.Condition

    if len(deployments.Items)+len(statefulSets.Items) > 0 {
        if readyDeployments+readyReplicaSets == len(deployments.Items)+len(statefulSets.Items) {
            readyCondition = metav1.Condition{
                Type:               conditions.TypeReady,
                Status:             metav1.ConditionTrue,
                Reason:             "Ready",
                ObservedGeneration: rc.Resource.Generation,
                Message: fmt.Sprintf("%d/%d deployments ready, statefulSets ready %d/%d",
                    readyDeployments, len(deployments.Items),
                    readyReplicaSets, len(statefulSets.Items)),
            }
        } else {
            readyCondition = metav1.Condition{
                Type:               conditions.TypeReady,
                Status:             metav1.ConditionFalse,
                Reason:             "InProgress",
                ObservedGeneration: rc.Resource.Generation,
                Message: fmt.Sprintf("%d/%d deployments ready, statefulSets ready %d/%d",
                    readyDeployments, len(deployments.Items),
                    readyReplicaSets, len(statefulSets.Items)),
            }
    if deployments+statefulSets > 0 {
        reason := "Ready"
        status := metav1.ConditionTrue

        if readyDeployments+readyReplicaSets != deployments+statefulSets {
            reason = "InProgress"
            status = metav1.ConditionFalse
        }

        readyCondition = metav1.Condition{
            Type:               conditions.TypeReady,
            Status:             status,
            Reason:             reason,
            ObservedGeneration: rc.Resource.Generation,
            Message: fmt.Sprintf("%d/%d deployments ready, statefulSets ready %d/%d",
                readyDeployments, deployments,
                readyReplicaSets, statefulSets),
        }
    } else {
        readyCondition = metav1.Condition{
@ -117,3 +85,47 @@ func (a *ConditionsAction) Run(ctx context.Context, rc *ReconciliationRequest) error {
func (a *ConditionsAction) Cleanup(_ context.Context, _ *ReconciliationRequest) error {
    return nil
}

func (a *ConditionsAction) deployments(ctx context.Context, rc *ReconciliationRequest, selector labels.Selector) (int, int, error) {
    objects, err := rc.Client.AppsV1().Deployments(rc.Resource.Namespace).List(ctx, metav1.ListOptions{
        LabelSelector: selector.String(),
    })

    if err != nil {
        return 0, 0, fmt.Errorf("cannot list deployments: %w", err)
    }

    ready := 0

    for i := range objects.Items {
        if conditions.ConditionStatus(objects.Items[i], appsv1.DeploymentAvailable) == corev1.ConditionTrue {
            ready++
        }
    }

    return len(objects.Items), ready, nil
}

func (a *ConditionsAction) statefulSets(ctx context.Context, rc *ReconciliationRequest, selector labels.Selector) (int, int, error) {
    objects, err := rc.Client.AppsV1().StatefulSets(rc.Resource.Namespace).List(ctx, metav1.ListOptions{
        LabelSelector: selector.String(),
    })

    if err != nil {
        return 0, 0, fmt.Errorf("cannot list stateful sets: %w", err)
    }

    ready := 0

    for i := range objects.Items {
        if objects.Items[i].Status.Replicas == 0 {
            continue
        }

        if objects.Items[i].Status.Replicas == objects.Items[i].Status.ReadyReplicas {
            ready++
        }
    }

    return len(objects.Items), ready, nil
}
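Note: the Ready condition above is now derived from plain counts returned by the deployments and statefulSets helpers. A self-contained sketch of that decision, assuming the same semantics as the hunk above (the else branch for the zero-workload case is cut off in the diff, so it is treated as not ready here):

package example

import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

// readiness mirrors the condition logic: Ready only when every selected
// deployment and stateful set reports ready.
func readiness(deployments, readyDeployments, statefulSets, readyStatefulSets int) (metav1.ConditionStatus, string) {
    if deployments+statefulSets == 0 {
        // Assumption: the truncated else branch reports a non-ready state.
        return metav1.ConditionFalse, "InProgress"
    }

    if readyDeployments+readyStatefulSets != deployments+statefulSets {
        return metav1.ConditionFalse, "InProgress"
    }

    return metav1.ConditionTrue, "Ready"
}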
@ -50,26 +50,16 @@ func ConditionStatus[T GenericConditionType](object any, conditionType T) corev1.ConditionStatus {
            return corev1.ConditionStatus(c.Status)
        }
    case *appsv1.Deployment:
        if o != nil {
            for i := range o.Status.Conditions {
                if string(o.Status.Conditions[i].Type) == string(conditionType) {
                    return o.Status.Conditions[i].Status
                }
            }
            if c, ok := FindDeploymentStatusCondition(o, string(conditionType)); ok {
                return c.Status
            }
    case appsv1.Deployment:
        for i := range o.Status.Conditions {
            if string(o.Status.Conditions[i].Type) == string(conditionType) {
                return o.Status.Conditions[i].Status
            }
        if c, ok := FindDeploymentStatusCondition(&o, string(conditionType)); ok {
            return c.Status
        }
    case *corev1.Pod:
        if o != nil {
            for i := range o.Status.Conditions {
                if string(o.Status.Conditions[i].Type) == string(conditionType) {
                    return o.Status.Conditions[i].Status
                }
            }
            if c, ok := FindPodStatusCondition(o, string(conditionType)); ok {
                return c.Status
            }
    }

@ -83,28 +73,46 @@ func ConditionReason[T GenericConditionType](object any, conditionType T) string {
            return c.Reason
        }
    case *appsv1.Deployment:
        if o != nil {
            for i := range o.Status.Conditions {
                if string(o.Status.Conditions[i].Type) == string(conditionType) {
                    return o.Status.Conditions[i].Reason
                }
            }
            if c, ok := FindDeploymentStatusCondition(o, string(conditionType)); ok {
                return c.Reason
            }
    case appsv1.Deployment:
        for i := range o.Status.Conditions {
            if string(o.Status.Conditions[i].Type) == string(conditionType) {
                return o.Status.Conditions[i].Reason
            }
        if c, ok := FindDeploymentStatusCondition(&o, string(conditionType)); ok {
            return c.Reason
        }
    case *corev1.Pod:
        if o != nil {
            for i := range o.Status.Conditions {
                if string(o.Status.Conditions[i].Type) == string(conditionType) {
                    return o.Status.Conditions[i].Reason
                }
            }
            if c, ok := FindPodStatusCondition(o, string(conditionType)); ok {
                return c.Reason
            }
    }

    return ""
}

func FindDeploymentStatusCondition(in *appsv1.Deployment, conditionType string) (appsv1.DeploymentCondition, bool) {
    if in == nil {
        return appsv1.DeploymentCondition{}, false
    }

    for i := range in.Status.Conditions {
        if string(in.Status.Conditions[i].Type) == conditionType {
            return in.Status.Conditions[i], true
        }
    }

    return appsv1.DeploymentCondition{}, false
}

func FindPodStatusCondition(in *corev1.Pod, conditionType string) (corev1.PodCondition, bool) {
    if in == nil {
        return corev1.PodCondition{}, false
    }

    for i := range in.Status.Conditions {
        if string(in.Status.Conditions[i].Type) == conditionType {
            return in.Status.Conditions[i], true
        }
    }

    return corev1.PodCondition{}, false
}
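Note: the new Find helpers absorb the nil checks and loops that were previously repeated in every case arm. A usage sketch of the deployment helper (the helper body is copied verbatim from the hunk above so the snippet is self-contained):

package example

import (
    appsv1 "k8s.io/api/apps/v1"
    corev1 "k8s.io/api/core/v1"
)

func FindDeploymentStatusCondition(in *appsv1.Deployment, conditionType string) (appsv1.DeploymentCondition, bool) {
    if in == nil {
        return appsv1.DeploymentCondition{}, false
    }

    for i := range in.Status.Conditions {
        if string(in.Status.Conditions[i].Type) == conditionType {
            return in.Status.Conditions[i], true
        }
    }

    return appsv1.DeploymentCondition{}, false
}

// available reports whether a deployment (possibly nil) has the Available
// condition set to true, the same check ConditionStatus performs above.
func available(d *appsv1.Deployment) bool {
    c, ok := FindDeploymentStatusCondition(d, string(appsv1.DeploymentAvailable))

    return ok && c.Status == corev1.ConditionTrue
}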
@ -93,48 +93,62 @@ func (gc *GC) deleteEachOf(
    }

    for i := range items.Items {
        resource := items.Items[i]

        if !gc.canBeDeleted(ctx, resource.GroupVersionKind()) {
            continue
        }

        canBeDeleted, err := predicate(ctx, resource)
        ok, err := gc.delete(ctx, c, items.Items[i], predicate)
        if err != nil {
            return 0, err
        }

        if !canBeDeleted {
            continue
        if ok {
            deleted++
        }

        gc.l.Info("deleting", "ref", resources.Ref(&resource))

        err = c.Delete(ctx, &resource, ctrlCli.PropagationPolicy(metav1.DeletePropagationForeground))
        if err != nil {
            // The resource may have already been deleted
            if !k8serrors.IsNotFound(err) {
                continue
            }

            return 0, fmt.Errorf(
                "cannot delete resources gvk:%s, namespace: %s, name: %s, err: %w",
                resource.GroupVersionKind().String(),
                resource.GetNamespace(),
                resource.GetName(),
                err,
            )
        }

        gc.l.Info("deleted", "ref", resources.Ref(&resource))

        deleted++
    }
    }

    return deleted, nil
}

func (gc *GC) delete(
    ctx context.Context,
    c *client.Client,
    resource unstructured.Unstructured,
    predicate func(context.Context, unstructured.Unstructured) (bool, error),
) (bool, error) {
    if !gc.canBeDeleted(ctx, resource.GroupVersionKind()) {
        return false, nil
    }

    canBeDeleted, err := predicate(ctx, resource)
    if err != nil {
        return false, err
    }

    if !canBeDeleted {
        return false, err
    }

    gc.l.Info("deleting", "ref", resources.Ref(&resource))

    err = c.Delete(ctx, &resource, ctrlCli.PropagationPolicy(metav1.DeletePropagationForeground))
    if err != nil {
        // The resource may have already been deleted
        if k8serrors.IsNotFound(err) {
            return true, nil
        }

        return false, fmt.Errorf(
            "cannot delete resources gvk:%s, namespace: %s, name: %s, err: %w",
            resource.GroupVersionKind().String(),
            resource.GetNamespace(),
            resource.GetName(),
            err,
        )
    }

    gc.l.Info("deleted", "ref", resources.Ref(&resource))

    return true, nil
}

func (gc *GC) canBeDeleted(_ context.Context, gvk schema.GroupVersionKind) bool {
    if gvk.Group == "coordination.k8s.io" && gvk.Kind == "Lease" {
        return false
@ -143,6 +157,7 @@ func (gc *GC) canBeDeleted(_ context.Context, gvk schema.GroupVersionKind) bool {
    return true
}

//nolint:cyclop
func (gc *GC) computeDeletableTypes(ctx context.Context, c *client.Client, ns string) error {
    // Rate limit to avoid Discovery and SelfSubjectRulesReview requests at every reconciliation.
    if !gc.limiter.Allow() {
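Note: the extracted delete helper in the gc hunk above returns (deleted, error), and a NotFound encountered during deletion is treated as success so the caller's counter stays accurate. A minimal illustration of that contract, with a hypothetical delete function standing in for the client call:

package example

import (
    "context"
    "errors"
)

var errNotFound = errors.New("not found")

// deleteOne mirrors the helper's contract: (true, nil) when the object is
// gone, whether it was deleted now or had already disappeared.
func deleteOne(ctx context.Context, del func(context.Context) error) (bool, error) {
    if err := del(ctx); err != nil {
        if errors.Is(err, errNotFound) {
            return true, nil
        }

        return false, err
    }

    return true, nil
}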
@ -76,7 +76,7 @@ type BaseReconciler[T controller.ResourceObject] struct {
    Client ctrlClient.Client
}

//nolint:forcetypeassert,wrapcheck,nestif
//nolint:forcetypeassert,wrapcheck,nestif,cyclop
func (s *BaseReconciler[T]) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
    res := reflect.New(reflect.TypeOf(*new(T)).Elem()).Interface().(T)
    if err := s.Client.Get(ctx, req.NamespacedName, res); err != nil {
@ -13,6 +13,7 @@ import (
    "k8s.io/apimachinery/pkg/runtime"
)

//nolint:cyclop
func runCleanup(t Test, in runtime.Object) error {
    un, err := resources.ToUnstructured(t.Client().Scheme(), in)
    if err != nil {