Merge pull request #5530 from a7i/skip-interpret-health
skip interpret health of resources without a hook
commit f656d9a2a0
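In short: before reflecting status, the work status controller now asks the ResourceInterpreter whether an InterpretHealth hook is registered for the object's kind at all. If no hook exists, the resource is reported as healthy and the interpreter call, together with its error path and Unknown status, is skipped. A condensed sketch of that decision, reusing the karmada packages imported in the hunks below (illustration only, not the controller method itself: it assumes the ResourceInterpreter interface from pkg/resourceinterpreter and drops the event recording that the real code keeps):

```go
package sketch // hypothetical illustration, not part of the PR

import (
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

	configv1alpha1 "github.com/karmada-io/karmada/pkg/apis/config/v1alpha1"
	workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
	"github.com/karmada-io/karmada/pkg/resourceinterpreter"
)

// interpretHealthSketch mirrors the new flow: skip the interpreter when no
// InterpretHealth hook is registered, otherwise classify as before.
func interpretHealthSketch(ri resourceinterpreter.ResourceInterpreter, obj *unstructured.Unstructured) workv1alpha1.ResourceHealth {
	if !ri.HookEnabled(obj.GroupVersionKind(), configv1alpha1.InterpreterOperationInterpretHealth) {
		// No hook for this kind: treat it as healthy instead of Unknown plus a warning event.
		return workv1alpha1.ResourceHealthy
	}
	healthy, err := ri.InterpretHealth(obj)
	switch {
	case err != nil:
		return workv1alpha1.ResourceUnknown
	case healthy:
		return workv1alpha1.ResourceHealthy
	default:
		return workv1alpha1.ResourceUnhealthy
	}
}
```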
@@ -38,6 +38,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/predicate"
 
 	clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
+	configv1alpha1 "github.com/karmada-io/karmada/pkg/apis/config/v1alpha1"
 	workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
 	workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
 	"github.com/karmada-io/karmada/pkg/events"
@@ -365,20 +366,7 @@ func (c *WorkStatusController) reflectStatus(ctx context.Context, work *workv1alpha1.Work,
 	}
 	c.EventRecorder.Eventf(work, corev1.EventTypeNormal, events.EventReasonReflectStatusSucceed, "Reflect status for object(%s/%s/%s) succeed.", clusterObj.GetKind(), clusterObj.GetNamespace(), clusterObj.GetName())
 
-	var resourceHealth workv1alpha1.ResourceHealth
-	// When an unregistered resource kind is requested with the ResourceInterpreter,
-	// the interpreter will return an error, we treat its health status as Unknown.
-	healthy, err := c.ResourceInterpreter.InterpretHealth(clusterObj)
-	if err != nil {
-		resourceHealth = workv1alpha1.ResourceUnknown
-		c.EventRecorder.Eventf(work, corev1.EventTypeWarning, events.EventReasonInterpretHealthFailed, "Interpret health of object(%s/%s/%s) failed, err: %s.", clusterObj.GetKind(), clusterObj.GetNamespace(), clusterObj.GetName(), err.Error())
-	} else if healthy {
-		resourceHealth = workv1alpha1.ResourceHealthy
-		c.EventRecorder.Eventf(work, corev1.EventTypeNormal, events.EventReasonInterpretHealthSucceed, "Interpret health of object(%s/%s/%s) as healthy.", clusterObj.GetKind(), clusterObj.GetNamespace(), clusterObj.GetName())
-	} else {
-		resourceHealth = workv1alpha1.ResourceUnhealthy
-		c.EventRecorder.Eventf(work, corev1.EventTypeNormal, events.EventReasonInterpretHealthSucceed, "Interpret health of object(%s/%s/%s) as unhealthy.", clusterObj.GetKind(), clusterObj.GetNamespace(), clusterObj.GetName())
-	}
+	resourceHealth := c.interpretHealth(clusterObj, work)
 
 	identifier, err := c.buildStatusIdentifier(work, clusterObj)
 	if err != nil {
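Behaviorally, the removed inline block always called InterpretHealth, so a kind with no registered health interpretation (the removed comment notes the interpreter returns an error in that case) ended up reported as ResourceUnknown with an InterpretHealthFailed warning on the Work. With the helper introduced below, such kinds short-circuit to ResourceHealthy and no event is recorded; kinds that do have a hook keep the previous healthy/unhealthy/unknown handling.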
@@ -400,6 +388,28 @@ func (c *WorkStatusController) reflectStatus(ctx context.Context, work *workv1alpha1.Work,
 	})
 }
 
+func (c *WorkStatusController) interpretHealth(clusterObj *unstructured.Unstructured, work *workv1alpha1.Work) workv1alpha1.ResourceHealth {
+	// For kind that doesn't have health check, we treat it as healthy.
+	if !c.ResourceInterpreter.HookEnabled(clusterObj.GroupVersionKind(), configv1alpha1.InterpreterOperationInterpretHealth) {
+		klog.V(5).Infof("skipping health assessment for object: %v %s/%s as missing customization and will treat it as healthy.", clusterObj.GroupVersionKind(), clusterObj.GetNamespace(), clusterObj.GetName())
+		return workv1alpha1.ResourceHealthy
+	}
+
+	var resourceHealth workv1alpha1.ResourceHealth
+	healthy, err := c.ResourceInterpreter.InterpretHealth(clusterObj)
+	if err != nil {
+		resourceHealth = workv1alpha1.ResourceUnknown
+		c.EventRecorder.Eventf(work, corev1.EventTypeWarning, events.EventReasonInterpretHealthFailed, "Interpret health of object(%s/%s/%s) failed, err: %s.", clusterObj.GetKind(), clusterObj.GetNamespace(), clusterObj.GetName(), err.Error())
+	} else if healthy {
+		resourceHealth = workv1alpha1.ResourceHealthy
+		c.EventRecorder.Eventf(work, corev1.EventTypeNormal, events.EventReasonInterpretHealthSucceed, "Interpret health of object(%s/%s/%s) as healthy.", clusterObj.GetKind(), clusterObj.GetNamespace(), clusterObj.GetName())
+	} else {
+		resourceHealth = workv1alpha1.ResourceUnhealthy
+		c.EventRecorder.Eventf(work, corev1.EventTypeNormal, events.EventReasonInterpretHealthSucceed, "Interpret health of object(%s/%s/%s) as unhealthy.", clusterObj.GetKind(), clusterObj.GetNamespace(), clusterObj.GetName())
+	}
+	return resourceHealth
+}
+
 func (c *WorkStatusController) buildStatusIdentifier(work *workv1alpha1.Work, clusterObj *unstructured.Unstructured) (*workv1alpha1.ResourceIdentifier, error) {
 	manifestRef := helper.ManifestReference{APIVersion: clusterObj.GetAPIVersion(), Kind: clusterObj.GetKind(),
 		Namespace: clusterObj.GetNamespace(), Name: clusterObj.GetName()}
@@ -25,22 +25,26 @@ import (
 
 	"github.com/stretchr/testify/assert"
 	corev1 "k8s.io/api/core/v1"
+	rbacv1 "k8s.io/api/rbac/v1"
 	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/uuid"
 	dynamicfake "k8s.io/client-go/dynamic/fake"
 	"k8s.io/client-go/kubernetes/scheme"
 	"k8s.io/client-go/tools/record"
 	"k8s.io/utils/ptr"
 	controllerruntime "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/client/fake"
 
 	clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
 	workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
 	workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
+	"github.com/karmada-io/karmada/pkg/events"
 	"github.com/karmada-io/karmada/pkg/resourceinterpreter/default/native"
 	"github.com/karmada-io/karmada/pkg/sharedcli/ratelimiterflag"
 	"github.com/karmada-io/karmada/pkg/util"
@@ -723,6 +727,9 @@ func newWorkStatusController(cluster *clusterv1alpha1.Cluster, dynamicClientSets
 			m.Add(corev1.SchemeGroupVersion.WithKind("Pod"), meta.RESTScopeNamespace)
 			return m
 		}(),
+		ResourceInterpreter: FakeResourceInterpreter{
+			DefaultInterpreter: native.NewDefaultInterpreter(),
+		},
 	}
 
 	if len(dynamicClientSets) > 0 {
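For the tests, newWorkStatusController now seeds the controller with a FakeResourceInterpreter built around the native default interpreter. Its type definition is not part of these hunks; a plausible minimal shape, assuming it simply embeds the default interpreter (hypothetical, and the real test double presumably also stubs the rest of the ResourceInterpreter interface):

```go
package sketch // hypothetical illustration, not part of the PR

import "github.com/karmada-io/karmada/pkg/resourceinterpreter/default/native"

// Embedding *native.DefaultInterpreter promotes its HookEnabled and
// InterpretHealth methods, so a health hook is "enabled" only for kinds the
// built-in interpreter can assess (e.g. a Deployment but not a ClusterRole),
// which is the distinction the new test below relies on.
type FakeResourceInterpreter struct {
	*native.DefaultInterpreter
}
```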
@@ -1021,3 +1028,47 @@ func TestWorkStatusController_registerInformersAndStart(t *testing.T) {
 		assert.NotEmpty(t, err)
 	})
 }
+
+func TestWorkStatusController_interpretHealth(t *testing.T) {
+	tests := []struct {
+		name                   string
+		clusterObj             client.Object
+		expectedResourceHealth workv1alpha1.ResourceHealth
+		expectedEventReason    string
+	}{
+		{
+			name:                   "deployment without status is interpreted as unhealthy",
+			clusterObj:             testhelper.NewDeployment("foo", "bar"),
+			expectedResourceHealth: workv1alpha1.ResourceUnhealthy,
+			expectedEventReason:    events.EventReasonInterpretHealthSucceed,
+		},
+		{
+			name:                   "cluster role without status is interpreted as healthy",
+			clusterObj:             testhelper.NewClusterRole("foo", []rbacv1.PolicyRule{}),
+			expectedResourceHealth: workv1alpha1.ResourceHealthy,
+		},
+	}
+
+	cluster := newCluster("cluster", clusterv1alpha1.ClusterConditionReady, metav1.ConditionTrue)
+	c := newWorkStatusController(cluster)
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			work := testhelper.NewWork(tt.clusterObj.GetName(), tt.clusterObj.GetNamespace(), string(uuid.NewUUID()), []byte{})
+			obj, err := helper.ToUnstructured(tt.clusterObj)
+			assert.NoError(t, err)
+
+			resourceHealth := c.interpretHealth(obj, work)
+			assert.Equalf(t, tt.expectedResourceHealth, resourceHealth, "expected resource health %v, got %v", tt.expectedResourceHealth, resourceHealth)
+
+			eventRecorder := c.EventRecorder.(*record.FakeRecorder)
+			if tt.expectedEventReason == "" {
+				assert.Empty(t, eventRecorder.Events, "expected no events to get recorded")
+			} else {
+				assert.Equal(t, 1, len(eventRecorder.Events))
+				e := <-eventRecorder.Events
+				assert.Containsf(t, e, tt.expectedEventReason, "expected event reason %v, got %v", tt.expectedEventReason, e)
+			}
+		})
+	}
+}
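Aside on the event assertions in the new test: c.EventRecorder here is a record.FakeRecorder, which buffers every recorded event as a formatted string on its Events channel, so the test drains one entry and checks the reason substring. A small self-contained illustration of that mechanism (demo code, not part of the PR):

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/record"
)

func main() {
	// FakeRecorder never publishes real Events; Eventf just pushes
	// "<type> <reason> <message>" onto the buffered Events channel.
	recorder := record.NewFakeRecorder(10)
	recorder.Eventf(&corev1.Pod{}, corev1.EventTypeNormal, "InterpretHealthSucceed",
		"Interpret health of object(%s) as healthy.", "demo")

	// Prints: Normal InterpretHealthSucceed Interpret health of object(demo) as healthy.
	fmt.Println(<-recorder.Events)
}
```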