fix bugs in aggregating job status

Signed-off-by: Garrybest <garrybest@foxmail.com>
Garrybest 2022-06-10 17:46:34 +08:00
parent 6ae45f8618
commit 3ad6f31a7b
4 changed files with 13 additions and 30 deletions


@@ -160,6 +160,11 @@ func aggregateJobStatus(object *unstructured.Unstructured, aggregatedStatusItems
 		return nil, err
 	}
 
+	// If a job is finished, we should never update status again.
+	if finished, _ := helper.GetJobFinishedStatus(&job.Status); finished {
+		return object, nil
+	}
+
 	newStatus, err := helper.ParsingJobStatus(job, aggregatedStatusItems)
 	if err != nil {
 		return nil, err
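For context: the guard added above makes aggregateJobStatus return early once the Job has terminated, so a finished Job's status is never rewritten by later, possibly stale, collections. A minimal self-contained sketch of the signal it checks, using only upstream k8s.io/api types; the standalone jobFinished function is illustrative, not the repo's helper:

package main

import (
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
)

// jobFinished reports whether a Job has terminated: it looks for a
// Complete or Failed condition whose status is True, the same signal
// the helper consulted in the hunk above.
func jobFinished(s *batchv1.JobStatus) bool {
	for _, c := range s.Conditions {
		if (c.Type == batchv1.JobComplete || c.Type == batchv1.JobFailed) && c.Status == corev1.ConditionTrue {
			return true
		}
	}
	return false
}

func main() {
	done := &batchv1.JobStatus{Conditions: []batchv1.JobCondition{
		{Type: batchv1.JobComplete, Status: corev1.ConditionTrue},
	}}
	fmt.Println(jobFinished(done)) // true -> aggregation returns the object unchanged
}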


@@ -146,6 +146,11 @@ func (i *customResourceInterpreterImpl) Retain(desired *unstructured.Unstructure
 func (i *customResourceInterpreterImpl) AggregateStatus(object *unstructured.Unstructured, aggregatedStatusItems []workv1alpha2.AggregatedStatusItem) (*unstructured.Unstructured, error) {
 	klog.V(4).Infof("Begin to aggregate status for object: %v %s/%s.", object.GroupVersionKind(), object.GetNamespace(), object.GetName())
 
+	// If status has not been collected, there is no need to aggregate.
+	if len(aggregatedStatusItems) == 0 {
+		return object, nil
+	}
+
 	obj, hookEnabled, err := i.customizedInterpreter.Patch(context.TODO(), &webhook.RequestAttributes{
 		Operation: configv1alpha1.InterpreterOperationAggregateStatus,
 		Object:    object.DeepCopy(),
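This second guard short-circuits AggregateStatus before the customized-interpreter webhook is even consulted: with no collected status items there is nothing to merge. A minimal sketch of the pattern; aggregate and statusItem are hypothetical stand-ins for the interpreter method and workv1alpha2.AggregatedStatusItem:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

// statusItem stands in for workv1alpha2.AggregatedStatusItem, which
// carries the member-cluster name and the status collected from it.
type statusItem struct {
	ClusterName string
}

// aggregate returns the object untouched when nothing has been
// collected yet, so neither the webhook hook nor the default
// interpreter is invoked.
func aggregate(obj *unstructured.Unstructured, items []statusItem) (*unstructured.Unstructured, error) {
	if len(items) == 0 {
		return obj, nil
	}
	// ... real code would try the customized interpreter first,
	// then fall back to the default interpreter (elided here) ...
	return obj, nil
}

func main() {
	obj := &unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": "batch/v1", "kind": "Job"}}
	out, _ := aggregate(obj, nil)
	fmt.Println(out.GetKind()) // "Job": returned as-is, no aggregation work done
}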


@@ -36,7 +36,7 @@ func ParsingJobStatus(obj *batchv1.Job, status []workv1alpha2.AggregatedStatusIt
 		newStatus.Succeeded += temp.Succeeded
 		newStatus.Failed += temp.Failed
 
-		isFinished, finishedStatus := getJobFinishedStatus(temp)
+		isFinished, finishedStatus := GetJobFinishedStatus(temp)
 		if isFinished && finishedStatus == batchv1.JobComplete {
 			successfulJobs++
 		} else if isFinished && finishedStatus == batchv1.JobFailed {
@@ -88,9 +88,9 @@ func ParsingJobStatus(obj *batchv1.Job, status []workv1alpha2.AggregatedStatusIt
 	return newStatus, nil
 }
 
-// getJobFinishedStatus checks whether the given Job has finished execution.
+// GetJobFinishedStatus checks whether the given Job has finished execution.
 // It does not discriminate between successful and failed terminations.
-func getJobFinishedStatus(jobStatus *batchv1.JobStatus) (bool, batchv1.JobConditionType) {
+func GetJobFinishedStatus(jobStatus *batchv1.JobStatus) (bool, batchv1.JobConditionType) {
 	for _, c := range jobStatus.Conditions {
 		if (c.Type == batchv1.JobComplete || c.Type == batchv1.JobFailed) && c.Status == corev1.ConditionTrue {
 			return true, c.Type
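Renaming getJobFinishedStatus to GetJobFinishedStatus exports it, which is what lets aggregateJobStatus (first hunk above) call it from outside this package. A minimal usage sketch; the import path assumes karmada's helper package lives at github.com/karmada-io/karmada/pkg/util/helper:

package main

import (
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"

	"github.com/karmada-io/karmada/pkg/util/helper"
)

func main() {
	// A Job that terminated unsuccessfully: Failed condition set to True.
	status := &batchv1.JobStatus{Conditions: []batchv1.JobCondition{
		{Type: batchv1.JobFailed, Status: corev1.ConditionTrue},
	}}
	finished, condType := helper.GetJobFinishedStatus(status)
	fmt.Println(finished, condType) // true Failed
}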


@@ -331,7 +331,6 @@ var _ = ginkgo.Describe("[resource-status collection] resource status collection
 	ginkgo.Context("JobStatus collection testing", func() {
 		var jobNamespace, jobName string
 		var job *batchv1.Job
-		var patch []map[string]interface{}
 
 		ginkgo.BeforeEach(func() {
 			policyNamespace = testNamespace
@@ -352,13 +351,6 @@
 				},
 			})
 
-			patch = []map[string]interface{}{
-				{
-					"op":    "replace",
-					"path":  "/spec/placement/clusterAffinity/clusterNames",
-					"value": framework.ClusterNames()[0 : len(framework.ClusterNames())-1],
-				},
-			}
 		})
 
 		ginkgo.BeforeEach(func() {
@@ -386,25 +378,6 @@
 			})
 			gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
 		})
-
-			framework.PatchPropagationPolicy(karmadaClient, policy.Namespace, policyName, patch, types.JSONPatchType)
-
-			ginkgo.By("check if job status has been update with new collection", func() {
-				wantedSucceedPods := int32(len(framework.Clusters()) - 1)
-
-				klog.Infof("Waiting for job(%s/%s) collecting correctly status", jobNamespace, jobName)
-				err := wait.PollImmediate(pollInterval, pollTimeout, func() (done bool, err error) {
-					currentJob, err := kubeClient.BatchV1().Jobs(jobNamespace).Get(context.TODO(), jobName, metav1.GetOptions{})
-					gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-
-					if currentJob.Status.Succeeded == wantedSucceedPods {
-						return true, nil
-					}
-
-					return false, nil
-				})
-				gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-			})
 		})
 	})
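The deleted block re-patched the PropagationPolicy and polled a second time; after this commit the test keeps a single poll of the Job's succeeded count. For reference, a self-contained sketch of that wait.PollImmediate pattern; the interval and timeout values here are assumptions, not the test's actual pollInterval/pollTimeout:

package e2ewait

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitJobSucceeded blocks until the Job reports the wanted number of
// succeeded pods, mirroring the polling check kept in the e2e test.
func waitJobSucceeded(client kubernetes.Interface, ns, name string, want int32) error {
	return wait.PollImmediate(5*time.Second, 5*time.Minute, func() (bool, error) {
		job, err := client.BatchV1().Jobs(ns).Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			return false, err // surface API errors instead of swallowing them
		}
		return job.Status.Succeeded == want, nil
	})
}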