From 3d6637fc329a4d85f750bd9ad729e9f30fa19daf Mon Sep 17 00:00:00 2001 From: wawa0210 Date: Thu, 12 Aug 2021 10:59:23 +0800 Subject: [PATCH] Delete the extra getClusterHealthStatus logic and use the wait.Poll framework directly Signed-off-by: wawa0210 --- .../status/cluster_status_controller.go | 30 ++++++++----------- 1 file changed, 13 insertions(+), 17 deletions(-) diff --git a/pkg/controllers/status/cluster_status_controller.go b/pkg/controllers/status/cluster_status_controller.go index 4bf29c86b..2435842d4 100644 --- a/pkg/controllers/status/cluster_status_controller.go +++ b/pkg/controllers/status/cluster_status_controller.go @@ -131,26 +131,22 @@ func (c *ClusterStatusController) syncClusterStatus(cluster *v1alpha1.Cluster) ( var currentClusterStatus = v1alpha1.ClusterStatus{} - // get the health status of member cluster - online, healthy := getClusterHealthStatus(clusterClient) - + var online, healthy bool // in case of cluster offline, retry a few times to avoid network unstable problems. // Note: retry timeout should not be too long, otherwise will block other cluster reconcile. - if !online { - err := wait.Poll(clusterStatusRetryInterval, clusterStatusRetryTimeout, func() (done bool, err error) { - online, healthy = getClusterHealthStatus(clusterClient) - if !online { - return false, nil - } - klog.V(2).Infof("Cluster(%s) back to online after retry.", cluster.Name) - return true, nil - }) - // error indicates that retry timeout, update cluster status immediately and return. 
- if err != nil { - currentClusterStatus.Conditions = generateReadyCondition(false, false) - setTransitionTime(&cluster.Status, &currentClusterStatus) - return c.updateStatusIfNeeded(cluster, currentClusterStatus) + err = wait.PollImmediate(clusterStatusRetryInterval, clusterStatusRetryTimeout, func() (done bool, err error) { + online, healthy = getClusterHealthStatus(clusterClient) + if !online { + return false, nil } + klog.V(2).Infof("Cluster(%s) back to online after retry.", cluster.Name) + return true, nil + }) + // error indicates that retry timeout, update cluster status immediately and return. + if err != nil { + currentClusterStatus.Conditions = generateReadyCondition(false, false) + setTransitionTime(&cluster.Status, &currentClusterStatus) + return c.updateStatusIfNeeded(cluster, currentClusterStatus) } clusterVersion, err := getKubernetesVersion(clusterClient)