Improve readability of the wait conditions individually
Signed-off-by: Danil-Grigorev <danil.grigorev@suse.com>
parent 392513cfe6
commit 7937262daa

justfile (18 changed lines)
@@ -94,7 +94,7 @@ start-dev: _cleanup-out-dir _create-out-dir _download-kubectl
     kind delete cluster --name dev || true
     kind create cluster --image=kindest/node:v{{KUBE_VERSION}} --config testdata/kind-config.yaml
     just install-capi
-    kubectl wait pods --for=condition=Ready --timeout=300s --all --all-namespaces
+    kubectl wait pods --for=condition=Ready --timeout=500s --all --all-namespaces

 # Stop the local dev environment
 stop-dev:
@@ -171,13 +171,13 @@ release-manifests: _create-out-dir _download-kustomize
 test-import: start-dev deploy deploy-child-cluster deploy-kindnet deploy-app && collect-test-import
     kubectl wait pods --for=condition=Ready --timeout=150s --all --all-namespaces
     kubectl wait cluster --timeout=500s --for=condition=ControlPlaneReady=true docker-demo
-    kubectl wait clusters.fleet.cattle.io --timeout=300s --for=condition=Ready=true docker-demo
+    kubectl wait clusters.fleet.cattle.io --timeout=500s --for=condition=Ready=true docker-demo

 # Full e2e test of importing cluster in fleet
 test-import-rke2: start-dev deploy deploy-child-rke2-cluster deploy-calico-gitrepo deploy-app
     kubectl wait pods --for=condition=Ready --timeout=150s --all --all-namespaces
     kubectl wait cluster --timeout=500s --for=condition=ControlPlaneReady=true docker-demo
-    kubectl wait clusters.fleet.cattle.io --timeout=300s --for=condition=Ready=true docker-demo
+    kubectl wait clusters.fleet.cattle.io --timeout=500s --for=condition=Ready=true docker-demo

 collect-test-import:
     -just collect-artifacts dev
@@ -206,11 +206,15 @@ collect-artifacts cluster:
 # Full e2e test of importing cluster and ClusterClass in fleet
 [private]
 _test-import-all:
-    kubectl wait clustergroups.fleet.cattle.io -n clusterclass --timeout=300s --for=create --for=condition=Ready=true quick-start
+    kubectl wait clustergroups.fleet.cattle.io -n clusterclass --timeout=500s --for=create quick-start
+    kubectl wait clustergroups.fleet.cattle.io -n clusterclass --timeout=500s --for=condition=Ready=true quick-start
     # Verify that cluster group created for cluster referencing clusterclass in a different namespace
-    kubectl wait bundlenamespacemappings.fleet.cattle.io --timeout=300s --for=create -n clusterclass default
-    kubectl wait clustergroups.fleet.cattle.io --timeout=300s --for=create --for=jsonpath='{.status.clusterCount}=1' --for=condition=Ready=true quick-start.clusterclass
-    kubectl wait clusters.fleet.cattle.io --timeout=300s --for=create --for=condition=Ready=true capi-quickstart
+    kubectl wait bundlenamespacemappings.fleet.cattle.io --timeout=500s --for=create -n clusterclass default
+    kubectl wait clustergroups.fleet.cattle.io --timeout=500s --for=create quick-start.clusterclass
+    kubectl wait clustergroups.fleet.cattle.io --timeout=500s --for=jsonpath='{.status.clusterCount}=1' quick-start.clusterclass
+    kubectl wait clustergroups.fleet.cattle.io --timeout=500s --for=condition=Ready=true quick-start.clusterclass
+    kubectl wait clusters.fleet.cattle.io --timeout=500s --for=create capi-quickstart
+    kubectl wait clusters.fleet.cattle.io --timeout=500s --for=condition=Ready=true capi-quickstart

 [private]
 _test-delete-all:

@@ -37,7 +37,6 @@ pub struct BundleNamespaceMapping {

 impl ResourceDiff for BundleNamespaceMapping {
     fn diff(&self, other: &Self) -> bool {
-        self.bundle_selector != other.bundle_selector
-            || self.namespace_selector != other.namespace_selector
+        self != other
     }
 }
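Note: the new diff body above compares whole values rather than individual selector fields, which only works because the type can be compared for equality. A minimal sketch of the pattern, assuming hypothetical stand-ins (Mapping and a local ResourceDiff trait) in place of the repository's real types:

// Stand-ins only; the real BundleNamespaceMapping and ResourceDiff live in this repository.
#[derive(PartialEq)]
struct Mapping {
    bundle_selector: Option<String>,
    namespace_selector: Option<String>,
}

trait ResourceDiff {
    // Returns true when the two resources differ and a patch is needed.
    fn diff(&self, other: &Self) -> bool;
}

impl ResourceDiff for Mapping {
    fn diff(&self, other: &Self) -> bool {
        // Structural inequality replaces the field-by-field check, so any new
        // field is covered automatically as long as the struct stays PartialEq.
        self != other
    }
}

fn main() {
    let a = Mapping { bundle_selector: Some("app=demo".into()), namespace_selector: None };
    let b = Mapping { bundle_selector: Some("app=demo".into()), namespace_selector: Some("team=a".into()) };
    assert!(a.diff(&b)); // differs only in namespace_selector
}
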
@@ -25,7 +25,23 @@ impl ResourceDiff for Cluster {
             return true;
         }

-        let spec_equal = self.spec.template_values == other.spec.template_values
+        let template_values_equal = self
+            .spec
+            .template_values
+            .as_ref()
+            .unwrap_or(&std::collections::BTreeMap::new())
+            .iter()
+            .all(|(k, v)| {
+                other
+                    .spec
+                    .template_values
+                    .as_ref()
+                    .unwrap_or(&std::collections::BTreeMap::new())
+                    .get(k)
+                    == Some(v)
+            });
+
+        let spec_equal = template_values_equal
             && self.spec.agent_namespace == other.spec.agent_namespace
             && self.spec.host_network == other.spec.host_network
             && self.spec.agent_env_vars == other.spec.agent_env_vars
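Note: the rewritten comparison above treats a missing template_values map as empty and checks each entry of self against other key by key, so it is effectively a subset check rather than strict map equality. A self-contained sketch of that comparison, with a plain BTreeMap<String, String> standing in for the real spec field types:

use std::collections::BTreeMap;

// Hypothetical stand-in for the relevant part of the Cluster spec.
struct Spec {
    template_values: Option<BTreeMap<String, String>>,
}

// True when every entry of a.template_values is present in b.template_values
// with the same value; a missing map is treated as an empty one.
fn template_values_subset(a: &Spec, b: &Spec) -> bool {
    a.template_values
        .as_ref()
        .unwrap_or(&BTreeMap::new())
        .iter()
        .all(|(k, v)| {
            b.template_values
                .as_ref()
                .unwrap_or(&BTreeMap::new())
                .get(k)
                == Some(v)
        })
}

fn main() {
    let desired = Spec { template_values: None };
    let observed = Spec {
        template_values: Some(BTreeMap::from([("region".to_string(), "eu".to_string())])),
    };
    // A missing desired map is a subset of anything, so no difference is reported here.
    assert!(template_values_subset(&desired, &observed));
}
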
@@ -515,13 +515,13 @@ pub type ReconcileConfigSyncResult<T> = std::result::Result<T, ReconcileConfigSyncError>;

 #[derive(Error, Debug)]
 pub enum ReconcileConfigSyncError {
-    #[error("Kube Error: {0}")]
-    KubeError(#[from] kube::Error),
+    #[error("Fleet config map fetch error: {0}")]
+    FleetConfigFetch(#[from] kube::Error),

     #[error("Addon config sync error: {0}")]
     AddonConfigSync(#[from] AddonConfigSyncError),

-    #[error("Patch error: {0}")]
+    #[error("Fleet config map patch error: {0}")]
     Patch(#[from] PatchError),
 }

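Note: this change renames KubeError to FleetConfigFetch and rewords both messages so an error states which Fleet config-map operation failed; the wrapped source types stay the same. A reduced sketch of how the updated Patch variant renders with thiserror, using a hypothetical PatchError stand-in so the example compiles without the rest of the controller:

use thiserror::Error;

// Hypothetical stand-in for the controller's real PatchError.
#[derive(Error, Debug)]
#[error("server rejected the patch")]
struct PatchError;

// Reduced copy of the enum after this change (the kube::Error and addon
// variants are omitted so the sketch only needs the thiserror crate).
#[derive(Error, Debug)]
enum ReconcileConfigSyncError {
    #[error("Fleet config map patch error: {0}")]
    Patch(#[from] PatchError),
}

fn main() {
    // #[from] generates the From impl used by `?` and .into().
    let err: ReconcileConfigSyncError = PatchError.into();
    // Prints: Fleet config map patch error: server rejected the patch
    println!("{err}");
}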