add e2e test for lazy propagation policy
Signed-off-by: chaosi-zju <chaosi@zju.edu.cn>
parent f369c55cf7
commit f99c0f6df0
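For readers unfamiliar with the feature under test: the suite builds its policy through testhelper.NewLazyPropagationPolicy. Below is a minimal sketch, not part of this commit, of the kind of object such a helper is assumed to produce: a PropagationPolicy whose activation preference is Lazy, so a policy created after the resource, or edited afterwards, only takes effect once the selected resource itself is updated. The field and constant names (ActivationPreference, LazyActivation) follow the lazy-activation-preference proposal and are assumptions, not code taken from this diff.

```go
package e2e

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
)

// newLazyPolicySketch is a hypothetical stand-in for testhelper.NewLazyPropagationPolicy:
// it selects one Deployment, places it on one cluster, and marks the policy as Lazy.
func newLazyPolicySketch(namespace, name, cluster string) *policyv1alpha1.PropagationPolicy {
	return &policyv1alpha1.PropagationPolicy{
		ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name},
		Spec: policyv1alpha1.PropagationSpec{
			// Assumed constant per the proposal: a Lazy policy stays dormant until the
			// referenced resource is created or updated after the policy change.
			ActivationPreference: policyv1alpha1.LazyActivation,
			ResourceSelectors: []policyv1alpha1.ResourceSelector{{
				APIVersion: "apps/v1",
				Kind:       "Deployment",
				Name:       name, // the test reuses one name for the deployment, configmap and policy
			}},
			Placement: policyv1alpha1.Placement{
				ClusterAffinity: &policyv1alpha1.ClusterAffinity{
					ClusterNames: []string{cluster},
				},
			},
		},
	}
}
```

The e2e cases added below exercise exactly this behaviour: propagation succeeds when the policy exists before the resource, while later policy edits (placement change, PropagateDeps) do not apply until the Deployment is touched again, which the test does by appending a reconcileAt annotation.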
@@ -174,6 +174,26 @@ func UpdateDeploymentAnnotations(client kubernetes.Interface, deployment *appsv1
 	})
 }
 
+// AppendDeploymentAnnotations append deployment's annotations.
+func AppendDeploymentAnnotations(client kubernetes.Interface, deployment *appsv1.Deployment, annotations map[string]string) {
+	ginkgo.By(fmt.Sprintf("Appending Deployment(%s/%s)'s annotations to %v", deployment.Namespace, deployment.Name, annotations), func() {
+		gomega.Eventually(func() error {
+			deploy, err := client.AppsV1().Deployments(deployment.Namespace).Get(context.TODO(), deployment.Name, metav1.GetOptions{})
+			if err != nil {
+				return err
+			}
+			if deploy.Annotations == nil {
+				deploy.Annotations = make(map[string]string, 0)
+			}
+			for k, v := range annotations {
+				deploy.Annotations[k] = v
+			}
+			_, err = client.AppsV1().Deployments(deploy.Namespace).Update(context.TODO(), deploy, metav1.UpdateOptions{})
+			return err
+		}, pollTimeout, pollInterval).ShouldNot(gomega.HaveOccurred())
+	})
+}
+
 // UpdateDeploymentLabels update deployment's labels.
 func UpdateDeploymentLabels(client kubernetes.Interface, deployment *appsv1.Deployment, labels map[string]string) {
 	ginkgo.By(fmt.Sprintf("Updating Deployment(%s/%s)'s labels to %v", deployment.Namespace, deployment.Name, labels), func() {
@@ -23,6 +23,7 @@ import (
 
 	"github.com/onsi/ginkgo/v2"
 	"github.com/onsi/gomega"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 
@@ -46,6 +47,22 @@ func RemovePropagationPolicy(client karmada.Interface, namespace, name string) {
 	})
 }
 
+// RemovePropagationPolicyIfExist delete PropagationPolicy if it exists with karmada client.
+func RemovePropagationPolicyIfExist(client karmada.Interface, namespace, name string) {
+	ginkgo.By(fmt.Sprintf("Removing PropagationPolicy(%s/%s) if it exists", namespace, name), func() {
+		_, err := client.PolicyV1alpha1().PropagationPolicies(namespace).Get(context.TODO(), name, metav1.GetOptions{})
+		if err != nil {
+			if apierrors.IsNotFound(err) {
+				return
+			}
+			gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+		}
+
+		err = client.PolicyV1alpha1().PropagationPolicies(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{})
+		gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+	})
+}
+
 // PatchPropagationPolicy patch PropagationPolicy with karmada client.
 func PatchPropagationPolicy(client karmada.Interface, namespace, name string, patch []map[string]interface{}, patchType types.PatchType) {
 	ginkgo.By(fmt.Sprintf("Patching PropagationPolicy(%s/%s)", namespace, name), func() {
@@ -68,3 +85,14 @@ func UpdatePropagationPolicyWithSpec(client karmada.Interface, namespace, name s
 		gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
 	})
 }
+
+// WaitPropagationPolicyFitWith wait PropagationPolicy sync with fit func.
+func WaitPropagationPolicyFitWith(client karmada.Interface, namespace, name string, fit func(policy *policyv1alpha1.PropagationPolicy) bool) {
+	gomega.Eventually(func() bool {
+		policy, err := client.PolicyV1alpha1().PropagationPolicies(namespace).Get(context.TODO(), name, metav1.GetOptions{})
+		if err != nil {
+			return false
+		}
+		return fit(policy)
+	}, pollTimeout, pollInterval).Should(gomega.Equal(true))
+}
@@ -17,8 +17,11 @@ limitations under the License.
 package e2e
 
 import (
+	"time"
+
 	"github.com/onsi/ginkgo/v2"
 	appsv1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/util/rand"
 
 	policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
@@ -26,48 +29,237 @@ import (
 	testhelper "github.com/karmada-io/karmada/test/helper"
 )
 
+const waitIntervalForLazyPolicyTest = 3 * time.Second
+
+// e2e test for https://github.com/karmada-io/karmada/blob/master/docs/proposals/scheduling/activation-preference/lazy-activation-preference.md#test-plan
 var _ = ginkgo.Describe("Lazy activation policy testing", func() {
-	ginkgo.Context("Policy created before resource testing", func() {
-		var policy *policyv1alpha1.PropagationPolicy
-		var deployment *appsv1.Deployment
-		var targetMember string
+	var namespace string
+	var deploymentName, configMapName, policyName string
+	var originalCluster, modifiedCluster string
+	var deployment *appsv1.Deployment
+	var configMap *corev1.ConfigMap
+	var policy *policyv1alpha1.PropagationPolicy
 
-		ginkgo.BeforeEach(func() {
-			targetMember = framework.ClusterNames()[0]
-			policyNamespace := testNamespace
-			policyName := deploymentNamePrefix + rand.String(RandomStrLength)
+	ginkgo.BeforeEach(func() {
+		namespace = testNamespace
+		deploymentName = deploymentNamePrefix + rand.String(RandomStrLength)
+		configMapName = deploymentName
+		policyName = deploymentName
+		originalCluster = framework.ClusterNames()[0]
+		modifiedCluster = framework.ClusterNames()[1]
 
-			deployment = testhelper.NewDeployment(testNamespace, policyName)
-			policy = testhelper.NewLazyPropagationPolicy(policyNamespace, policyName, []policyv1alpha1.ResourceSelector{
+		deployment = testhelper.NewDeployment(namespace, deploymentName)
+		configMap = testhelper.NewConfigMap(namespace, configMapName, map[string]string{"test": "test"})
+		policy = testhelper.NewLazyPropagationPolicy(namespace, policyName, []policyv1alpha1.ResourceSelector{
 			{
 				APIVersion: deployment.APIVersion,
 				Kind:       deployment.Kind,
-				Name:       deployment.Name,
+				Name:       deploymentName,
 			}}, policyv1alpha1.Placement{
 			ClusterAffinity: &policyv1alpha1.ClusterAffinity{
-				ClusterNames: []string{targetMember},
+				ClusterNames: []string{originalCluster},
 			},
 		})
-		})
+	})
 
-		ginkgo.BeforeEach(func() {
+	ginkgo.Context("1. Policy created before resource", func() {
+		ginkgo.JustBeforeEach(func() {
 			framework.CreatePropagationPolicy(karmadaClient, policy)
+			waitPropagatePolicyReconciled(namespace, policyName)
 			framework.CreateDeployment(kubeClient, deployment)
-			ginkgo.DeferCleanup(func() {
-				framework.RemoveDeployment(kubeClient, deployment.Namespace, deployment.Name)
-			})
 
-			framework.WaitDeploymentPresentOnClusterFitWith(targetMember, deployment.Namespace, deployment.Name,
-				func(deployment *appsv1.Deployment) bool { return true })
+			ginkgo.DeferCleanup(func() {
+				framework.RemovePropagationPolicyIfExist(karmadaClient, namespace, policyName)
+				framework.RemoveDeployment(kubeClient, namespace, deploymentName)
+				framework.WaitDeploymentDisappearOnCluster(originalCluster, namespace, deploymentName)
+			})
 		})
 
-		ginkgo.It("policy created before resource testing", func() {
-			framework.WaitDeploymentPresentOnClusterFitWith(targetMember, deployment.Namespace, deployment.Name,
-				func(deployment *appsv1.Deployment) bool { return true })
-			framework.RemovePropagationPolicy(karmadaClient, policy.Namespace, policy.Name)
-			framework.WaitDeploymentPresentOnClusterFitWith(targetMember, deployment.Namespace, deployment.Name,
-				func(deployment *appsv1.Deployment) bool { return true })
+		// Simple Case 1 (Policy created before resource)
+		// refer: https://github.com/karmada-io/karmada/blob/master/docs/proposals/scheduling/activation-preference/lazy-activation-preference.md#simple-case-1-policy-created-before-resource
+		ginkgo.It("Simple Case 1 (Policy created before resource)", func() {
+			ginkgo.By("step 1: deployment propagate success when policy created before it", func() {
+				waitDeploymentPresentOnCluster(originalCluster, namespace, deploymentName)
+			})
+
+			ginkgo.By("step 2: after policy deleted, deployment still keep previous propagation states", func() {
+				framework.RemovePropagationPolicy(karmadaClient, namespace, policyName)
+				// wait to distinguish whether the state will not change or have no time to change
+				time.Sleep(waitIntervalForLazyPolicyTest)
+				waitDeploymentPresentOnCluster(originalCluster, namespace, deploymentName)
+			})
+		})
+
+		ginkgo.Context("Propagate dependencies", func() {
+			ginkgo.BeforeEach(func() {
+				policy.Spec.PropagateDeps = true
+				mountConfigMapToDeployment(deployment, configMapName)
+			})
+
+			ginkgo.JustBeforeEach(func() {
+				framework.CreateConfigMap(kubeClient, configMap)
+				ginkgo.DeferCleanup(func() {
+					framework.RemoveConfigMap(kubeClient, namespace, configMapName)
+				})
+			})
+
+			// Combined Case 5 (Propagate dependencies)
+			// refer: https://github.com/karmada-io/karmada/blob/master/docs/proposals/scheduling/activation-preference/lazy-activation-preference.md#combined-case-5-propagate-dependencies
+			ginkgo.It("Combined Case 5 (Propagate dependencies)", func() {
+				ginkgo.By("step 1: deployment and its dependencies could propagate success.", func() {
+					waitDeploymentPresentOnCluster(originalCluster, namespace, deploymentName)
+					waitConfigMapPresentOnCluster(originalCluster, namespace, deploymentName)
+				})
+
+				ginkgo.By("step 2: change of lazy policy will not take effect", func() {
+					changePlacementTargetCluster(policy, modifiedCluster)
+					// wait to distinguish whether the policy will not take effect or have no time to take effect
+					time.Sleep(waitIntervalForLazyPolicyTest)
+					waitDeploymentPresentOnCluster(originalCluster, namespace, deploymentName)
+					waitConfigMapPresentOnCluster(originalCluster, namespace, deploymentName)
+				})
+
+				ginkgo.By("step 3: lazy policy take effect when deployment updated, dependencies can also been propagated.", func() {
+					updateDeploymentManually(deployment)
+					waitDeploymentPresentOnCluster(modifiedCluster, namespace, deploymentName)
+					waitConfigMapPresentOnCluster(modifiedCluster, namespace, deploymentName)
+				})
+			})
+		})
+	})
+
+	ginkgo.Context("2. Policy created after resource", func() {
+		ginkgo.JustBeforeEach(func() {
+			framework.CreateDeployment(kubeClient, deployment)
+			waitDeploymentReconciled(namespace, deploymentName)
+			framework.CreatePropagationPolicy(karmadaClient, policy)
+
+			ginkgo.DeferCleanup(func() {
+				framework.RemovePropagationPolicy(karmadaClient, namespace, policyName)
+				framework.RemoveDeployment(kubeClient, namespace, deploymentName)
+				framework.WaitDeploymentDisappearOnCluster(originalCluster, namespace, deploymentName)
+			})
+		})
+
+		// Simple Case 2 (Policy created after resource)
+		// refer: https://github.com/karmada-io/karmada/blob/master/docs/proposals/scheduling/activation-preference/lazy-activation-preference.md#simple-case-2-policy-created-after-resource
+		ginkgo.It("Simple Case 2 (Policy created after resource)", func() {
+			ginkgo.By("step1: deployment would not propagate when lazy policy created after deployment", func() {
+				// wait to distinguish whether the deployment will not propagate or have no time to propagate
+				time.Sleep(waitIntervalForLazyPolicyTest)
+				framework.WaitDeploymentDisappearOnCluster(originalCluster, namespace, deploymentName)
+			})
+
+			ginkgo.By("step2: resource would propagate when itself updated", func() {
+				updateDeploymentManually(deployment)
+				waitDeploymentPresentOnCluster(originalCluster, namespace, deploymentName)
+			})
+		})
+
+		ginkgo.Context("Propagate dependencies", func() {
+			ginkgo.BeforeEach(func() {
+				mountConfigMapToDeployment(deployment, configMapName)
+				framework.CreateConfigMap(kubeClient, configMap)
+				ginkgo.DeferCleanup(func() {
+					framework.RemoveConfigMap(kubeClient, namespace, configMapName)
+				})
+			})
+
+			// Combined Case 6 (Propagate dependencies)
+			// refer: https://github.com/karmada-io/karmada/blob/master/docs/proposals/scheduling/activation-preference/lazy-activation-preference.md#combined-case-6-propagate-dependencies
+			ginkgo.It("Combined Case 6 (Propagate dependencies)", func() {
+				ginkgo.By("step 1: resources would not propagate when lazy policy created after resources", func() {
+					// wait to distinguish whether the resource will not propagate or have no time to propagate
+					time.Sleep(waitIntervalForLazyPolicyTest)
+					framework.WaitDeploymentDisappearOnCluster(originalCluster, namespace, deploymentName)
+					framework.WaitConfigMapDisappearOnCluster(originalCluster, namespace, configMapName)
+				})
+
+				ginkgo.By("step 2: configMap not propagate with deployment since policy PropagateDeps unset", func() {
+					updateDeploymentManually(deployment)
+					waitDeploymentPresentOnCluster(originalCluster, namespace, deploymentName)
+					framework.WaitConfigMapDisappearOnCluster(originalCluster, namespace, configMapName)
+				})
+
+				ginkgo.By("step 3: set PropagateDeps of a lazy policy would not take effect immediately", func() {
+					setPolicyPropagateDeps(policy)
+					// wait to distinguish whether the policy will not take effect or have no time to take effect
+					time.Sleep(waitIntervalForLazyPolicyTest)
+					waitDeploymentPresentOnCluster(originalCluster, namespace, deploymentName)
+					framework.WaitConfigMapDisappearOnCluster(originalCluster, namespace, configMapName)
+				})
+
+				ginkgo.By("step 4: set PropagateDeps of a lazy policy take effect when deployment itself updated", func() {
+					updateDeploymentManually(deployment)
+					waitDeploymentPresentOnCluster(originalCluster, namespace, deploymentName)
+					waitConfigMapPresentOnCluster(originalCluster, namespace, deploymentName)
+				})
+			})
+		})
 	})
 })
+
+// updateDeploymentManually manually update deployment
+func updateDeploymentManually(deployment *appsv1.Deployment) {
+	framework.AppendDeploymentAnnotations(kubeClient, deployment, map[string]string{"reconcileAt": time.Now().Format(time.RFC3339)})
+}
+
+// waitDeploymentPresentOnCluster wait deployment present on cluster
+func waitDeploymentPresentOnCluster(cluster, namespace, name string) {
+	framework.WaitDeploymentPresentOnClusterFitWith(cluster, namespace, name, func(_ *appsv1.Deployment) bool {
+		return true
+	})
+}
+
+// waitDeploymentReconciled wait reconciliation of deployment finished
+func waitDeploymentReconciled(namespace, name string) {
+	framework.WaitDeploymentFitWith(kubeClient, namespace, name, func(_ *appsv1.Deployment) bool {
+		// when applying deployment and policy sequentially, we expect deployment to perform the reconcile process before policy,
+		// but the order is actually uncertain, so we sleep a while to wait reconciliation of deployment finished.
+		time.Sleep(waitIntervalForLazyPolicyTest)
+		return true
+	})
+}
+
+// waitPropagatePolicyReconciled wait reconciliation of PropagatePolicy finished
+func waitPropagatePolicyReconciled(namespace, name string) {
+	framework.WaitPropagationPolicyFitWith(karmadaClient, namespace, name, func(_ *policyv1alpha1.PropagationPolicy) bool {
+		// when applying policy and deployment sequentially, we expect policy to perform the reconcile process before deployment,
+		// but the order is actually uncertain, so we sleep a while to wait reconciliation of policy finished.
+		time.Sleep(waitIntervalForLazyPolicyTest)
+		return true
+	})
+}
+
+// waitConfigMapPresentOnCluster wait configmap present on cluster
+func waitConfigMapPresentOnCluster(cluster, namespace, name string) {
+	framework.WaitConfigMapPresentOnClusterFitWith(cluster, namespace, name, func(_ *corev1.ConfigMap) bool {
+		return true
+	})
+}
+
+// mountConfigMapToDeployment mount ConfigMap to Deployment
+func mountConfigMapToDeployment(deployment *appsv1.Deployment, configMapName string) {
+	volumes := []corev1.Volume{{
+		Name: "vol-configmap",
+		VolumeSource: corev1.VolumeSource{
+			ConfigMap: &corev1.ConfigMapVolumeSource{
+				LocalObjectReference: corev1.LocalObjectReference{
+					Name: configMapName,
+				}}}}}
+	deployment.Spec.Template.Spec.Volumes = volumes
+}
+
+// changePlacementTargetCluster change policy target cluster to @modifiedCluster
+func changePlacementTargetCluster(policy *policyv1alpha1.PropagationPolicy, modifiedCluster string) {
+	policySpec := policy.Spec
+	policySpec.Placement.ClusterAffinity = &policyv1alpha1.ClusterAffinity{ClusterNames: []string{modifiedCluster}}
+	framework.UpdatePropagationPolicyWithSpec(karmadaClient, policy.Namespace, policy.Name, policySpec)
+}
+
+// setPolicyPropagateDeps set PropagateDeps of policy to true
+func setPolicyPropagateDeps(policy *policyv1alpha1.PropagationPolicy) {
+	policySpec := policy.Spec
+	policySpec.PropagateDeps = true
+	framework.UpdatePropagationPolicyWithSpec(karmadaClient, policy.Namespace, policy.Name, policySpec)
+}