Add ut for node.go
Signed-off-by: jwcesign <jiangwei115@huawei.com>
parent 9fc1a124b5
commit 3d8cce7473
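The refactor below replaces the concrete *kubernetes.Clientset with the kubernetes.Interface abstraction in the cmdinit helpers and in the CommandInitOption struct, so the new unit tests can inject a fake clientset instead of connecting to a real API server. The following is a minimal sketch of that pattern, not code from this commit; nodeExists and TestNodeExists are hypothetical names used only for illustration:

package kubernetes

import (
    "context"
    "testing"

    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/kubernetes/fake"
)

// nodeExists accepts any implementation of kubernetes.Interface, so production
// code can pass *kubernetes.Clientset while tests pass *fake.Clientset.
func nodeExists(c kubernetes.Interface, name string) bool {
    _, err := c.CoreV1().Nodes().Get(context.TODO(), name, metav1.GetOptions{})
    return err == nil
}

func TestNodeExists(t *testing.T) {
    // Seed the fake clientset with one node; no cluster is contacted.
    client := fake.NewSimpleClientset(&corev1.Node{
        ObjectMeta: metav1.ObjectMeta{Name: "node1"},
    })
    if !nodeExists(client, "node1") {
        t.Errorf("expected node1 to exist")
    }
    if nodeExists(client, "node2") {
        t.Errorf("expected node2 to be absent")
    }
}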
@@ -30,7 +30,7 @@ func podStatus(pod *corev1.Pod) string {
         return pod.Status.ContainerStatuses[0].State.Waiting.Reason
     }

-func isPodReady(c *kubernetes.Clientset, n, p string) wait.ConditionFunc {
+func isPodReady(c kubernetes.Interface, n, p string) wait.ConditionFunc {
     return func() (done bool, err error) {
         pod, err := c.CoreV1().Pods(n).Get(context.TODO(), p, metav1.GetOptions{})
         if err != nil {
@@ -61,12 +61,12 @@ func isPodReady(c *kubernetes.Clientset, n, p string) wait.ConditionFunc {

 // waitPodReady Poll up to timeout seconds for pod to enter running state.
 // Returns an error if the pod never enters the running state.
-func waitPodReady(c *kubernetes.Clientset, namespaces, podName string, timeout time.Duration) error {
+func waitPodReady(c kubernetes.Interface, namespaces, podName string, timeout time.Duration) error {
     return wait.PollImmediate(time.Second, timeout, isPodReady(c, namespaces, podName))
 }

 // WaitPodReady wait pod ready
-func WaitPodReady(c *kubernetes.Clientset, namespace, selector string, timeout int) error {
+func WaitPodReady(c kubernetes.Interface, namespace, selector string, timeout int) error {
     // Wait 3 second
     time.Sleep(3 * time.Second)
     pods, err := c.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: selector})
@@ -88,7 +88,7 @@ func WaitPodReady(c *kubernetes.Clientset, namespace, selector string, timeout i
 }

 // WaitEtcdReplicasetInDesired Wait Etcd Ready
-func WaitEtcdReplicasetInDesired(replicas int32, c *kubernetes.Clientset, namespace, selector string, timeout int) error {
+func WaitEtcdReplicasetInDesired(replicas int32, c kubernetes.Interface, namespace, selector string, timeout int) error {
     if err := wait.PollImmediate(time.Second, time.Duration(timeout)*time.Second, func() (done bool, err error) {
         pods, e := c.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: selector})
         if e != nil {
@@ -121,7 +121,7 @@ type CommandInitOption struct {
     CRDs               string
     ExternalIP         string
     ExternalDNS        string
-    KubeClientSet      *kubernetes.Clientset
+    KubeClientSet      kubernetes.Interface
     CertAndKeyFileData map[string][]byte
     RestConfig         *rest.Config
     KarmadaAPIServerIP []net.IP
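With kubernetes.Interface in the signatures above, the pod-wait helpers become unit-testable in the same way as the node helpers tested later in this commit. The sketch below is not part of the commit; it assumes isPodReady is satisfied by a pod whose containers are running and ready, which is not fully shown in the hunk above, and it uses the same imports as the node_test.go file below plus "time":

func TestWaitPodReady_sketch(t *testing.T) {
    // Assumed fixture: a pod that already satisfies the (elided) readiness check.
    client := fake.NewSimpleClientset(&corev1.Pod{
        ObjectMeta: metav1.ObjectMeta{Name: "demo-pod", Namespace: "default"},
        Status: corev1.PodStatus{
            Phase: corev1.PodRunning,
            ContainerStatuses: []corev1.ContainerStatus{
                {Ready: true, State: corev1.ContainerState{Running: &corev1.ContainerStateRunning{}}},
            },
        },
    })
    // waitPodReady polls isPodReady once per second until the timeout elapses.
    if err := waitPodReady(client, "default", "demo-pod", 3*time.Second); err != nil {
        t.Errorf("waitPodReady() returned error: %v", err)
    }
}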
@@ -5,7 +5,6 @@ import (
     "encoding/json"
     "errors"
     "fmt"
-    "strings"

     corev1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -15,11 +14,6 @@ import (
     "github.com/karmada-io/karmada/pkg/karmadactl/cmdinit/utils"
 )

-var (
-    etcdNodeName       string
-    etcdSelectorLabels map[string]string
-)
-
 func (i *CommandInitOption) getKarmadaAPIServerIP() error {
     if i.KarmadaAPIServerAdvertiseAddress != "" {
         i.KarmadaAPIServerIP = append(i.KarmadaAPIServerIP, utils.StringToNetIP(i.KarmadaAPIServerAdvertiseAddress))
@@ -67,8 +61,8 @@ func (i *CommandInitOption) getKarmadaAPIServerIP() error {
 }

 // nodeStatus Check the node status, if it is an unhealthy node, return false.
-func nodeStatus(node []corev1.NodeCondition) bool {
-    for _, v := range node {
+func nodeStatus(nodeConditions []corev1.NodeCondition) bool {
+    for _, v := range nodeConditions {
         switch v.Type {
         case corev1.NodeReady:
             if v.Status != corev1.ConditionTrue {
@@ -103,6 +97,8 @@ func (i *CommandInitOption) AddNodeSelectorLabels() error {
         return err
     }

+    var etcdNodeName string
+    var etcdSelectorLabels map[string]string
     for _, v := range nodes.Items {
         if v.Spec.Taints != nil {
             continue
@@ -111,6 +107,9 @@ func (i *CommandInitOption) AddNodeSelectorLabels() error {
         if nodeStatus(v.Status.Conditions) {
             etcdNodeName = v.Name
             etcdSelectorLabels = v.Labels
+            if etcdSelectorLabels == nil {
+                etcdSelectorLabels = map[string]string{}
+            }
             break
         }
     }
@@ -130,8 +129,7 @@ func (i *CommandInitOption) AddNodeSelectorLabels() error {
 }

 func (i *CommandInitOption) isNodeExist(labels string) bool {
-    l := strings.Split(labels, "=")
-    node, err := i.KubeClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{LabelSelector: l[0]})
+    node, err := i.KubeClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{LabelSelector: labels})
     if err != nil {
         return false
     }
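The isNodeExist hunk above also tightens the selector handling: previously only the label key (l[0]) was passed to List, so any node carrying that key matched regardless of value; now the full key=value string is used. A small standalone sketch of the selector semantics this relies on (illustrative, not from the commit):

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/labels"
)

func main() {
    // metav1.ListOptions.LabelSelector uses this same selector syntax.
    sel, err := labels.Parse("foo=bar")
    if err != nil {
        panic(err)
    }
    fmt.Println(sel.Matches(labels.Set{"foo": "bar"})) // true
    fmt.Println(sel.Matches(labels.Set{"foo": "baz"})) // false: the key alone is no longer enough
    fmt.Println(sel.Matches(labels.Set{"bar": "foo"})) // false
}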
@@ -0,0 +1,251 @@
+package kubernetes
+
+import (
+    "context"
+    "testing"
+
+    corev1 "k8s.io/api/core/v1"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/client-go/kubernetes/fake"
+)
+
+func TestCommandInitOption_getKarmadaAPIServerIP(t *testing.T) {
+    tests := []struct {
+        name    string
+        option  CommandInitOption
+        nodes   []string
+        labels  map[string]string
+        wantErr bool
+    }{
+        {
+            name: "KarmadaAPIServerAdvertiseAddress is not empty",
+            option: CommandInitOption{
+                KubeClientSet:                    fake.NewSimpleClientset(),
+                KarmadaAPIServerAdvertiseAddress: "127.0.0.1",
+            },
+            nodes:   []string{"node1"},
+            labels:  map[string]string{},
+            wantErr: false,
+        },
+        {
+            name: "three nodes but they are not master",
+            option: CommandInitOption{
+                KubeClientSet: fake.NewSimpleClientset(),
+            },
+            nodes:   []string{"node1", "node2", "node3"},
+            labels:  map[string]string{},
+            wantErr: false,
+        },
+        {
+            name: "three master nodes",
+            option: CommandInitOption{
+                KubeClientSet: fake.NewSimpleClientset(),
+            },
+            nodes:   []string{"node1", "node2", "node3"},
+            labels:  map[string]string{"node-role.kubernetes.io/control-plane": ""},
+            wantErr: false,
+        },
+        {
+            name: "no nodes",
+            option: CommandInitOption{
+                KubeClientSet: fake.NewSimpleClientset(),
+            },
+            wantErr: true,
+        },
+    }
+    for _, tt := range tests {
+        t.Run(tt.name, func(t *testing.T) {
+            for _, v := range tt.nodes {
+                _, err := tt.option.KubeClientSet.CoreV1().Nodes().Create(context.Background(), &corev1.Node{
+                    ObjectMeta: metav1.ObjectMeta{
+                        Name:   v,
+                        Labels: tt.labels,
+                    },
+                    Status: corev1.NodeStatus{
+                        Addresses: []corev1.NodeAddress{
+                            {Address: "127.0.0.1"},
+                        },
+                    },
+                }, metav1.CreateOptions{})
+                if err != nil {
+                    t.Errorf("create node error: %v", err)
+                }
+            }
+            if err := tt.option.getKarmadaAPIServerIP(); (err != nil) != tt.wantErr {
+                t.Errorf("CommandInitOption.getKarmadaAPIServerIP() = %v, want error:%v", err, tt.wantErr)
+            }
+        })
+    }
+}
+
+func Test_nodeStatus(t *testing.T) {
+    tests := []struct {
+        name           string
+        nodeConditions []corev1.NodeCondition
+        isHealth       bool
+    }{
+        {
+            name: "node is ready",
+            nodeConditions: []corev1.NodeCondition{
+                {Type: corev1.NodeReady, Status: corev1.ConditionTrue},
+            },
+            isHealth: true,
+        },
+        {
+            name: "node is unready",
+            nodeConditions: []corev1.NodeCondition{
+                {Type: corev1.NodeReady, Status: corev1.ConditionFalse},
+            },
+            isHealth: false,
+        },
+        {
+            name: "node's memory pressure is true",
+            nodeConditions: []corev1.NodeCondition{
+                {Type: corev1.NodeMemoryPressure, Status: corev1.ConditionTrue},
+            },
+            isHealth: false,
+        },
+        {
+            name: "node's memory pressure is false",
+            nodeConditions: []corev1.NodeCondition{
+                {Type: corev1.NodeMemoryPressure, Status: corev1.ConditionFalse},
+            },
+            isHealth: true,
+        },
+        {
+            name: "node's disk pressure is true",
+            nodeConditions: []corev1.NodeCondition{
+                {Type: corev1.NodeDiskPressure, Status: corev1.ConditionTrue},
+            },
+            isHealth: false,
+        },
+        {
+            name: "node's disk pressure is false",
+            nodeConditions: []corev1.NodeCondition{
+                {Type: corev1.NodeDiskPressure, Status: corev1.ConditionFalse},
+            },
+            isHealth: true,
+        },
+        {
+            name: "node's network unavailable is false",
+            nodeConditions: []corev1.NodeCondition{
+                {Type: corev1.NodeNetworkUnavailable, Status: corev1.ConditionFalse},
+            },
+            isHealth: true,
+        },
+        {
+            name: "node's network unavailable is true",
+            nodeConditions: []corev1.NodeCondition{
+                {Type: corev1.NodeNetworkUnavailable, Status: corev1.ConditionTrue},
+            },
+            isHealth: false,
+        },
+        {
+            name: "node's pid pressure is false",
+            nodeConditions: []corev1.NodeCondition{
+                {Type: corev1.NodePIDPressure, Status: corev1.ConditionFalse},
+            },
+            isHealth: true,
+        },
+        {
+            name: "node's pid pressure is true",
+            nodeConditions: []corev1.NodeCondition{
+                {Type: corev1.NodePIDPressure, Status: corev1.ConditionTrue},
+            },
+            isHealth: false,
+        },
+    }
+    for _, tt := range tests {
+        t.Run(tt.name, func(t *testing.T) {
+            if got := nodeStatus(tt.nodeConditions); got != tt.isHealth {
+                t.Errorf("nodeStatus() = %v, want %v", got, tt.isHealth)
+            }
+        })
+    }
+}
+
+func TestCommandInitOption_AddNodeSelectorLabels(t *testing.T) {
+    tests := []struct {
+        name    string
+        option  CommandInitOption
+        status  corev1.ConditionStatus
+        wantErr bool
+    }{
+        {
+            name: "there is healthy node",
+            option: CommandInitOption{
+                KubeClientSet: fake.NewSimpleClientset(),
+            },
+            status:  corev1.ConditionTrue,
+            wantErr: false,
+        },
+        {
+            name: "there is unhealthy node",
+            option: CommandInitOption{
+                KubeClientSet: fake.NewSimpleClientset(),
+            },
+            status:  corev1.ConditionFalse,
+            wantErr: true,
+        },
+    }
+    for _, tt := range tests {
+        t.Run(tt.name, func(t *testing.T) {
+            _, err := tt.option.KubeClientSet.CoreV1().Nodes().Create(context.Background(), &corev1.Node{
+                ObjectMeta: metav1.ObjectMeta{Name: "test-node"},
+                Status: corev1.NodeStatus{
+                    Conditions: []corev1.NodeCondition{
+                        {Type: corev1.NodeReady, Status: tt.status},
+                    },
+                },
+            }, metav1.CreateOptions{})
+            if err != nil {
+                t.Errorf("create node error: %v", err)
+            }
+            if err := tt.option.AddNodeSelectorLabels(); (err != nil) != tt.wantErr {
+                t.Errorf("CommandInitOption.AddNodeSelectorLabels() error = %v, wantErr %v", err, tt.wantErr)
+            }
+        })
+    }
+}
+
+func TestCommandInitOption_isNodeExist(t *testing.T) {
+    tests := []struct {
+        name     string
+        option   CommandInitOption
+        nodeName string
+        labels   map[string]string
+        exists   bool
+    }{
+        {
+            name: "there is matched node",
+            option: CommandInitOption{
+                KubeClientSet: fake.NewSimpleClientset(),
+            },
+            nodeName: "node1",
+            labels:   map[string]string{"foo": "bar"},
+            exists:   true,
+        },
+        {
+            name: "there is no matched node",
+            option: CommandInitOption{
+                KubeClientSet: fake.NewSimpleClientset(),
+            },
+            nodeName: "node2",
+            labels:   map[string]string{"bar": "foo"},
+            exists:   false,
+        },
+    }
+    for _, tt := range tests {
+        t.Run(tt.name, func(t *testing.T) {
+            _, err := tt.option.KubeClientSet.CoreV1().Nodes().Create(context.Background(), &corev1.Node{
+                ObjectMeta: metav1.ObjectMeta{Name: tt.nodeName, Labels: tt.labels},
+            }, metav1.CreateOptions{})
+            if err != nil {
+                t.Errorf("create node error: %v", err)
+            }
+            if got := tt.option.isNodeExist("foo=bar"); got != tt.exists {
+                t.Errorf("CommandInitOption.isNodeExist() = %v, want %v", got, tt.exists)
+            }
+        })
+    }
+}
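A note on the fixtures above: instead of calling Create inside each subtest, the fake clientset can be seeded with objects at construction time, which is equivalent for these tests. A sketch of that alternative setup (illustrative only, using the same imports as node_test.go):

func TestIsNodeExist_seeded(t *testing.T) {
    // Pre-seed the fake clientset rather than creating nodes in the subtest body.
    option := CommandInitOption{
        KubeClientSet: fake.NewSimpleClientset(
            &corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: map[string]string{"foo": "bar"}}},
            &corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: map[string]string{"bar": "foo"}}},
        ),
    }
    if !option.isNodeExist("foo=bar") {
        t.Errorf("expected node1 to match foo=bar")
    }
}

Assuming the package sits at pkg/karmadactl/cmdinit/kubernetes, as the sibling utils import suggests, the new cases run with: go test ./pkg/karmadactl/cmdinit/kubernetes/...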