Merge pull request #707 from DualSpark/node-validation

New feature - node validation
This commit is contained in:
Kris Childress 2016-11-29 16:47:44 -07:00 committed by GitHub
commit 72c5868966
13 changed files with 1208 additions and 8 deletions

View File

@ -77,6 +77,7 @@ codegen: kops-gobindata
test:
go test k8s.io/kops/upup/pkg/... -args -v=1 -logtostderr
go test k8s.io/kops/pkg/... -args -v=1 -logtostderr
crossbuild:
mkdir -p .build/dist/
@ -199,6 +200,7 @@ gofmt:
gofmt -w -s cmd/
gofmt -w -s examples/
gofmt -w -s federation/
gofmt -w -s pkg/
gofmt -w -s util/
gofmt -w -s upup/pkg/
gofmt -w -s pkg/
@ -211,6 +213,7 @@ gofmt:
govet:
go vet \
k8s.io/kops/cmd/... \
k8s.io/kops/pkg/... \
k8s.io/kops/channels/... \
k8s.io/kops/examples/... \
k8s.io/kops/federation/... \

cmd/kops/validate.go Normal file
View File

@ -0,0 +1,43 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"github.com/spf13/cobra"
)
// ValidateCmd represents the validate command
type ValidateCmd struct {
output string
cobraCommand *cobra.Command
}
var validateCmd = ValidateCmd{
cobraCommand: &cobra.Command{
Use: "validate",
SuggestFor: []string{"list"},
Short: "Validate Cluster",
Long: `Validate a Kubernetes Cluster`,
},
}
func init() {
cmd := validateCmd.cobraCommand
rootCommand.AddCommand(cmd)
}

View File

@ -0,0 +1,163 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"strings"
"github.com/spf13/cobra"
api "k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/util/pkg/tables"
k8sapi "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"errors"
"fmt"
"os"
)
// ValidateClusterCmd holds options for the validate cluster command (not used much yet)
type ValidateClusterCmd struct {
}
var validateClusterCmd ValidateClusterCmd
// init registers the cluster subcommand under kops validate
func init() {
cmd := &cobra.Command{
Use: "cluster",
Aliases: []string{"cluster"},
Short: "Validate cluster",
Long: `Validate a Kubernetes cluster`,
Run: func(cmd *cobra.Command, args []string) {
err := validateClusterCmd.Run(args)
if err != nil {
exitWithError(err)
}
},
}
validateCmd.cobraCommand.AddCommand(cmd)
}
// Run validates the Kubernetes cluster
func (c *ValidateClusterCmd) Run(args []string) error {
err := rootCommand.ProcessArgs(args)
if err != nil {
return fmt.Errorf("Process args failed %v", err)
}
cluster, err := rootCommand.Cluster()
if err != nil {
return fmt.Errorf("Cannot get cluster for %v", err)
}
clientSet, err := rootCommand.Clientset()
if err != nil {
return fmt.Errorf("Cannot get clientSet for %q: %v", cluster.Name, err)
}
list, err := clientSet.InstanceGroups(cluster.Name).List(k8sapi.ListOptions{})
if err != nil {
return fmt.Errorf("Cannot get nodes for %q: %v", cluster.Name, err)
}
fmt.Printf("Validating cluster %v\n\n", cluster.Name)
var instanceGroups []*api.InstanceGroup
// take the address of the slice element, not the loop variable, so each
// pointer refers to a distinct InstanceGroup
for i := range list.Items {
instanceGroups = append(instanceGroups, &list.Items[i])
}
if len(instanceGroups) == 0 {
return errors.New("No InstanceGroup objects found\n")
}
validationCluster, validationFailed := api.ValidateCluster(cluster.Name, list)
if validationCluster.NodeList == nil {
return fmt.Errorf("Cannot get nodes for %q: %v", cluster.Name, validationFailed)
}
t := &tables.Table{}
t.AddColumn("NAME", func(c *api.InstanceGroup) string {
return c.Name
})
t.AddColumn("ROLE", func(c *api.InstanceGroup) string {
return string(c.Spec.Role)
})
t.AddColumn("MACHINETYPE", func(c *api.InstanceGroup) string {
return c.Spec.MachineType
})
t.AddColumn("ZONES", func(c *api.InstanceGroup) string {
return strings.Join(c.Spec.Zones, ",")
})
t.AddColumn("MIN", func(c *api.InstanceGroup) string {
return intPointerToString(c.Spec.MinSize)
})
t.AddColumn("MAX", func(c *api.InstanceGroup) string {
return intPointerToString(c.Spec.MaxSize)
})
fmt.Println("INSTANCE GROUPS")
err = t.Render(instanceGroups, os.Stdout, "NAME", "ROLE", "MACHINETYPE", "MIN", "MAX", "ZONES")
if err != nil {
return fmt.Errorf("Cannot render nodes for %q: %v", cluster.Name, err)
}
t = &tables.Table{}
t.AddColumn("NAME", func(n v1.Node) string {
return n.Name
})
t.AddColumn("READY", func(n v1.Node) v1.ConditionStatus {
return api.GetNodeConditionStatus(n.Status.Conditions)
})
t.AddColumn("ROLE", func(n v1.Node) string {
role := "node"
if val, ok := n.ObjectMeta.Labels["kubernetes.io/role"]; ok {
role = val
}
return role
})
fmt.Println("\nNODE STATUS")
err = t.Render(validationCluster.NodeList.Items, os.Stdout, "NAME", "ROLE", "READY")
if err != nil {
return fmt.Errorf("Cannot render nodes for %q: %v", cluster.Name, err)
}
if validationFailed == nil {
fmt.Printf("\nYour cluster %s is ready\n", cluster.Name)
return nil
} else {
// do we need to print which instance group is not ready?
// nodes are going to be a pain
fmt.Printf("cluster - masters ready: %v, nodes ready: %v", validationCluster.MastersReady, validationCluster.NodesReady)
fmt.Printf("mastersNotReady %v", len(validationCluster.MastersNotReadyArray))
fmt.Printf("mastersCount %v, mastersReady %v", validationCluster.MastersCount, len(validationCluster.MastersReadyArray))
fmt.Printf("nodesNotReady %v", len(validationCluster.NodesNotReadyArray))
fmt.Printf("nodesCount %v, nodesReady %v", validationCluster.NodesCount, len(validationCluster.NodesReadyArray))
return fmt.Errorf("\nYour cluster %s is NOT ready.", cluster.Name)
}
}

View File

@ -37,5 +37,6 @@ It allows you to create, destroy, upgrade and maintain clusters.
* [kops toolbox](kops_toolbox.md) - Misc infrequently used commands
* [kops update](kops_update.md) - update clusters
* [kops upgrade](kops_upgrade.md) - upgrade clusters
* [kops validate](kops_validate.md) - Validate Cluster
* [kops version](kops_version.md) - Print the client version information

View File

@ -18,8 +18,8 @@ kops completion
```
# load in the kops completion code for bash (depends on the bash-completion framework).
source <(kops completion bash)
```
### Options

View File

@ -25,18 +25,17 @@ kops create cluster
--master-zones string Zones in which to run masters (must be an odd number)
--model string Models to apply (separate multiple models with commas) (default "config,proto,cloudup")
--network-cidr string Set to override the default network CIDR
--networking string Networking mode to use. kubenet (default), classic, external, cni. (default "kubenet")
--networking string Networking mode to use. kubenet (default), classic, external, cni, kopeio-vxlan, weave. (default "kubenet")
--node-count int Set the number of nodes
--node-size string Set instance size for nodes
--out string Path to write any local output
--project string Project to use (must be set on GCE)
--ssh-public-key string SSH public key to use (default "~/.ssh/id_rsa.pub")
--target string Target - direct, terraform (default "direct")
-t, --topology string Controls network topology for the cluster. public|private. Default is 'public'. (default "public")
--vpc string Set to use a shared VPC
--yes Specify --yes to immediately create the cluster
--zones string Zones in which to run the cluster
--topology string Specify --topology=[public|private] to enable/disable public/private networking for all master and nodes. Default is 'public'
```
### Options inherited from parent commands
@ -56,3 +55,4 @@ kops create cluster
### SEE ALSO
* [kops create](kops_create.md) - Create a resource by filename or stdin

docs/cli/kops_validate.md Normal file
View File

@ -0,0 +1,28 @@
## kops validate
Validate Cluster
### Synopsis
Validate a Kubernetes Cluster
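For example, to validate a cluster by name against your state store (placeholder values shown):
```
kops validate cluster --name mycluster.example.com --state s3://my-state-store
```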
### Options inherited from parent commands
```
--alsologtostderr log to standard error as well as files
--config string config file (default is $HOME/.kops.yaml)
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
--log_dir string If non-empty, write log files in this directory
--logtostderr log to standard error instead of files (default false)
--name string Name of cluster
--state string Location of state storage
--stderrthreshold severity logs at or above this threshold go to stderr (default 2)
-v, --v Level log level for V logs
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
```
### SEE ALSO
* [kops](kops.md) - kops is kubernetes ops
* [kops validate cluster](kops_validate_cluster.md) - Validate cluster

View File

@ -0,0 +1,31 @@
## kops validate cluster
Validate cluster
### Synopsis
Validate a Kubernetes cluster
```
kops validate cluster
```
### Options inherited from parent commands
```
--alsologtostderr log to standard error as well as files
--config string config file (default is $HOME/.kops.yaml)
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
--log_dir string If non-empty, write log files in this directory
--logtostderr log to standard error instead of files (default false)
--name string Name of cluster
--state string Location of state storage
--stderrthreshold severity logs at or above this threshold go to stderr (default 2)
-v, --v Level log level for V logs
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
```
### SEE ALSO
* [kops validate](kops_validate.md) - Validate Cluster

View File

@ -0,0 +1,384 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kops
import (
"time"
"fmt"
"github.com/golang/glog"
"github.com/pkg/errors"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
"k8s.io/kubernetes/pkg/util/wait"
)
const (
// How often to Poll pods, nodes and claims.
Poll = 2 * time.Second
// How long to try single API calls (like 'get' or 'list'). Used to prevent
// transient failures
// TODO: client should not apply this timeout to Watch calls. Increased from 30s until that is fixed.
SingleCallTimeout = 5 * time.Minute
)
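// Illustrative sketch of the polling pattern these constants drive:
// wait.PollImmediate retries the condition function every Poll interval until
// SingleCallTimeout elapses.
//
//	err := wait.PollImmediate(Poll, SingleCallTimeout, func() (bool, error) {
//		done, err := tryAPICall() // hypothetical helper; done=true stops polling
//		return done, err
//	})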
// NodeAPIAdapter used to retrieve information about Nodes in K8s
// TODO: should we pool the api client connection? My initial thought is no.
type NodeAPIAdapter struct {
// client is the K8s API client; it talks to the API server directly, not through kubectl
client interface{}
// K8s timeout on method call
timeout time.Duration
// K8s node name if applicable
nodeName string
}
// BuildNodeAPIAdapter creates the K8s client from the current kubectl config
// and stores it on the adapter.
// TODO: revisit this constructor pattern
func (nodeAA *NodeAPIAdapter) BuildNodeAPIAdapter(clusterName string, timeout time.Duration, nodeName string) (err error) {
config, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
clientcmd.NewDefaultClientConfigLoadingRules(),
&clientcmd.ConfigOverrides{CurrentContext: clusterName}).ClientConfig()
if err != nil {
return fmt.Errorf("cannot load kubecfg settings for %q: %v", clusterName, err)
}
c, err := release_1_5.NewForConfig(config)
if err != nil {
return fmt.Errorf("cannot build kube client for %q: %v", clusterName, err)
}
nodeAA.client = c
nodeAA.timeout = timeout
nodeAA.nodeName = nodeName
return nil
}
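// Usage sketch (cluster and node names are placeholders): build an adapter
// from the current kubectl config, then wait for the node to become Ready.
//
//	nodeAA := &NodeAPIAdapter{}
//	if err := nodeAA.BuildNodeAPIAdapter("mycluster.example.com", 2*time.Minute, "node-1"); err != nil {
//		return err
//	}
//	ready, err := nodeAA.WaitForNodeToBeReady()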
// GetAllNodes returns all nodes registered with the cluster API
func (nodeAA *NodeAPIAdapter) GetAllNodes() (nodes *v1.NodeList, err error) {
c, err := nodeAA.getClient()
if err != nil {
glog.V(4).Infof("getClient failed for node %s, %v", nodeAA.nodeName, err)
return nil, err
}
opts := v1.ListOptions{}
nodes, err = c.Nodes().List(opts)
if err != nil {
glog.V(4).Infof("getting nodes failed for node %v", err)
return nil, err
}
return nodes, nil
}
// GetReadySchedulableNodes addresses the common use case of getting nodes you can do work on.
// 1) Needs to be schedulable.
// 2) Needs to be ready.
// If EITHER 1 or 2 is not true, most callers will want to ignore the node entirely.
func (nodeAA *NodeAPIAdapter) GetReadySchedulableNodes() (nodes *v1.NodeList, err error) {
nodes, err = nodeAA.waitListSchedulableNodes()
if err != nil {
return nil, fmt.Errorf("GetReadySchedulableNodes go boom %v", err)
}
// previous operations may have caused failures on some nodes. Skip
// 'Not Ready' nodes, just in case (there is no need to fail).
FilterNodes(nodes, func(node v1.Node) (bool, error) {
return isNodeSchedulable(&node)
})
return nodes, err
}
// WaitForNodeToBeReady returns whether node name is ready within timeout.
func (nodeAA *NodeAPIAdapter) WaitForNodeToBeReady() (bool, error) {
return nodeAA.WaitForNodeToBe(v1.NodeReady, true)
}
// WaitForNodeToBeNotReady returns whether node name is not ready (i.e. the
// readiness condition is anything but ready, e.g false or unknown) within
// timeout.
func (nodeAA *NodeAPIAdapter) WaitForNodeToBeNotReady() (bool, error) {
return nodeAA.WaitForNodeToBe(v1.NodeReady, false)
}
// WaitForNodeToBe returns whether node "name's" condition state matches wantTrue
// within timeout. If wantTrue is true, it will ensure the node condition status
// is ConditionTrue; if it's false, it ensures the node condition is in any state
// other than ConditionTrue (e.g. not true or unknown).
func (nodeAA *NodeAPIAdapter) WaitForNodeToBe(conditionType v1.NodeConditionType, wantTrue bool) (bool, error) {
if err := nodeAA.isNodeNameDefined(); err != nil {
return false, fmt.Errorf("isNodeNameDefined failed for node %s, %v", nodeAA.nodeName, err)
}
if err := nodeAA.isClientDefined(); err != nil {
return false, fmt.Errorf("isClientDefined failed for node %s, %v", nodeAA.nodeName, err)
}
glog.V(4).Infof("Waiting up to %v for node %s condition %s to be %t", nodeAA.timeout, nodeAA.nodeName, conditionType, wantTrue)
for start := time.Now(); time.Since(start) < nodeAA.timeout; time.Sleep(Poll) {
c, err := nodeAA.getClient()
if err != nil {
glog.V(4).Infof("getClient failed for node %s, %v", nodeAA.nodeName, err)
return false, err
}
node, err := c.Nodes().Get(nodeAA.nodeName)
// FIXME: this does not error on e.g. 500 responses; we keep looping
if err != nil {
glog.V(4).Infof("Couldn't get node %s", nodeAA.nodeName)
continue
}
iSet, err := IsNodeConditionSetAsExpected(node, conditionType, wantTrue)
if err != nil {
glog.V(4).Infof("IsNodeConditionSetAsExpected failed for node %s, %v", nodeAA.nodeName, err)
return false, err
}
if iSet {
return true, nil
}
}
glog.V(4).Infof("Node %s didn't reach desired %s condition status (%t) within %v", nodeAA.nodeName, conditionType, wantTrue, nodeAA.timeout)
return false, nil
}
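// For example (sketch), to wait for a node to leave the Ready state during a
// drain, wantTrue=false succeeds once the Ready condition is ConditionFalse
// or ConditionUnknown:
//
//	notReady, err := nodeAA.WaitForNodeToBe(v1.NodeReady, false)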
// IsNodeConditionSetAsExpectedSilent reports whether the node condition matches the expected state, without logging mismatches
func IsNodeConditionSetAsExpectedSilent(node *v1.Node, conditionType v1.NodeConditionType, wantTrue bool) (bool, error) {
return isNodeConditionSetAsExpected(node, conditionType, wantTrue, true)
}
// IsNodeConditionUnset check that node condition is not set
func IsNodeConditionUnset(node *v1.Node, conditionType v1.NodeConditionType) (bool, error) {
if err := isNodeStatusDefined(node); err != nil {
return false, err
}
for _, cond := range node.Status.Conditions {
if cond.Type == conditionType {
return false, nil
}
}
return true, nil
}
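// FilterNodes filters nodeList in place, keeping only the nodes for which fn
// returns true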
func FilterNodes(nodeList *v1.NodeList, fn func(node v1.Node) (test bool, err error)) {
var l []v1.Node
for _, node := range nodeList.Items {
test, err := fn(node)
if err != nil {
// FIXME error handling?
return
}
if test {
l = append(l, node)
}
}
nodeList.Items = l
}
func IsNodeConditionSetAsExpected(node *v1.Node, conditionType v1.NodeConditionType, wantTrue bool) (bool, error) {
return isNodeConditionSetAsExpected(node, conditionType, wantTrue, false)
}
// waitListSchedulableNodes is a wrapper around listing nodes supporting retries.
func (nodeAA *NodeAPIAdapter) waitListSchedulableNodes() (nodes *v1.NodeList, err error) {
if err = nodeAA.isClientDefined(); err != nil {
return nil, err
}
pollErr := wait.PollImmediate(Poll, SingleCallTimeout, func() (bool, error) {
c, err := nodeAA.getClient()
if err != nil {
glog.V(4).Infof("getClient failed: %v", err)
return false, err
}
nodes, err = c.Nodes().List(v1.ListOptions{FieldSelector: "spec.unschedulable=false"})
if err != nil {
glog.V(4).Infof("listing schedulable nodes failed: %v", err)
return false, err
}
return true, nil
})
if pollErr != nil {
return nil, pollErr
}
return nodes, err
}
func (nodeAA *NodeAPIAdapter) getClient() (*release_1_5.Clientset, error) {
if nodeAA.client == nil {
return nil, errors.New("client cannot be nil")
}
// client is stored as an interface{}; use a checked type assertion so an
// unexpected concrete type returns an error instead of panicking
c, ok := nodeAA.client.(*release_1_5.Clientset)
if !ok {
return nil, errors.New("client is not a *release_1_5.Clientset")
}
return c, nil
}
}
// TODO: remove the silent bool once the semantics of wantTrue are settled
func isNodeConditionSetAsExpected(node *v1.Node, conditionType v1.NodeConditionType, wantTrue, silent bool) (bool, error) {
if err := isNodeStatusDefined(node); err != nil {
return false, err
}
// Check the node readiness condition (logging all).
for _, cond := range node.Status.Conditions {
// Ensure that the condition type and the status matches as desired.
if cond.Type == conditionType {
if (cond.Status == v1.ConditionTrue) == wantTrue {
return true, nil
} else {
if !silent {
glog.V(4).Infof(
"Condition %s of node %s is %v instead of %t. Reason: %v, message: %v",
conditionType, node.Name, cond.Status == v1.ConditionTrue, wantTrue, cond.Reason, cond.Message)
}
return false, nil
}
}
}
if !silent {
glog.V(4).Infof("Couldn't find condition %v on node %v", conditionType, node.Name)
}
return false, nil
}
// Node is schedulable if:
// 1) it doesn't have the "unschedulable" field set
// 2) its Ready condition is set to true
// 3) it doesn't have a NetworkUnavailable condition set to true
func isNodeSchedulable(node *v1.Node) (bool, error) {
nodeReady, err := IsNodeConditionSetAsExpected(node, v1.NodeReady, true)
if err != nil {
return false, err
}
networkUnval, err := IsNodeConditionUnset(node, v1.NodeNetworkUnavailable)
if err != nil {
return false, err
}
networkUnvalSilent, err := IsNodeConditionSetAsExpectedSilent(node, v1.NodeNetworkUnavailable, false)
if err != nil {
return false, err
}
networkReady := networkUnval || networkUnvalSilent
return !node.Spec.Unschedulable && nodeReady && networkReady, nil
}
func (nodeAA *NodeAPIAdapter) isNodeNameDefined() error {
if nodeAA.nodeName == "" {
return errors.New("nodeName must be defined in nodeAA struct")
}
return nil
}
func (nodeAA *NodeAPIAdapter) isClientDefined() error {
if nodeAA.client == nil {
return errors.New("client must be defined in the struct")
}
return nil
}
func isNodeStatusDefined(node *v1.Node) error {
if node == nil {
return errors.New("node cannot be nil")
}
// FIXME how do I test this?
/*
if node.Status == nil {
return errors.New("node.Status cannot be nil")
}*/
return nil
}
// GetNodeConditionStatus returns the status of the node's Ready condition, or ConditionUnknown if the condition is not present
func GetNodeConditionStatus(nodeConditions []v1.NodeCondition) v1.ConditionStatus {
s := v1.ConditionUnknown
for _, element := range nodeConditions {
if element.Type == "Ready" {
s = element.Status
break
}
}
return s
}
// Node is ready if:
// 1) its Ready condition is set to true
// 2) it doesn't have a NetworkUnavailable condition set to true
func IsNodeOrMasterReady(node *v1.Node) (bool, error) {
nodeReady, err := IsNodeConditionSetAsExpected(node, v1.NodeReady, true)
if err != nil {
return false, err
}
networkUnval, err := IsNodeConditionUnset(node, v1.NodeNetworkUnavailable)
if err != nil {
return false, err
}
networkUnvalSilent, err := IsNodeConditionSetAsExpectedSilent(node, v1.NodeNetworkUnavailable, false)
if err != nil {
return false, err
}
networkReady := networkUnval || networkUnvalSilent
return nodeReady && networkReady, nil
}

View File

@ -0,0 +1,175 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kops
import (
"testing"
//"time"
//"github.com/golang/glog"
//k8sapi "k8s.io/kubernetes/pkg/api"
//"k8s.io/kubernetes/pkg/client/unversioned/testclient"
//"k8s.io/kubernetes/pkg/client/unversioned/testclient/simple"
//"k8s.io/kubernetes/pkg/api/testapi"
)
func TestBuildNodeAPIAdapter(t *testing.T) {
}
func TestGetReadySchedulableNodes(t *testing.T) {
}
// TODO: not working since the client API changed
/*
func TestWaitForNodeToBeReady(t *testing.T) {
conditions := make([]k8sapi.NodeCondition,1)
conditions[0] = k8sapi.NodeCondition{Type:"Ready",Status:"True"}
nodeAA := setupNodeAA(t,conditions)
test, err := nodeAA.WaitForNodeToBeReady()
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
if test != true {
t.Fatalf("unexpected error WaitForNodeToBeReady Failed: %v", test)
}
}
func TestWaitForNodeToBeNotReady(t *testing.T) {
conditions := make([]k8sapi.NodeCondition,1)
conditions[0] = k8sapi.NodeCondition{Type:"Ready",Status:"False"}
nodeAA := setupNodeAA(t,conditions)
test, err := nodeAA.WaitForNodeToBeNotReady()
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
if test != true {
t.Fatalf("unexpected error WaitForNodeToBeReady Failed: %v", test)
}
}
func TestIsNodeConditionUnset(t *testing.T) {
}
func setupNodeAA(t *testing.T, conditions []k8sapi.NodeCondition)(*NodeAPIAdapter) {
c := &simple.Client{
Request: simple.Request{
Method: "GET",
Path: testapi.Default.ResourcePath(getNodesResourceName(), "", "foo"),
},
Response: simple.Response{
StatusCode: 200,
Body: &k8sapi.Node{
ObjectMeta: k8sapi.ObjectMeta{Name: "node-foo"},
Spec: k8sapi.NodeSpec{ Unschedulable: false },
Status: k8sapi.NodeStatus{ Conditions: conditions},
},
},
}
c.Setup(t).Clientset.Nodes().Get("foo")
//c.Validate(t, response, err)
return &NodeAPIAdapter{
client: c.Clientset,
timeout: time.Duration(10)*time.Second,
nodeName: "foo",
}
}
*/
/*
func mockClient() *testclient.Fake {
return testclient.NewSimpleFake(dummyNode())
}
// Create a NodeAPIAdapter with K8s client based on the current kubectl config
func buildMockNodeAPIAdapter(nodeName string, t *testing.T) *NodeAPIAdapter {
s := simple.Client{}
c := s.Setup(t)
c.Nodes().Create(dummyNode())
node, err := c.Client.Nodes().Get("foo")
glog.V(4).Infof("node call %v, %v", node, err)
return &NodeAPIAdapter{
client: c.Client,
timeout: time.Duration(10)*time.Second,
nodeName: nodeName,
}
}
func dummyNode() *api.Node {
return &api.Node{
ObjectMeta: api.ObjectMeta{
Name: "foo",
},
Spec: api.NodeSpec{
Unschedulable: false,
},
}
}*/
func getNodesResourceName() string {
return "nodes"
}
// Example mocking of the API
/*
type secretsClient struct {
unversioned.Interface
}
// dummySecret generates a secret with one user inside the auth key
// foo:md5(bar)
func dummySecret() *api.Secret {
return &api.Secret{
ObjectMeta: api.ObjectMeta{
Namespace: api.NamespaceDefault,
Name: "demo-secret",
},
Data: map[string][]byte{"auth": []byte("foo:$apr1$OFG3Xybp$ckL0FHDAkoXYIlH9.cysT0")},
}
}
func mockClient() *testclient.Fake {
return testclient.NewSimpleFake(dummySecret())
}
func TestIngressWithoutAuth(t *testing.T) {
ing := buildIngress()
client := mockClient()
_, err := ParseAnnotations(client, ing, "")
if err == nil {
t.Error("Expected error with ingress without annotations")
}
if err == ErrMissingAuthType {
t.Errorf("Expected MissingAuthType error but returned %v", err)
}
}
*/

View File

@ -0,0 +1,153 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kops
import (
"fmt"
"time"
"k8s.io/kubernetes/pkg/api/v1"
)
const (
Node = "node"
Master = "master"
)
// A cluster to validate
type ValidationCluster struct {
MastersReady bool `json:"mastersReady,omitempty"`
MastersReadyArray []*ValidationNode `json:"mastersReadyArray,omitempty"`
MastersNotReadyArray []*ValidationNode `json:"mastersNotReadyArray,omitempty"`
MastersCount int `json:"mastersCount,omitempty"`
NodesReady bool `json:"nodesReady,omitempty"`
NodesReadyArray []*ValidationNode `json:"nodesReadyArray,omitempty"`
NodesNotReadyArray []*ValidationNode `json:"nodesNotReadyArray,omitempty"`
NodesCount int `json:"nodesCount,omitempty"`
NodeList *v1.NodeList `json:"nodeList,omitempty"`
}
// A K8s node to be validated
type ValidationNode struct {
Zone string `json:"zone,omitempty"`
Role string `json:"role,omitempty"`
Hostname string `json:"hostname,omitempty"`
Status v1.ConditionStatus `json:"status,omitempty"`
}
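// Illustrative sketch (assumed values): a fully-ready cluster with one master
// and one node would be populated roughly as:
//
//	vc := &ValidationCluster{
//		MastersReady:      true,
//		MastersCount:      1,
//		MastersReadyArray: []*ValidationNode{{Role: Master, Hostname: "master1", Status: v1.ConditionTrue}},
//		NodesReady:        true,
//		NodesCount:        1,
//		NodesReadyArray:   []*ValidationNode{{Role: Node, Hostname: "node1", Status: v1.ConditionTrue}},
//	}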
// ValidateCluster validates a k8s cluster against the provided instance group list
func ValidateCluster(clusterName string, instanceGroupList *InstanceGroupList) (*ValidationCluster, error) {
var instanceGroups []*InstanceGroup
validationCluster := &ValidationCluster{}
for i := range instanceGroupList.Items {
ig := &instanceGroupList.Items[i]
instanceGroups = append(instanceGroups, ig)
if ig.Spec.Role == InstanceGroupRoleMaster {
validationCluster.MastersCount += *ig.Spec.MinSize
} else if ig.Spec.Role == InstanceGroupRoleNode {
validationCluster.NodesCount += *ig.Spec.MinSize
}
}
if len(instanceGroups) == 0 {
return validationCluster, fmt.Errorf("No InstanceGroup objects found\n")
}
nodeAA := &NodeAPIAdapter{}
timeout, err := time.ParseDuration("30s")
if err != nil {
return nil, fmt.Errorf("Cannot set timeout %q: %v", clusterName, err)
}
nodeAA.BuildNodeAPIAdapter(clusterName, timeout, "")
validationCluster.NodeList, err = nodeAA.GetAllNodes()
if err != nil {
return nil, fmt.Errorf("Cannot get nodes for %q: %v", clusterName, err)
}
return validateTheNodes(clusterName, validationCluster)
}
func validateTheNodes(clusterName string, validationCluster *ValidationCluster) (*ValidationCluster, error) {
nodes := validationCluster.NodeList
if nodes == nil || nodes.Items == nil {
return validationCluster, fmt.Errorf("No nodes found in validationCluster")
}
for _, node := range nodes.Items {
role := Node
if val, ok := node.ObjectMeta.Labels["kubernetes.io/role"]; ok {
role = val
}
n := &ValidationNode{
Zone: node.ObjectMeta.Labels["failure-domain.beta.kubernetes.io/zone"],
Hostname: node.ObjectMeta.Labels["kubernetes.io/hostname"],
Role: role,
Status: GetNodeConditionStatus(node.Status.Conditions),
}
ready, err := IsNodeOrMasterReady(&node)
if err != nil {
return validationCluster, fmt.Errorf("Cannot test if node is ready: %s", node.Name)
}
if n.Role == Master {
if ready {
validationCluster.MastersReadyArray = append(validationCluster.MastersReadyArray, n)
} else {
validationCluster.MastersNotReadyArray = append(validationCluster.MastersNotReadyArray, n)
}
} else if n.Role == Node {
if ready {
validationCluster.NodesReadyArray = append(validationCluster.NodesReadyArray, n)
} else {
validationCluster.NodesNotReadyArray = append(validationCluster.NodesNotReadyArray, n)
}
}
}
validationCluster.MastersReady = true
if len(validationCluster.MastersNotReadyArray) != 0 || validationCluster.MastersCount !=
len(validationCluster.MastersReadyArray) {
validationCluster.MastersReady = false
}
validationCluster.NodesReady = true
if len(validationCluster.NodesNotReadyArray) != 0 || validationCluster.NodesCount !=
len(validationCluster.NodesReadyArray) {
validationCluster.NodesReady = false
}
if validationCluster.MastersReady && validationCluster.NodesReady {
return validationCluster, nil
} else {
return validationCluster, fmt.Errorf("Your cluster is NOT ready %s", clusterName)
}
}

View File

@ -0,0 +1,218 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kops
import (
"testing"
"fmt"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
)
func Test_ValidateClusterPositive(t *testing.T) {
nodeList, err := dummyClient("true", "true").Core().Nodes().List(v1.ListOptions{})
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
validationCluster := &ValidationCluster{NodeList: nodeList, NodesCount: 1, MastersCount: 1}
validationCluster, err = validateTheNodes("foo", validationCluster)
if err != nil {
printDebug(validationCluster)
t.Fatalf("unexpected error: %v", err)
}
}
func Test_ValidateClusterMasterAndNodeNotReady(t *testing.T) {
nodeList, err := dummyClient("false", "false").Core().Nodes().List(v1.ListOptions{})
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
validationCluster := &ValidationCluster{NodeList: nodeList, NodesCount: 1, MastersCount: 1}
validationCluster, err = validateTheNodes("foo", validationCluster)
if err == nil {
printDebug(validationCluster)
t.Fatalf("unexpected error: %v", err)
}
}
func Test_ValidateClusterNodeNotReady(t *testing.T) {
nodeList, err := dummyClient("true", "false").Core().Nodes().List(v1.ListOptions{})
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
validationCluster := &ValidationCluster{NodeList: nodeList, NodesCount: 1, MastersCount: 1}
validationCluster, err = validateTheNodes("foo", validationCluster)
if err == nil {
printDebug(validationCluster)
t.Fatalf("unexpected error: %v", err)
}
}
func Test_ValidateClusterMastersNotEnough(t *testing.T) {
nodeList, err := dummyClient("true", "true").Core().Nodes().List(v1.ListOptions{})
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
validationCluster := &ValidationCluster{NodeList: nodeList, NodesCount: 1, MastersCount: 3}
validationCluster, err = validateTheNodes("foo", validationCluster)
if err == nil {
printDebug(validationCluster)
t.Fatalf("unexpected error: %v", err)
}
}
func printDebug(validationCluster *ValidationCluster) {
fmt.Printf("cluster - masters ready: %v, nodes ready: %v\n", validationCluster.MastersReady, validationCluster.NodesReady)
fmt.Printf("mastersNotReady %v\n", len(validationCluster.MastersNotReadyArray))
fmt.Printf("mastersCount %v, mastersReady %v\n", validationCluster.MastersCount, len(validationCluster.MastersReadyArray))
fmt.Printf("nodesNotReady %v\n", len(validationCluster.NodesNotReadyArray))
fmt.Printf("nodesCount %v, nodesReady %v\n", validationCluster.NodesCount, len(validationCluster.NodesReadyArray))
}
const NODE_READY = "nodeReady"
func dummyClient(masterReady string, nodeReady string) *fake.Clientset {
return fake.NewSimpleClientset(makeNodeList(
[]map[string]string{
{
"name": "master1",
"kubernetes.io/role": "master",
NODE_READY: masterReady,
},
{
"name": "node1",
"kubernetes.io/role": "node",
NODE_READY: nodeReady,
},
},
))
}
func dummyNode(nodeMap map[string]string) v1.Node {
nodeReady := v1.ConditionFalse
if nodeMap[NODE_READY] == "true" {
nodeReady = v1.ConditionTrue
}
expectedNode := v1.Node{
ObjectMeta: v1.ObjectMeta{
Name: nodeMap["name"],
Labels: map[string]string{
"kubernetes.io/role": nodeMap["kubernetes.io/role"],
},
},
Spec: v1.NodeSpec{},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeOutOfDisk,
Status: v1.ConditionTrue,
Reason: "KubeletOutOfDisk",
Message: "out of disk space",
},
{
Type: v1.NodeMemoryPressure,
Status: v1.ConditionFalse,
Reason: "KubeletHasSufficientMemory",
Message: "kubelet has sufficient memory available",
},
{
Type: v1.NodeDiskPressure,
Status: v1.ConditionFalse,
Reason: "KubeletHasSufficientDisk",
Message: "kubelet has sufficient disk space available",
},
{
Type: v1.NodeReady,
Status: nodeReady,
Reason: "KubeletReady",
Message: "kubelet is posting ready status",
},
},
NodeInfo: v1.NodeSystemInfo{
MachineID: "123",
SystemUUID: "abc",
BootID: "1b3",
KernelVersion: "3.16.0-0.bpo.4-amd64",
OSImage: "Debian GNU/Linux 7 (wheezy)",
//OperatingSystem: goruntime.GOOS,
//Architecture: goruntime.GOARCH,
ContainerRuntimeVersion: "test://1.5.0",
//KubeletVersion: version.Get().String(),
//KubeProxyVersion: version.Get().String(),
},
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(20E9, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
v1.ResourceNvidiaGPU: *resource.NewQuantity(0, resource.DecimalSI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(1800, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(19900E6, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
v1.ResourceNvidiaGPU: *resource.NewQuantity(0, resource.DecimalSI),
},
Addresses: []v1.NodeAddress{
{Type: v1.NodeLegacyHostIP, Address: "127.0.0.1"},
{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
{Type: v1.NodeHostName, Address: nodeMap["name"]},
},
// images will be sorted from max to min in node status.
Images: []v1.ContainerImage{
{
Names: []string{"gcr.io/google_containers:v3", "gcr.io/google_containers:v4"},
SizeBytes: 456,
},
{
Names: []string{"gcr.io/google_containers:v1", "gcr.io/google_containers:v2"},
SizeBytes: 123,
},
},
},
}
return expectedNode
}
// makeNodeList constructs a v1.NodeList from a list of node property maps
func makeNodeList(nodes []map[string]string) *v1.NodeList {
var list v1.NodeList
for _, node := range nodes {
list.Items = append(list.Items, dummyNode(node))
}
return &list
}

View File

@ -17,13 +17,14 @@ limitations under the License.
package kutil
import (
"reflect"
"sort"
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"k8s.io/kops/cloudmock/aws/mockec2"
"k8s.io/kops/upup/pkg/fi/cloudup/awsup"
"reflect"
"sort"
"testing"
)
func TestAddUntaggedRouteTables(t *testing.T) {