/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package lifted

import (
	"fmt"
	"strings"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/duration"
	"k8s.io/apimachinery/pkg/util/sets"

	"github.com/karmada-io/karmada/pkg/printers"
)

const (
	// NodeUnreachablePodReason is the reason on a pod when its state cannot be confirmed as kubelet is unresponsive
	// on the node it is (was) running.
	NodeUnreachablePodReason = "NodeLost"
)

var (
	podSuccessConditions = []metav1.TableRowCondition{{Type: metav1.RowCompleted, Status: metav1.ConditionTrue, Reason: string(corev1.PodSucceeded), Message: "The pod has completed successfully."}}
	podFailedConditions  = []metav1.TableRowCondition{{Type: metav1.RowCompleted, Status: metav1.ConditionTrue, Reason: string(corev1.PodFailed), Message: "The pod failed."}}
)
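
// A minimal usage sketch (an assumption, not verified here: like the upstream
// kubectl printers this file is lifted from, the karmada printers package is
// expected to expose NewTableGenerator, whose result implements PrintHandler;
// pod is any *corev1.Pod to be rendered):
//
//	gen := printers.NewTableGenerator()
//	AddCoreV1Handlers(gen)
//	table, err := gen.GenerateTable(pod, printers.GenerateOptions{Wide: true})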

// AddCoreV1Handlers adds print handlers for core with v1 versions.
func AddCoreV1Handlers(h printers.PrintHandler) {
	podColumnDefinitions := []metav1.TableColumnDefinition{
		{Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]},
		{Name: "Ready", Type: "string", Description: "The aggregate readiness state of this pod for accepting traffic."},
		{Name: "Status", Type: "string", Description: "The aggregate status of the containers in this pod."},
		{Name: "Restarts", Type: "integer", Description: "The number of times the containers in this pod have been restarted."},
		{Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]},
		{Name: "IP", Type: "string", Priority: 1, Description: corev1.PodStatus{}.SwaggerDoc()["podIP"]},
		{Name: "Node", Type: "string", Priority: 1, Description: corev1.PodSpec{}.SwaggerDoc()["nodeName"]},
		{Name: "Nominated Node", Type: "string", Priority: 1, Description: corev1.PodStatus{}.SwaggerDoc()["nominatedNodeName"]},
		{Name: "Readiness Gates", Type: "string", Priority: 1, Description: corev1.PodSpec{}.SwaggerDoc()["readinessGates"]},
	}
	_ = h.TableHandler(podColumnDefinitions, printPodList)
	_ = h.TableHandler(podColumnDefinitions, printPod)

	nodeColumnDefinitions := []metav1.TableColumnDefinition{
		{Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]},
		{Name: "Status", Type: "string", Description: "The status of the node"},
		{Name: "Roles", Type: "string", Description: "The roles of the node"},
		{Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]},
		{Name: "Version", Type: "string", Description: corev1.NodeSystemInfo{}.SwaggerDoc()["kubeletVersion"]},
		{Name: "Internal-IP", Type: "string", Priority: 1, Description: corev1.NodeStatus{}.SwaggerDoc()["addresses"]},
		{Name: "External-IP", Type: "string", Priority: 1, Description: corev1.NodeStatus{}.SwaggerDoc()["addresses"]},
		{Name: "OS-Image", Type: "string", Priority: 1, Description: corev1.NodeSystemInfo{}.SwaggerDoc()["osImage"]},
		{Name: "Kernel-Version", Type: "string", Priority: 1, Description: corev1.NodeSystemInfo{}.SwaggerDoc()["kernelVersion"]},
		{Name: "Container-Runtime", Type: "string", Priority: 1, Description: corev1.NodeSystemInfo{}.SwaggerDoc()["containerRuntimeVersion"]},
	}
	_ = h.TableHandler(nodeColumnDefinitions, printNode)
	_ = h.TableHandler(nodeColumnDefinitions, printNodeList)
}
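
// printNode generates a single table row for the given Node, filling the
// node columns registered in AddCoreV1Handlers.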
func printNode(obj *corev1.Node, options printers.GenerateOptions) ([]metav1.TableRow, error) {
	row := metav1.TableRow{
		Object: runtime.RawExtension{Object: obj},
	}

	conditionMap := make(map[corev1.NodeConditionType]*corev1.NodeCondition)
	NodeAllConditions := []corev1.NodeConditionType{corev1.NodeReady}
	for i := range obj.Status.Conditions {
		cond := obj.Status.Conditions[i]
		conditionMap[cond.Type] = &cond
	}
	var status []string
	for _, validCondition := range NodeAllConditions {
		if condition, ok := conditionMap[validCondition]; ok {
			if condition.Status == corev1.ConditionTrue {
				status = append(status, string(condition.Type))
			} else {
				status = append(status, "Not"+string(condition.Type))
			}
		}
	}
	if len(status) == 0 {
		status = append(status, "Unknown")
	}
	if obj.Spec.Unschedulable {
		status = append(status, "SchedulingDisabled")
	}

	roles := strings.Join(findNodeRoles(obj), ",")
	if len(roles) == 0 {
		roles = "<none>"
	}

	row.Cells = append(row.Cells, obj.Name, strings.Join(status, ","), roles, translateTimestampSince(obj.CreationTimestamp), obj.Status.NodeInfo.KubeletVersion)
	if options.Wide {
		osImage, kernelVersion, crVersion := obj.Status.NodeInfo.OSImage, obj.Status.NodeInfo.KernelVersion, obj.Status.NodeInfo.ContainerRuntimeVersion
		if osImage == "" {
			osImage = "<unknown>"
		}
		if kernelVersion == "" {
			kernelVersion = "<unknown>"
		}
		if crVersion == "" {
			crVersion = "<unknown>"
		}
		row.Cells = append(row.Cells, getNodeInternalIP(obj), getNodeExternalIP(obj), osImage, kernelVersion, crVersion)
	}

	return []metav1.TableRow{row}, nil
}

// getNodeExternalIP returns the first external IP of the node, or "<none>" if none is found.
func getNodeExternalIP(node *corev1.Node) string {
	for _, address := range node.Status.Addresses {
		if address.Type == corev1.NodeExternalIP {
			return address.Address
		}
	}
	return "<none>"
}

// getNodeInternalIP returns the first internal IP of the node, or "<none>" if none is found.
func getNodeInternalIP(node *corev1.Node) string {
	for _, address := range node.Status.Addresses {
		if address.Type == corev1.NodeInternalIP {
			return address.Address
		}
	}
	return "<none>"
}

const (
	// labelNodeRolePrefix is a label prefix for node roles
	// It's copied over to here until it's merged in core: https://github.com/kubernetes/kubernetes/pull/39112
	labelNodeRolePrefix = "node-role.kubernetes.io/"

	// nodeLabelRole specifies the role of a node
	nodeLabelRole = "kubernetes.io/role"
)

// findNodeRoles returns the roles of a given node.
// The roles are determined by looking for:
// * a node-role.kubernetes.io/<role>="" label
// * a kubernetes.io/role="<role>" label
func findNodeRoles(node *corev1.Node) []string {
	roles := sets.NewString()
	for k, v := range node.Labels {
		switch {
		case strings.HasPrefix(k, labelNodeRolePrefix):
			if role := strings.TrimPrefix(k, labelNodeRolePrefix); len(role) > 0 {
				roles.Insert(role)
			}
		case k == nodeLabelRole && v != "":
			roles.Insert(v)
		}
	}
	return roles.List()
}
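
// printNodeList generates one table row per Node in the list.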
func printNodeList(list *corev1.NodeList, options printers.GenerateOptions) ([]metav1.TableRow, error) {
	rows := make([]metav1.TableRow, 0, len(list.Items))
	for i := range list.Items {
		r, err := printNode(&list.Items[i], options)
		if err != nil {
			return nil, err
		}
		rows = append(rows, r...)
	}
	return rows, nil
}
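
// printPodList generates one table row per Pod in the list.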
func printPodList(podList *corev1.PodList, options printers.GenerateOptions) ([]metav1.TableRow, error) {
	rows := make([]metav1.TableRow, 0, len(podList.Items))
	for i := range podList.Items {
		r, err := printPod(&podList.Items[i], options)
		if err != nil {
			return nil, err
		}
		rows = append(rows, r...)
	}
	return rows, nil
}
func printPod(pod *corev1.Pod, options printers.GenerateOptions) ([]metav1.TableRow, error) {
	restarts := 0
	totalContainers := len(pod.Spec.Containers)
	readyContainers := 0

	reason := string(pod.Status.Phase)
	if pod.Status.Reason != "" {
		reason = pod.Status.Reason
	}

	row := metav1.TableRow{
		Object: runtime.RawExtension{Object: pod},
	}

	switch pod.Status.Phase {
	case corev1.PodSucceeded:
		row.Conditions = podSuccessConditions
	case corev1.PodFailed:
		row.Conditions = podFailedConditions
	}
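
	// Scan init containers in order; the first one that has not terminated
	// successfully determines the Init:... status and stops the scan.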
	initializing := false
	for i := range pod.Status.InitContainerStatuses {
		container := pod.Status.InitContainerStatuses[i]
		restarts += int(container.RestartCount)
		switch {
		case container.State.Terminated != nil && container.State.Terminated.ExitCode == 0:
			continue
		case container.State.Terminated != nil:
			// initialization failed
			if len(container.State.Terminated.Reason) == 0 {
				if container.State.Terminated.Signal != 0 {
					reason = fmt.Sprintf("Init:Signal:%d", container.State.Terminated.Signal)
				} else {
					reason = fmt.Sprintf("Init:ExitCode:%d", container.State.Terminated.ExitCode)
				}
			} else {
				reason = "Init:" + container.State.Terminated.Reason
			}
			initializing = true
		case container.State.Waiting != nil && len(container.State.Waiting.Reason) > 0 && container.State.Waiting.Reason != "PodInitializing":
			reason = "Init:" + container.State.Waiting.Reason
			initializing = true
		default:
			reason = fmt.Sprintf("Init:%d/%d", i, len(pod.Spec.InitContainers))
			initializing = true
		}
		break
	}
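
	// If the pod is past initialization, recount restarts and derive the
	// status from the regular container statuses instead.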
	if !initializing {
		restarts = 0
		hasRunning := false
		for i := len(pod.Status.ContainerStatuses) - 1; i >= 0; i-- {
			container := pod.Status.ContainerStatuses[i]
			restarts += int(container.RestartCount)
			if container.State.Waiting != nil && container.State.Waiting.Reason != "" {
				reason = container.State.Waiting.Reason
			} else if container.State.Terminated != nil && container.State.Terminated.Reason != "" {
				reason = container.State.Terminated.Reason
			} else if container.State.Terminated != nil && container.State.Terminated.Reason == "" {
				if container.State.Terminated.Signal != 0 {
					reason = fmt.Sprintf("Signal:%d", container.State.Terminated.Signal)
				} else {
					reason = fmt.Sprintf("ExitCode:%d", container.State.Terminated.ExitCode)
				}
			} else if container.Ready && container.State.Running != nil {
				hasRunning = true
				readyContainers++
			}
		}

		// change pod status back to "Running" if there is at least one container still reporting as "Running" status
		if reason == "Completed" && hasRunning {
			if hasPodReadyCondition(pod.Status.Conditions) {
				reason = "Running"
			} else {
				reason = "NotReady"
			}
		}
	}

	if pod.DeletionTimestamp != nil && pod.Status.Reason == NodeUnreachablePodReason {
		reason = "Unknown"
	} else if pod.DeletionTimestamp != nil {
		reason = "Terminating"
	}

	row.Cells = append(row.Cells, pod.Name, fmt.Sprintf("%d/%d", readyContainers, totalContainers), reason, int64(restarts), translateTimestampSince(pod.CreationTimestamp))
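
	// Wide output adds the IP, Node, Nominated Node, and Readiness Gates columns.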
	if options.Wide {
		nodeName := pod.Spec.NodeName
		nominatedNodeName := pod.Status.NominatedNodeName
		podIP := ""
		if len(pod.Status.PodIPs) > 0 {
			podIP = pod.Status.PodIPs[0].IP
		}

		if podIP == "" {
			podIP = "<none>"
		}
		if nodeName == "" {
			nodeName = "<none>"
		}
		if nominatedNodeName == "" {
			nominatedNodeName = "<none>"
		}

		readinessGates := "<none>"
		if len(pod.Spec.ReadinessGates) > 0 {
			trueConditions := 0
			for _, readinessGate := range pod.Spec.ReadinessGates {
				conditionType := readinessGate.ConditionType
				for _, condition := range pod.Status.Conditions {
					if condition.Type == conditionType {
						if condition.Status == corev1.ConditionTrue {
							trueConditions++
						}
						break
					}
				}
			}
			readinessGates = fmt.Sprintf("%d/%d", trueConditions, len(pod.Spec.ReadinessGates))
		}
		row.Cells = append(row.Cells, podIP, nodeName, nominatedNodeName, readinessGates)
	}

	return []metav1.TableRow{row}, nil
}
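
// hasPodReadyCondition reports whether the PodReady condition is present and true.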
func hasPodReadyCondition(conditions []corev1.PodCondition) bool {
	for _, condition := range conditions {
		if condition.Type == corev1.PodReady && condition.Status == corev1.ConditionTrue {
			return true
		}
	}
	return false
}

// translateTimestampSince returns the elapsed time since timestamp in
// human-readable approximation.
func translateTimestampSince(timestamp metav1.Time) string {
	if timestamp.IsZero() {
		return "<unknown>"
	}
	return duration.HumanDuration(time.Since(timestamp.Time))
}