support node/pod TableConvertor.

Signed-off-by: niuyueyang <719415781@qq.com>
niuyueyang 2024-01-25 14:18:43 +08:00
parent 5ec02b9a14
commit 4aecca59ef
5 changed files with 533 additions and 6 deletions

View File

@@ -22,17 +22,27 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apiserver/pkg/registry/rest"
"github.com/karmada-io/karmada/pkg/printers"
)
// TableConvertor converts objects to metav1.Table using the embedded printers.TableGenerator, falling back to defaultTableConvert when generation fails
type TableConvertor struct {
defaultTableConvert rest.TableConvertor
printers.TableGenerator
}
// ConvertToTable method - converts objects to metav1.Table objects using TableGenerator
func (c TableConvertor) ConvertToTable(_ context.Context, obj runtime.Object, tableOptions runtime.Object) (*metav1.Table, error) {
// NewTableConvertor creates a TableConvertor from the given defaultTableConvert and TableGenerator
func NewTableConvertor(defaultTableConvert rest.TableConvertor, tableGenerator printers.TableGenerator) rest.TableConvertor {
return &TableConvertor{
defaultTableConvert: defaultTableConvert,
TableGenerator: tableGenerator,
}
}
// ConvertToTable converts obj to a metav1.Table using the TableGenerator, falling back to defaultTableConvert when generation fails
func (c TableConvertor) ConvertToTable(ctx context.Context, obj runtime.Object, tableOptions runtime.Object) (*metav1.Table, error) {
noHeaders := false
if tableOptions != nil {
switch t := tableOptions.(type) {
@@ -44,5 +54,12 @@ func (c TableConvertor) ConvertToTable(_ context.Context, obj runtime.Object, ta
return nil, fmt.Errorf("unrecognized type %T for table options, can't display tabular output", tableOptions)
}
}
return c.TableGenerator.GenerateTable(obj, printers.GenerateOptions{Wide: true, NoHeaders: noHeaders})
tableResult, err := c.TableGenerator.GenerateTable(obj, printers.GenerateOptions{Wide: true, NoHeaders: noHeaders})
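// Return the generated table when the registered print handlers recognized the object.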
if err == nil {
return tableResult, nil
}
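// Generation failed; return the error unless a default convertor is available to fall back to.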
if c.defaultTableConvert == nil {
return tableResult, err
}
return c.defaultTableConvert.ConvertToTable(ctx, obj, tableOptions)
}
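For orientation, a minimal sketch of the resulting behavior (not part of the commit; the function name and the ConfigMap are arbitrary examples, and the package aliases mirror the imports used in cache.go and the test file below):

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apiserver/pkg/registry/rest"

	"github.com/karmada-io/karmada/pkg/printers"
	printerstorage "github.com/karmada-io/karmada/pkg/printers/storage"
	printerslifted "github.com/karmada-io/karmada/pkg/util/lifted"
)

func exampleFallback() error {
	convertor := printerstorage.NewTableConvertor(
		rest.NewDefaultTableConvertor(schema.GroupResource{Resource: "configmaps"}),
		printers.NewTableGenerator().With(printerslifted.AddCoreV1Handlers))

	// Pods have a registered print handler, so the generator renders the table.
	if _, err := convertor.ConvertToTable(context.Background(), &corev1.Pod{}, nil); err != nil {
		return err
	}

	// ConfigMaps have no handler; the generator fails and the call falls
	// through to the default convertor's generic columns.
	_, err := convertor.ConvertToTable(context.Background(), &corev1.ConfigMap{}, nil)
	return err
}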

View File

@@ -0,0 +1,89 @@
/*
Copyright 2024 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
"context"
"fmt"
"testing"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apiserver/pkg/registry/rest"
"github.com/karmada-io/karmada/pkg/printers"
printersinternal "github.com/karmada-io/karmada/pkg/util/lifted"
)
func TestPrintHandlerStorage(t *testing.T) {
convert := NewTableConvertor(
rest.NewDefaultTableConvertor(schema.GroupResource{}),
printers.NewTableGenerator().With(printersinternal.AddCoreV1Handlers))
testCases := []struct {
object runtime.Object
}{
{
object: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "test1"},
Spec: v1.PodSpec{},
Status: v1.PodStatus{
Phase: v1.PodPending,
HostIP: "1.2.3.4",
PodIP: "2.3.4.5",
},
},
},
{
object: &v1.Node{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{Name: "node1"},
Spec: v1.NodeSpec{},
Status: v1.NodeStatus{
Phase: v1.NodeTerminated,
Addresses: []v1.NodeAddress{
{
Type: v1.NodeInternalIP,
Address: "1.2.3.4",
},
},
},
},
},
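// AddCoreV1Handlers registers handlers only for pods and nodes, so this
// Service case exercises the fallback to the default table convertor.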
{
object: &v1.Service{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{Name: "svc1"},
Spec: v1.ServiceSpec{
Selector: nil,
ClusterIP: "4.4.4.4",
Type: v1.ServiceTypeClusterIP,
},
},
},
}
for i, tc := range testCases {
tc := tc
t.Run(fmt.Sprintf("object=%#v", tc.object), func(t *testing.T) {
_, err := convert.ConvertToTable(context.Background(), tc.object, nil)
if err != nil {
t.Errorf("index %v ConvertToTable error: %#v", i, err)
}
})
}
}

View File

@@ -32,9 +32,12 @@ import (
"k8s.io/apiserver/pkg/registry/rest"
"k8s.io/client-go/kubernetes/scheme"
"github.com/karmada-io/karmada/pkg/printers"
printerstorage "github.com/karmada-io/karmada/pkg/printers/storage"
"github.com/karmada-io/karmada/pkg/search/proxy/framework"
pluginruntime "github.com/karmada-io/karmada/pkg/search/proxy/framework/runtime"
"github.com/karmada-io/karmada/pkg/search/proxy/store"
printerslifted "github.com/karmada-io/karmada/pkg/util/lifted"
)
const (
@@ -83,9 +86,11 @@ func (c *Cache) SupportRequest(request framework.ProxyRequest) bool {
func (c *Cache) Connect(_ context.Context, request framework.ProxyRequest) (http.Handler, error) {
requestInfo := request.RequestInfo
r := &rester{
store: c.store,
gvr: request.GroupVersionResource,
tableConvertor: rest.NewDefaultTableConvertor(request.GroupVersionResource.GroupResource()),
store: c.store,
gvr: request.GroupVersionResource,
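// Pods and nodes get kubectl-style columns via the lifted print handlers;
// any other resource falls back to the default table convertor.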
tableConvertor: printerstorage.NewTableConvertor(
rest.NewDefaultTableConvertor(request.GroupVersionResource.GroupResource()),
printers.NewTableGenerator().With(printerslifted.AddCoreV1Handlers)),
}
gvk, err := c.restMapper.KindFor(request.GroupVersionResource)
@@ -108,6 +113,7 @@ func (c *Cache) Connect(_ context.Context, request framework.ProxyRequest) (http
Convertor: runtime.NewScheme(),
Subresource: requestInfo.Subresource,
MetaGroupVersion: metav1.SchemeGroupVersion,
TableConvertor: r.tableConvertor,
}
var h http.Handler
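With TableConvertor populated in the handler options, the proxy can serve server-side printed tables for the requests kubectl issues. A rough sketch of such a request with client-go (the proxy path, function name, and client construction are assumptions, not part of this change):

import (
	"context"

	restclient "k8s.io/client-go/rest"
)

// getPodTableRaw is a sketch only: it asks the karmada-search proxy for a
// server-side printed table, using the same Accept header kubectl sends.
// The AbsPath below is an assumed proxy path; adjust it to your deployment.
func getPodTableRaw(ctx context.Context, c restclient.Interface) ([]byte, error) {
	return c.Get().
		AbsPath("/apis/search.karmada.io/v1alpha1/proxying/karmada/proxy/api/v1/pods").
		SetHeader("Accept", "application/json;as=Table;v=v1;g=meta.k8s.io").
		DoRaw(ctx)
}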

View File

@@ -0,0 +1,348 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package lifted
import (
"fmt"
"strings"
"time"
"k8s.io/apimachinery/pkg/util/duration"
"github.com/karmada-io/karmada/pkg/printers"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/sets"
)
const (
// NodeUnreachablePodReason is the reason on a pod when its state cannot be confirmed as kubelet is unresponsive
// on the node it is (was) running.
NodeUnreachablePodReason = "NodeLost"
)
var (
podSuccessConditions = []metav1.TableRowCondition{{Type: metav1.RowCompleted, Status: metav1.ConditionTrue, Reason: string(corev1.PodSucceeded), Message: "The pod has completed successfully."}}
podFailedConditions = []metav1.TableRowCondition{{Type: metav1.RowCompleted, Status: metav1.ConditionTrue, Reason: string(corev1.PodFailed), Message: "The pod failed."}}
)
// AddCoreV1Handlers adds print handlers for core with v1 versions.
func AddCoreV1Handlers(h printers.PrintHandler) {
podColumnDefinitions := []metav1.TableColumnDefinition{
{Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]},
{Name: "Ready", Type: "string", Description: "The aggregate readiness state of this pod for accepting traffic."},
{Name: "Status", Type: "string", Description: "The aggregate status of the containers in this pod."},
{Name: "Restarts", Type: "integer", Description: "The number of times the containers in this pod have been restarted."},
{Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]},
{Name: "IP", Type: "string", Priority: 1, Description: corev1.PodStatus{}.SwaggerDoc()["podIP"]},
{Name: "Node", Type: "string", Priority: 1, Description: corev1.PodSpec{}.SwaggerDoc()["nodeName"]},
{Name: "Nominated Node", Type: "string", Priority: 1, Description: corev1.PodStatus{}.SwaggerDoc()["nominatedNodeName"]},
{Name: "Readiness Gates", Type: "string", Priority: 1, Description: corev1.PodSpec{}.SwaggerDoc()["readinessGates"]},
}
_ = h.TableHandler(podColumnDefinitions, printPodList)
_ = h.TableHandler(podColumnDefinitions, printPod)
nodeColumnDefinitions := []metav1.TableColumnDefinition{
{Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]},
{Name: "Status", Type: "string", Description: "The status of the node"},
{Name: "Roles", Type: "string", Description: "The roles of the node"},
{Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]},
{Name: "Version", Type: "string", Description: corev1.NodeSystemInfo{}.SwaggerDoc()["kubeletVersion"]},
{Name: "Internal-IP", Type: "string", Priority: 1, Description: corev1.NodeStatus{}.SwaggerDoc()["addresses"]},
{Name: "External-IP", Type: "string", Priority: 1, Description: corev1.NodeStatus{}.SwaggerDoc()["addresses"]},
{Name: "OS-Image", Type: "string", Priority: 1, Description: corev1.NodeSystemInfo{}.SwaggerDoc()["osImage"]},
{Name: "Kernel-Version", Type: "string", Priority: 1, Description: corev1.NodeSystemInfo{}.SwaggerDoc()["kernelVersion"]},
{Name: "Container-Runtime", Type: "string", Priority: 1, Description: corev1.NodeSystemInfo{}.SwaggerDoc()["containerRuntimeVersion"]},
}
_ = h.TableHandler(nodeColumnDefinitions, printNode)
_ = h.TableHandler(nodeColumnDefinitions, printNodeList)
}
func printNode(obj *corev1.Node, options printers.GenerateOptions) ([]metav1.TableRow, error) {
row := metav1.TableRow{
Object: runtime.RawExtension{Object: obj},
}
conditionMap := make(map[corev1.NodeConditionType]*corev1.NodeCondition)
NodeAllConditions := []corev1.NodeConditionType{corev1.NodeReady}
for i := range obj.Status.Conditions {
cond := obj.Status.Conditions[i]
conditionMap[cond.Type] = &cond
}
var status []string
for _, validCondition := range NodeAllConditions {
if condition, ok := conditionMap[validCondition]; ok {
if condition.Status == corev1.ConditionTrue {
status = append(status, string(condition.Type))
} else {
status = append(status, "Not"+string(condition.Type))
}
}
}
if len(status) == 0 {
status = append(status, "Unknown")
}
if obj.Spec.Unschedulable {
status = append(status, "SchedulingDisabled")
}
roles := strings.Join(findNodeRoles(obj), ",")
if len(roles) == 0 {
roles = "<none>"
}
row.Cells = append(row.Cells, obj.Name, strings.Join(status, ","), roles, translateTimestampSince(obj.CreationTimestamp), obj.Status.NodeInfo.KubeletVersion)
if options.Wide {
osImage, kernelVersion, crVersion := obj.Status.NodeInfo.OSImage, obj.Status.NodeInfo.KernelVersion, obj.Status.NodeInfo.ContainerRuntimeVersion
if osImage == "" {
osImage = "<unknown>"
}
if kernelVersion == "" {
kernelVersion = "<unknown>"
}
if crVersion == "" {
crVersion = "<unknown>"
}
row.Cells = append(row.Cells, getNodeInternalIP(obj), getNodeExternalIP(obj), osImage, kernelVersion, crVersion)
}
return []metav1.TableRow{row}, nil
}
// Returns the first external IP of the node, or "<none>" if none is found.
func getNodeExternalIP(node *corev1.Node) string {
for _, address := range node.Status.Addresses {
if address.Type == corev1.NodeExternalIP {
return address.Address
}
}
return "<none>"
}
// Returns the first internal IP of the node, or "<none>" if none is found.
func getNodeInternalIP(node *corev1.Node) string {
for _, address := range node.Status.Addresses {
if address.Type == corev1.NodeInternalIP {
return address.Address
}
}
return "<none>"
}
const (
// labelNodeRolePrefix is a label prefix for node roles
// It's copied here until it's merged in core: https://github.com/kubernetes/kubernetes/pull/39112
labelNodeRolePrefix = "node-role.kubernetes.io/"
// nodeLabelRole specifies the role of a node
nodeLabelRole = "kubernetes.io/role"
)
// findNodeRoles returns the roles of a given node.
// The roles are determined by looking for:
// * a node-role.kubernetes.io/<role>="" label
// * a kubernetes.io/role="<role>" label
func findNodeRoles(node *corev1.Node) []string {
roles := sets.NewString()
for k, v := range node.Labels {
switch {
case strings.HasPrefix(k, labelNodeRolePrefix):
if role := strings.TrimPrefix(k, labelNodeRolePrefix); len(role) > 0 {
roles.Insert(role)
}
case k == nodeLabelRole && v != "":
roles.Insert(v)
}
}
return roles.List()
}
func printNodeList(list *corev1.NodeList, options printers.GenerateOptions) ([]metav1.TableRow, error) {
rows := make([]metav1.TableRow, 0, len(list.Items))
for i := range list.Items {
r, err := printNode(&list.Items[i], options)
if err != nil {
return nil, err
}
rows = append(rows, r...)
}
return rows, nil
}
func printPodList(podList *corev1.PodList, options printers.GenerateOptions) ([]metav1.TableRow, error) {
rows := make([]metav1.TableRow, 0, len(podList.Items))
for i := range podList.Items {
r, err := printPod(&podList.Items[i], options)
if err != nil {
return nil, err
}
rows = append(rows, r...)
}
return rows, nil
}
func printPod(pod *corev1.Pod, options printers.GenerateOptions) ([]metav1.TableRow, error) {
restarts := 0
totalContainers := len(pod.Spec.Containers)
readyContainers := 0
reason := string(pod.Status.Phase)
if pod.Status.Reason != "" {
reason = pod.Status.Reason
}
row := metav1.TableRow{
Object: runtime.RawExtension{Object: pod},
}
switch pod.Status.Phase {
case corev1.PodSucceeded:
row.Conditions = podSuccessConditions
case corev1.PodFailed:
row.Conditions = podFailedConditions
}
initializing := false
for i := range pod.Status.InitContainerStatuses {
container := pod.Status.InitContainerStatuses[i]
restarts += int(container.RestartCount)
switch {
case container.State.Terminated != nil && container.State.Terminated.ExitCode == 0:
continue
case container.State.Terminated != nil:
// initialization has failed
if len(container.State.Terminated.Reason) == 0 {
if container.State.Terminated.Signal != 0 {
reason = fmt.Sprintf("Init:Signal:%d", container.State.Terminated.Signal)
} else {
reason = fmt.Sprintf("Init:ExitCode:%d", container.State.Terminated.ExitCode)
}
} else {
reason = "Init:" + container.State.Terminated.Reason
}
initializing = true
case container.State.Waiting != nil && len(container.State.Waiting.Reason) > 0 && container.State.Waiting.Reason != "PodInitializing":
reason = "Init:" + container.State.Waiting.Reason
initializing = true
default:
reason = fmt.Sprintf("Init:%d/%d", i, len(pod.Spec.InitContainers))
initializing = true
}
break
}
if !initializing {
restarts = 0
hasRunning := false
for i := len(pod.Status.ContainerStatuses) - 1; i >= 0; i-- {
container := pod.Status.ContainerStatuses[i]
restarts += int(container.RestartCount)
if container.State.Waiting != nil && container.State.Waiting.Reason != "" {
reason = container.State.Waiting.Reason
} else if container.State.Terminated != nil && container.State.Terminated.Reason != "" {
reason = container.State.Terminated.Reason
} else if container.State.Terminated != nil && container.State.Terminated.Reason == "" {
if container.State.Terminated.Signal != 0 {
reason = fmt.Sprintf("Signal:%d", container.State.Terminated.Signal)
} else {
reason = fmt.Sprintf("ExitCode:%d", container.State.Terminated.ExitCode)
}
} else if container.Ready && container.State.Running != nil {
hasRunning = true
readyContainers++
}
}
// change pod status back to "Running" if there is at least one container still reporting as "Running" status
if reason == "Completed" && hasRunning {
if hasPodReadyCondition(pod.Status.Conditions) {
reason = "Running"
} else {
reason = "NotReady"
}
}
}
if pod.DeletionTimestamp != nil && pod.Status.Reason == NodeUnreachablePodReason {
reason = "Unknown"
} else if pod.DeletionTimestamp != nil {
reason = "Terminating"
}
row.Cells = append(row.Cells, pod.Name, fmt.Sprintf("%d/%d", readyContainers, totalContainers), reason, int64(restarts), translateTimestampSince(pod.CreationTimestamp))
if options.Wide {
nodeName := pod.Spec.NodeName
nominatedNodeName := pod.Status.NominatedNodeName
podIP := ""
if len(pod.Status.PodIPs) > 0 {
podIP = pod.Status.PodIPs[0].IP
}
if podIP == "" {
podIP = "<none>"
}
if nodeName == "" {
nodeName = "<none>"
}
if nominatedNodeName == "" {
nominatedNodeName = "<none>"
}
readinessGates := "<none>"
if len(pod.Spec.ReadinessGates) > 0 {
trueConditions := 0
for _, readinessGate := range pod.Spec.ReadinessGates {
conditionType := readinessGate.ConditionType
for _, condition := range pod.Status.Conditions {
if condition.Type == conditionType {
if condition.Status == corev1.ConditionTrue {
trueConditions++
}
break
}
}
}
readinessGates = fmt.Sprintf("%d/%d", trueConditions, len(pod.Spec.ReadinessGates))
}
row.Cells = append(row.Cells, podIP, nodeName, nominatedNodeName, readinessGates)
}
return []metav1.TableRow{row}, nil
}
func hasPodReadyCondition(conditions []corev1.PodCondition) bool {
for _, condition := range conditions {
if condition.Type == corev1.PodReady && condition.Status == corev1.ConditionTrue {
return true
}
}
return false
}
// translateTimestampSince returns the elapsed time since timestamp in
// human-readable approximation.
func translateTimestampSince(timestamp metav1.Time) string {
if timestamp.IsZero() {
return "<unknown>"
}
return duration.HumanDuration(time.Since(timestamp.Time))
}

View File

@@ -0,0 +1,67 @@
/*
Copyright 2021 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package lifted
import (
"fmt"
"reflect"
"testing"
"github.com/karmada-io/karmada/pkg/printers"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/diff"
)
func TestPrintCoreV1(t *testing.T) {
testCases := []struct {
pod corev1.Pod
generateOptions printers.GenerateOptions
expect []metav1.TableRow
}{
// Pod in Pending phase with no containers; expect the default (non-wide) columns.
{
corev1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "test1"},
Spec: corev1.PodSpec{},
Status: corev1.PodStatus{
Phase: corev1.PodPending,
HostIP: "1.2.3.4",
PodIP: "2.3.4.5",
},
},
printers.GenerateOptions{Wide: false},
[]metav1.TableRow{{Cells: []interface{}{"test1", "0/0", "Pending", int64(0), "<unknown>"}}},
},
}
for i, tc := range testCases {
tc := tc
t.Run(fmt.Sprintf("pod=%s, expected value=%v", tc.pod.Name, tc.expect), func(t *testing.T) {
rows, err := printPod(&tc.pod, tc.generateOptions)
if err != nil {
t.Fatal(err)
}
for i := range rows {
rows[i].Object.Object = nil
}
if !reflect.DeepEqual(tc.expect, rows) {
t.Errorf("%d mismatch: %s", i, diff.ObjectReflectDiff(tc.expect, rows))
}
})
}
}
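The test above only exercises printPod. A hypothetical companion case for printNode, relying on the same imports as the test above and with expected cells derived from the printNode implementation in the lifted file, could look like this sketch:

func TestPrintNodeSketch(t *testing.T) {
	node := corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node1"}}
	// With no Ready condition, no role labels, and a zero creation timestamp,
	// printNode reports "Unknown" status, "<none>" roles, "<unknown>" age, and
	// an empty kubelet version.
	expect := []metav1.TableRow{{Cells: []interface{}{"node1", "Unknown", "<none>", "<unknown>", ""}}}

	rows, err := printNode(&node, printers.GenerateOptions{Wide: false})
	if err != nil {
		t.Fatal(err)
	}
	for i := range rows {
		rows[i].Object.Object = nil
	}
	if !reflect.DeepEqual(expect, rows) {
		t.Errorf("mismatch: %s", diff.ObjectReflectDiff(expect, rows))
	}
}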