Changes to kube node list

- Make the node type description seen in the explorer / node list clearer
  - also mention where lifecycle management can be done
- Remove node pool groups for rke1, so there are no pools at all in this view
- Fix #3355
Richard Cox 2021-07-02 16:11:06 +01:00
parent f3390b2a1d
commit 8d5aa08b40
2 changed files with 31 additions and 128 deletions


@@ -4292,6 +4292,7 @@ typeDescription:
monitoring.coreos.com.prometheusrule: A Prometheus Rule resource defines both recording and/or alert rules. A recording rule can pre-compute values and save the results. Alerting rules allow you to define conditions on when to send notifications to AlertManager.
monitoring.coreos.com.prometheus: A Prometheus server is a Prometheus deployment whose scrape configuration and rules are determined by selected ServiceMonitors, PodMonitors, and PrometheusRules and whose alerts will be sent to all selected Alertmanagers with the custom resource's configuration.
monitoring.coreos.com.alertmanager: An alert manager is a deployment whose configuration will be specified by a secret in the same namespace, which determines which alerts should go to which receiver.
node: The base Kubernetes Node resource represents a virtual or physical machine which hosts deployments. To manage the machine lifecycle, if available, go to Cluster Management.
catalog.cattle.io.clusterrepo: 'A chart repository is a Helm repository or {vendor} git based application catalog. It provides the list of available charts in the cluster.'
catalog.cattle.io.operation: An operation is the list of recent Helm operations that have been applied to the cluster.
catalog.cattle.io.app: An installed application is a Helm 3 chart that was installed either via our charts or through the Helm CLI.
@@ -4464,6 +4465,11 @@ typeLabel:
one { Namespace }
other { Namespaces }
}
node: |-
{count, plural,
one { Kube Node }
other { Kube Nodes }
}
group.principal: |-
{count, plural,
one { Group }
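
The two `node` entries added above give the resource a human description (typeDescription) and a count-aware label (typeLabel). The label uses ICU MessageFormat plural syntax: the `one` branch is picked for a singular count and the `other` branch otherwise. The snippet below only illustrates that selection using the standard `Intl.PluralRules` API, not the dashboard's own i18n plumbing:

```js
// Illustrative only: resolves the plural branch the way an ICU message
// like "{count, plural, one { Kube Node } other { Kube Nodes }}" would for English.
const branches = { one: 'Kube Node', other: 'Kube Nodes' };

function nodeLabel(count) {
  const category = new Intl.PluralRules('en').select(count); // 'one' or 'other'

  return `${count} ${branches[category] || branches.other}`;
}

console.log(nodeLabel(1)); // "1 Kube Node"
console.log(nodeLabel(3)); // "3 Kube Nodes"
```
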


@@ -10,7 +10,6 @@ import metricPoller from '@/mixins/metric-poller';
import {
MANAGEMENT, METRIC, NODE, NORMAN, POD
} from '@/config/types';
import { mapGetters } from 'vuex';
import { allHash } from '@/utils/promise';
import { get } from '@/utils/object';
import { GROUP_RESOURCES, mapPref } from '@/store/prefs';
@@ -33,25 +32,21 @@ export default {
async fetch() {
const hash = { kubeNodes: this.$store.dispatch('cluster/findAll', { type: NODE }) };
const canViewNodePools = this.$store.getters[`management/schemaFor`](MANAGEMENT.NODE_POOL);
const canViewNodeTemplates = this.$store.getters[`management/schemaFor`](MANAGEMENT.NODE_TEMPLATE);
const canViewPods = this.$store.getters[`cluster/schemaFor`](POD);
const canViewMgmtNodes = this.$store.getters[`management/schemaFor`](MANAGEMENT.NODE);
const canViewNormanNodes = this.$store.getters[`rancher/schemaFor`](NORMAN.NODE);
this.canViewPods = this.$store.getters[`cluster/schemaFor`](POD);
if (canViewNormanNodes) {
// Required for Drain action
// Required for Drain/Cordon action
hash.normanNodes = this.$store.dispatch('rancher/findAll', { type: NORMAN.NODE });
}
if (canViewNodePools && canViewNodeTemplates) {
// Management Nodes are required for the kube role and some resource states
if (canViewMgmtNodes) {
hash.mgmtNodes = this.$store.dispatch('management/findAll', { type: MANAGEMENT.NODE });
hash.nodePools = this.$store.dispatch('management/findAll', { type: MANAGEMENT.NODE_POOL });
hash.nodeTemplates = this.$store.dispatch('management/findAll', { type: MANAGEMENT.NODE_TEMPLATE });
}
if (canViewPods) {
if (this.canViewPods) {
// Used for running pods metrics
hash.pods = this.$store.dispatch('cluster/findAll', { type: POD });
}
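
The reworked fetch above treats `schemaFor` as a capability check: a resource type is only requested when the current user can see its schema, so the page still loads for users with narrower permissions. Below is a minimal, self-contained sketch of that gate; the store namespaces mirror the diff, but the helper name and type strings are placeholders, not dashboard code:

```js
// Sketch only: request a type solely when its schema is visible to the user.
function fetchIfVisible(store, hash, key, storeName, type) {
  if (store.getters[`${storeName}/schemaFor`](type)) {
    hash[key] = store.dispatch(`${storeName}/findAll`, { type });
  }
}

// Placeholder strings standing in for the '@/config/types' constants.
const TYPES = { NODE: 'node', POD: 'pod', MGMT_NODE: 'mgmt-node', NORMAN_NODE: 'norman-node' };

function buildRequestHash(store) {
  const hash = { kubeNodes: store.dispatch('cluster/findAll', { type: TYPES.NODE }) };

  fetchIfVisible(store, hash, 'normanNodes', 'rancher', TYPES.NORMAN_NODE); // Drain/Cordon actions
  fetchIfVisible(store, hash, 'mgmtNodes', 'management', TYPES.MGMT_NODE);  // kube role, resource states
  fetchIfVisible(store, hash, 'pods', 'cluster', TYPES.POD);                // running-pods column

  return hash;
}
```
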
@@ -59,67 +54,38 @@ export default {
const res = await allHash(hash);
this.kubeNodes = res.kubeNodes;
this.nodePools = res.nodePools || [];
this.nodeTemplates = res.nodeTemplates || [];
await this.updateNodePools(res.kubeNodes);
},
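
`allHash` (imported from `@/utils/promise`) is what lets the requests above run in parallel: it takes an object whose values are promises and resolves to an object with the same keys and the settled values. A minimal equivalent, written from how it is used here rather than from the utility's actual source:

```js
// Await an object of promises; resolve to an object with the same keys and the resolved values.
async function allHash(hash) {
  const keys = Object.keys(hash);
  const values = await Promise.all(Object.values(hash));

  return keys.reduce((out, key, i) => {
    out[key] = values[i];

    return out;
  }, {});
}

// Usage mirroring the component: everything is dispatched first, then awaited together.
// const res = await allHash({ kubeNodes: fetchNodes(), pods: fetchPods() });
// res.kubeNodes / res.pods are the resolved lists.
```
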
data() {
return {
kubeNodes: null,
nodeTemplates: null,
nodePools: null,
headers: [STATE, NAME, ROLES, VERSION, INTERNAL_EXTERNAL_IP, {
canViewPods: false,
};
},
computed: {
tableGroup: mapPref(GROUP_RESOURCES),
headers() {
const headers = [STATE, NAME, ROLES, VERSION, INTERNAL_EXTERNAL_IP, {
...CPU,
breakpoint: COLUMN_BREAKPOINTS.LAPTOP
}, {
...RAM,
breakpoint: COLUMN_BREAKPOINTS.LAPTOP
}, {
}];
if (this.canViewPods) {
headers.push({
...PODS,
breakpoint: COLUMN_BREAKPOINTS.DESKTOP
}, AGE],
};
},
computed: {
...mapGetters(['currentCluster']),
tableGroup: mapPref(GROUP_RESOURCES),
clusterNodePools() {
return this.nodePools?.filter(pool => pool?.spec?.clusterName === this.currentCluster.id) || [];
},
clusterNodePoolsMap() {
return this.clusterNodePools.reduce((res, node) => {
res[node.id] = node;
return res;
}, {});
},
hasPools() {
return !!this.clusterNodePools.length;
},
groupBy() {
if (!this.hasPools) {
return null;
}
return this.tableGroup === 'none' ? '' : 'nodePoolId';
});
}
headers.push(AGE);
return headers;
},
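
Because old and new lines are interleaved above, the resulting `headers` computed is easier to read reassembled. It moves the column list out of `data()` so the Pods column is only appended when `canViewPods` was set during `fetch()`. The sketch below mirrors the added lines, with placeholder column constants so it stands alone; it is a reading aid, not the exact file:

```js
// Placeholder column definitions standing in for the component's imported constants.
const STATE = 'state', NAME = 'name', ROLES = 'roles', VERSION = 'version',
      INTERNAL_EXTERNAL_IP = 'ip', AGE = 'age';
const CPU = { name: 'cpu' }, RAM = { name: 'ram' }, PODS = { name: 'pods' };
const COLUMN_BREAKPOINTS = { LAPTOP: 'laptop', DESKTOP: 'desktop' };

// Mirrors the new computed headers(): base columns always, Pods only when visible, Age last.
function buildHeaders(canViewPods) {
  const headers = [STATE, NAME, ROLES, VERSION, INTERNAL_EXTERNAL_IP,
    { ...CPU, breakpoint: COLUMN_BREAKPOINTS.LAPTOP },
    { ...RAM, breakpoint: COLUMN_BREAKPOINTS.LAPTOP }
  ];

  if (canViewPods) {
    headers.push({ ...PODS, breakpoint: COLUMN_BREAKPOINTS.DESKTOP });
  }
  headers.push(AGE);

  return headers;
}
```
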
watch: {
kubeNodes: {
deep: true,
handler(neu, old) {
this.updateNodePools(neu);
}
},
},
methods: {
@@ -136,26 +102,6 @@ export default {
}
},
updateNodePools(nodes = []) {
nodes.forEach((node) => {
const sNode = node.managementNode;
if (sNode) {
node.nodePoolId = sNode.spec.nodePoolName?.replace(':', '/') || '' ;
}
});
},
getNodePoolFromTableGroup(group) {
return this.getNodePool(group.key);
},
getNodeTemplate(nodeTemplateName) {
const parsedName = nodeTemplateName.replace(':', '/');
return this.nodeTemplates.find(nt => nt.id === parsedName);
},
get,
}
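
For context on what the deleted methods did: each node was tagged with a `nodePoolId` taken from its management node's `spec.nodePoolName` (with the `:` separator swapped for `/`, the same normalisation the removed `getNodeTemplate` applied), and the table then grouped rows on that id. The sketch below restates that removed behaviour as standalone functions; the grouping step is illustrative, since the component actually handed `group-by="nodePoolId"` to the table:

```js
// Sketch of the removed behaviour, not dashboard code.
function tagWithPoolId(nodes) {
  for (const node of nodes) {
    const mgmt = node.managementNode;

    if (mgmt) {
      node.nodePoolId = mgmt.spec.nodePoolName?.replace(':', '/') || '';
    }
  }
}

// Illustrative grouping on that id; the real view let ResourceTable do this via group-by.
function groupByPool(nodes) {
  return nodes.reduce((groups, node) => {
    const key = node.nodePoolId || '';

    (groups[key] = groups[key] || []).push(node);

    return groups;
  }, {});
}
```
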
@@ -170,27 +116,10 @@ export default {
v-bind="$attrs"
:schema="schema"
:headers="headers"
:rows="[...kubeNodes]"
:groupable="hasPools"
:group-by="groupBy"
group-tooltip="node.list.pool"
:rows="kubeNodes"
:sub-rows="true"
v-on="$listeners"
>
<template #group-by="{group}">
<div class="pool-row" :class="{'has-description':clusterNodePoolsMap[group.key] && clusterNodePoolsMap[group.key].nodeTemplate}">
<div v-trim-whitespace class="group-tab">
<div v-if="clusterNodePoolsMap[group.key]" class="project-name" v-html="t('resourceTable.groupLabel.nodePool', { name: clusterNodePoolsMap[group.key].spec.hostnamePrefix, count: group.rows.length}, true)">
</div>
<div v-else class="project-name" v-html="t('resourceTable.groupLabel.notInANodePool')">
</div>
<div v-if="clusterNodePoolsMap[group.key] && clusterNodePoolsMap[group.key].nodeTemplate" class="description text-muted text-small">
{{ clusterNodePoolsMap[group.key].providerDisplay }} &ndash; {{ clusterNodePoolsMap[group.key].providerLocation }} / {{ clusterNodePoolsMap[group.key].providerSize }} ({{ clusterNodePoolsMap[group.key].providerName }})
</div>
</div>
</div>
</template>
<template #sub-row="{fullColspan, row}">
<tr class="taints sub-row" :class="{'empty-taints': !row.spec.taints || !row.spec.taints.length}">
<template v-if="row.spec.taints && row.spec.taints.length">
@@ -212,38 +141,6 @@ export default {
</template>
<style lang='scss' scoped>
.pool-row {
display: flex;
justify-content: space-between;
.project-name {
line-height: 30px;
}
&.has-description {
.right {
margin-top: 5px;
}
.group-tab {
&, &::after {
height: 50px;
}
&::after {
right: -20px;
}
.description {
margin-top: -20px;
}
}
}
BUTTON {
line-height: 1em;
}
}
.taints {
td {
padding-top:0;