mirror of https://github.com/rancher/dashboard.git
fix monitoring metric aggregations on cluster dash
commit 2f16ba140e
parent 442ac831db
@@ -67,7 +67,7 @@ export default {
     <h3>
       {{ name }}
     </h3>
-    <div v-if="reserved && (reserved.total || reserved.useful)" class="">
+    <div v-if="reserved && (reserved.total || reserved.useful)">
       <ConsumptionGauge
         :capacity="reserved.total"
         :used="reserved.useful"
@@ -83,7 +83,7 @@ export default {
         </template>
       </ConsumptionGauge>
     </div>
-    <div v-if="used && used.useful">
+    <div v-if="used && used.useful" class="mt-20">
       <ConsumptionGauge
         :capacity="used.total"
         :used="used.useful"
@@ -91,7 +91,7 @@ export default {
       >
         <template #title>
           <span>
-            {{ t('clusterIndexPage.hardwareResourceGauge.used') }} <span class="values text-muted">{{ used.useful }} / {{ used.total }} {{ used.units }}</span>
+            {{ t('clusterIndexPage.hardwareResourceGauge.used') }} <span class="values text-muted">{{ maxDecimalPlaces(used.useful) }} / {{ maxDecimalPlaces(used.total) }} {{ used.units }}</span>
           </span>
           <span>
             {{ percentage(used) }}
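The gauge title now runs both numbers through maxDecimalPlaces before display, so the raw values computed from node metrics no longer overflow the label. The helper's definition is outside this diff; a minimal sketch of what such a rounding helper could look like, assuming it simply caps a value at two decimal places:

// Hedged sketch only; the real maxDecimalPlaces is defined outside this diff.
// Caps a number at a fixed count of decimal places so a gauge label like
// "3.14159265 / 8 GiB" renders as "3.14 / 8 GiB".
function maxDecimalPlaces(value, places = 2) {
  const factor = Math.pow(10, places);

  return Math.round(value * factor) / factor;
}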
@@ -1,5 +1,5 @@
 import { CATALOG } from '@/config/labels-annotations';
-import { FLEET, MANAGEMENT, NODE } from '@/config/types';
+import { FLEET, MANAGEMENT } from '@/config/types';
 import { insertAt } from '@/utils/array';
 import { downloadFile } from '@/utils/download';
 import { parseSi } from '@/utils/units';
@@ -316,36 +316,6 @@ export default {
     };
   },

-  fetchNodeMetrics() {
-    return async() => {
-      const nodes = await this.$dispatch('cluster/findAll', { type: NODE }, { root: true });
-      const nodeMetrics = await this.$dispatch('cluster/findAll', { type: NODE }, { root: true });
-
-      const someNonWorkerRoles = nodes.some(node => node.hasARole && !node.isWorker);
-
-      const metrics = nodeMetrics.filter((metric) => {
-        const node = nodes.find(nd => nd.id === metric.id);
-
-        return node && (!someNonWorkerRoles || node.isWorker);
-      });
-      const initialAggregation = {
-        cpu: 0,
-        memory: 0
-      };
-
-      if (isEmpty(metrics)) {
-        return null;
-      }
-
-      return metrics.reduce((agg, metric) => {
-        agg.cpu += parseSi(metric?.usage?.cpu);
-        agg.memory += parseSi(metric?.usage?.memory);
-
-        return agg;
-      }, initialAggregation);
-    };
-  },
-
   nodes() {
     return this.$getters['all'](MANAGEMENT.NODE).filter(node => node.id.startsWith(this.id));
   },
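Both the model method removed above and the computed property added further down rely on parseSi from @/utils/units to convert Kubernetes quantity strings into plain numbers before summing them. A simplified illustration of that conversion, using a hypothetical parseQuantity helper (the real parseSi supports more suffixes and edge cases):

// Hypothetical, simplified stand-in for parseSi, for illustration only.
// Kubernetes reports CPU as millicores ("1505m") and memory with binary
// suffixes ("2Gi"); aggregation needs both as plain numbers.
function parseQuantity(str) {
  const match = `${ str }`.match(/^([0-9.]+)([a-zA-Z]*)$/);

  if ( !match ) {
    return NaN;
  }

  const suffixes = { m: 1e-3, k: 1e3, M: 1e6, G: 1e9, Ki: 1024, Mi: 1024 ** 2, Gi: 1024 ** 3 };
  const value = parseFloat(match[1]);

  return suffixes[match[2]] ? value * suffixes[match[2]] : value;
}

// parseQuantity('1505m') === 1.505 (cores); parseQuantity('2Gi') === 2147483648 (bytes)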
@@ -37,6 +37,7 @@ import metricPoller from '@/mixins/metric-poller';
 import EmberPage from '@/components/EmberPage';
 import ResourceSummary, { resourceCounts } from '@/components/ResourceSummary';
 import HardwareResourceGauge from '@/components/HardwareResourceGauge';
+import { isEmpty } from '@/utils/object';

 export const RESOURCES = [NAMESPACE, INGRESS, PV, WORKLOAD_TYPES.DEPLOYMENT, WORKLOAD_TYPES.STATEFUL_SET, WORKLOAD_TYPES.JOB, WORKLOAD_TYPES.DAEMON_SET, SERVICE];

@@ -87,7 +88,6 @@ export default {
     for ( const k in res ) {
       this[k] = res[k];
     }
-    this.metricAggregations = await this.currentCluster.fetchNodeMetrics();
   },

   data() {
@@ -136,7 +136,6 @@ export default {
       nodeMetrics: [],
       nodeTemplates: [],
       nodes: [],
-      metricAggregations: {},
       showClusterMetrics: false,
       showK8sMetrics: false,
       showEtcdMetrics: false,
@@ -230,6 +229,31 @@ export default {
       return createMemoryValues(this.currentCluster?.status?.allocatable?.memory, this.currentCluster?.status?.requested?.memory);
     },

+    metricAggregations() {
+      const nodes = this.nodes;
+      const someNonWorkerRoles = this.nodes.some(node => node.hasARole && !node.isWorker);
+      const metrics = this.nodeMetrics.filter((nodeMetrics) => {
+        const node = nodes.find(nd => nd.id === nodeMetrics.id);
+
+        return node && (!someNonWorkerRoles || node.isWorker);
+      });
+      const initialAggregation = {
+        cpu: 0,
+        memory: 0
+      };
+
+      if (isEmpty(metrics)) {
+        return null;
+      }
+
+      return metrics.reduce((agg, metric) => {
+        agg.cpu += parseSi(metric.usage.cpu);
+        agg.memory += parseSi(metric.usage.memory);
+
+        return agg;
+      }, initialAggregation);
+    },
+
     cpuUsed() {
       return {
         total: parseSi(this.currentCluster?.status?.capacity?.cpu),
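With this change the aggregation is an ordinary computed property: it recomputes whenever the nodes or nodeMetrics arrays it reads are updated by the poller, and whenever any node carries a non-worker role, only worker nodes contribute to the gauge totals. A standalone sketch of that filtering rule against made-up sample data:

// Made-up sample data to illustrate the worker-only filter used above.
const nodes = [
  { id: 'cp-1', hasARole: true, isWorker: false }, // control plane node
  { id: 'w-1', hasARole: true, isWorker: true },
  { id: 'w-2', hasARole: true, isWorker: true }
];
const nodeMetrics = [
  { id: 'cp-1', usage: { cpu: '250m', memory: '1Gi' } },
  { id: 'w-1', usage: { cpu: '500m', memory: '2Gi' } },
  { id: 'w-2', usage: { cpu: '750m', memory: '2Gi' } }
];

// At least one node has a non-worker role, so only workers are counted:
const someNonWorkerRoles = nodes.some(node => node.hasARole && !node.isWorker);
const metrics = nodeMetrics.filter((metric) => {
  const node = nodes.find(nd => nd.id === metric.id);

  return node && (!someNonWorkerRoles || node.isWorker);
});
// metrics now holds w-1 and w-2 only; cp-1's usage is excluded from the gauges.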