Compare commits

..

2 Commits

Author SHA1 Message Date
kdeng46 cf0248c746 Gemini Review: concise error message
Signed-off-by: kdeng46 <kdeng46@bloomberg.net>
2025-07-22 12:18:39 -04:00
kdeng46 e6b71ad03a Enhance logging in DeploymentReplicasSyncer for better clarity and debugging
Signed-off-by: kdeng46 <kdeng46@bloomberg.net>
2025-07-22 12:09:14 -04:00
30 changed files with 305 additions and 484 deletions

View File

@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
FROM alpine:3.22.1
FROM alpine:3.22.0
ARG BINARY

View File

@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
FROM alpine:3.22.1
FROM alpine:3.22.0
ARG BINARY
ARG TARGETPLATFORM

View File

@ -18,7 +18,6 @@ package app
import (
"context"
"fmt"
"net/http"
"testing"
"time"
@ -28,7 +27,6 @@ import (
"github.com/karmada-io/karmada/cmd/descheduler/app/options"
"github.com/karmada-io/karmada/pkg/util/names"
testingutil "github.com/karmada-io/karmada/pkg/util/testing"
)
func TestNewDeschedulerCommand(t *testing.T) {
@ -68,10 +66,8 @@ func TestDeschedulerCommandFlagParsing(t *testing.T) {
}
func TestServeHealthzAndMetrics(t *testing.T) {
ports, err := testingutil.GetFreePorts("127.0.0.1", 2)
require.NoError(t, err)
healthAddress := fmt.Sprintf("127.0.0.1:%d", ports[0])
metricsAddress := fmt.Sprintf("127.0.0.1:%d", ports[1])
healthAddress := "127.0.0.1:8082"
metricsAddress := "127.0.0.1:8083"
go serveHealthzAndMetrics(healthAddress, metricsAddress)
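
One side of the hunk above allocates test ports through a `testingutil.GetFreePorts` helper instead of hardcoding 8082/8083. As a rough idea of how such a helper is commonly implemented (a sketch only; the actual code in `pkg/util/testing` may differ), it can bind to port 0 so the kernel assigns an unused port:

```go
package testingutil

import (
	"fmt"
	"net"
)

// GetFreePorts sketches a typical free-port helper: bind to port 0 so the
// kernel picks an unused port, record the assigned port, and close every
// listener before returning so the ports are available to the test.
func GetFreePorts(host string, n int) ([]int, error) {
	listeners := make([]net.Listener, 0, n)
	defer func() {
		for _, l := range listeners {
			_ = l.Close()
		}
	}()

	ports := make([]int, 0, n)
	for i := 0; i < n; i++ {
		l, err := net.Listen("tcp", net.JoinHostPort(host, "0"))
		if err != nil {
			return nil, fmt.Errorf("failed to allocate free port: %w", err)
		}
		listeners = append(listeners, l)
		ports = append(ports, l.Addr().(*net.TCPAddr).Port)
	}
	return ports, nil
}
```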

View File

@ -18,7 +18,6 @@ package app
import (
"context"
"fmt"
"net/http"
"testing"
"time"
@ -28,7 +27,6 @@ import (
"github.com/karmada-io/karmada/cmd/scheduler/app/options"
"github.com/karmada-io/karmada/pkg/util/names"
testingutil "github.com/karmada-io/karmada/pkg/util/testing"
)
func TestNewSchedulerCommand(t *testing.T) {
@ -68,10 +66,8 @@ func TestSchedulerCommandFlagParsing(t *testing.T) {
}
func TestServeHealthzAndMetrics(t *testing.T) {
ports, err := testingutil.GetFreePorts("127.0.0.1", 2)
require.NoError(t, err)
healthAddress := fmt.Sprintf("127.0.0.1:%d", ports[0])
metricsAddress := fmt.Sprintf("127.0.0.1:%d", ports[1])
healthAddress := "127.0.0.1:8082"
metricsAddress := "127.0.0.1:8083"
go serveHealthzAndMetrics(healthAddress, metricsAddress)

View File

@ -2,54 +2,48 @@
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
**Table of Contents** *generated with [DocToc](https://github.com/thlorenz/doctoc)*
- [v1.12.8](#v1128)
- [Downloads for v1.12.8](#downloads-for-v1128)
- [Changelog since v1.12.7](#changelog-since-v1127)
- [Changes by Kind](#changes-by-kind)
- [Bug Fixes](#bug-fixes)
- [Others](#others)
- [v1.12.7](#v1127)
- [Downloads for v1.12.7](#downloads-for-v1127)
- [Changelog since v1.12.6](#changelog-since-v1126)
- [Changes by Kind](#changes-by-kind-1)
- [Bug Fixes](#bug-fixes-1)
- [Others](#others-1)
- [Changes by Kind](#changes-by-kind)
- [Bug Fixes](#bug-fixes)
- [Others](#others)
- [v1.12.6](#v1126)
- [Downloads for v1.12.6](#downloads-for-v1126)
- [Changelog since v1.12.5](#changelog-since-v1125)
- [Changes by Kind](#changes-by-kind-2)
- [Bug Fixes](#bug-fixes-2)
- [Others](#others-2)
- [Changes by Kind](#changes-by-kind-1)
- [Bug Fixes](#bug-fixes-1)
- [Others](#others-1)
- [v1.12.5](#v1125)
- [Downloads for v1.12.5](#downloads-for-v1125)
- [Changelog since v1.12.4](#changelog-since-v1124)
- [Changes by Kind](#changes-by-kind-3)
- [Bug Fixes](#bug-fixes-3)
- [Others](#others-3)
- [Changes by Kind](#changes-by-kind-2)
- [Bug Fixes](#bug-fixes-2)
- [Others](#others-2)
- [v1.12.4](#v1124)
- [Downloads for v1.12.4](#downloads-for-v1124)
- [Changelog since v1.12.3](#changelog-since-v1123)
- [Changes by Kind](#changes-by-kind-4)
- [Bug Fixes](#bug-fixes-4)
- [Others](#others-4)
- [Changes by Kind](#changes-by-kind-3)
- [Bug Fixes](#bug-fixes-3)
- [Others](#others-3)
- [v1.12.3](#v1123)
- [Downloads for v1.12.3](#downloads-for-v1123)
- [Changelog since v1.12.2](#changelog-since-v1122)
- [Changes by Kind](#changes-by-kind-5)
- [Bug Fixes](#bug-fixes-5)
- [Others](#others-5)
- [Changes by Kind](#changes-by-kind-4)
- [Bug Fixes](#bug-fixes-4)
- [Others](#others-4)
- [v1.12.2](#v1122)
- [Downloads for v1.12.2](#downloads-for-v1122)
- [Changelog since v1.12.1](#changelog-since-v1121)
- [Changes by Kind](#changes-by-kind-6)
- [Bug Fixes](#bug-fixes-6)
- [Others](#others-6)
- [Changes by Kind](#changes-by-kind-5)
- [Bug Fixes](#bug-fixes-5)
- [Others](#others-5)
- [v1.12.1](#v1121)
- [Downloads for v1.12.1](#downloads-for-v1121)
- [Changelog since v1.12.0](#changelog-since-v1120)
- [Changes by Kind](#changes-by-kind-7)
- [Bug Fixes](#bug-fixes-7)
- [Others](#others-7)
- [Changes by Kind](#changes-by-kind-6)
- [Bug Fixes](#bug-fixes-6)
- [Others](#others-6)
- [v1.12.0](#v1120)
- [Downloads for v1.12.0](#downloads-for-v1120)
- [What's New](#whats-new)
@ -60,7 +54,7 @@
- [Other Notable Changes](#other-notable-changes)
- [API Changes](#api-changes)
- [Deprecation](#deprecation)
- [Bug Fixes](#bug-fixes-8)
- [Bug Fixes](#bug-fixes-7)
- [Security](#security)
- [Features & Enhancements](#features--enhancements)
- [Other](#other)
@ -72,11 +66,11 @@
- [Downloads for v1.12.0-beta.0](#downloads-for-v1120-beta0)
- [Changelog since v1.12.0-alpha.1](#changelog-since-v1120-alpha1)
- [Urgent Update Notes](#urgent-update-notes)
- [Changes by Kind](#changes-by-kind-8)
- [Changes by Kind](#changes-by-kind-7)
- [API Changes](#api-changes-1)
- [Features & Enhancements](#features--enhancements-1)
- [Deprecation](#deprecation-1)
- [Bug Fixes](#bug-fixes-9)
- [Bug Fixes](#bug-fixes-8)
- [Security](#security-1)
- [Other](#other-1)
- [Dependencies](#dependencies-1)
@ -86,11 +80,11 @@
- [Downloads for v1.12.0-alpha.1](#downloads-for-v1120-alpha1)
- [Changelog since v1.11.0](#changelog-since-v1110)
- [Urgent Update Notes](#urgent-update-notes-1)
- [Changes by Kind](#changes-by-kind-9)
- [Changes by Kind](#changes-by-kind-8)
- [API Changes](#api-changes-2)
- [Features & Enhancements](#features--enhancements-2)
- [Deprecation](#deprecation-2)
- [Bug Fixes](#bug-fixes-10)
- [Bug Fixes](#bug-fixes-9)
- [Security](#security-2)
- [Other](#other-2)
- [Dependencies](#dependencies-2)
@ -99,20 +93,6 @@
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
# v1.12.8
## Downloads for v1.12.8
Download v1.12.8 in the [v1.12.8 release page](https://github.com/karmada-io/karmada/releases/tag/v1.12.8).
## Changelog since v1.12.7
### Changes by Kind
#### Bug Fixes
- `karmada-controller-manager`: Fixed the issue that resources will be recreated after being deleted on the cluster when resource is suspended for dispatching. ([#6538](https://github.com/karmada-io/karmada/pull/6538), @luyb177)
- `karmada-controller-manager`: Fixed the issue that EndpointSlice are deleted unexpectedly due to the EndpointSlice informer cache not being synced. ([#6585](https://github.com/karmada-io/karmada/pull/6585), @XiShanYongYe-Chang)
#### Others
- The base image `alpine` now has been promoted from 3.22.0 to 3.22.1. ([#6562](https://github.com/karmada-io/karmada/pull/6562))
# v1.12.7
## Downloads for v1.12.7

View File

@ -2,36 +2,30 @@
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
**Table of Contents** *generated with [DocToc](https://github.com/thlorenz/doctoc)*
- [v1.13.5](#v1135)
- [Downloads for v1.13.5](#downloads-for-v1135)
- [Changelog since v1.13.4](#changelog-since-v1134)
- [Changes by Kind](#changes-by-kind)
- [Bug Fixes](#bug-fixes)
- [Others](#others)
- [v1.13.4](#v1134)
- [Downloads for v1.13.4](#downloads-for-v1134)
- [Changelog since v1.13.3](#changelog-since-v1133)
- [Changes by Kind](#changes-by-kind-1)
- [Bug Fixes](#bug-fixes-1)
- [Others](#others-1)
- [Changes by Kind](#changes-by-kind)
- [Bug Fixes](#bug-fixes)
- [Others](#others)
- [v1.13.3](#v1133)
- [Downloads for v1.13.3](#downloads-for-v1133)
- [Changelog since v1.13.2](#changelog-since-v1132)
- [Changes by Kind](#changes-by-kind-2)
- [Bug Fixes](#bug-fixes-2)
- [Others](#others-2)
- [Changes by Kind](#changes-by-kind-1)
- [Bug Fixes](#bug-fixes-1)
- [Others](#others-1)
- [v1.13.2](#v1132)
- [Downloads for v1.13.2](#downloads-for-v1132)
- [Changelog since v1.13.1](#changelog-since-v1131)
- [Changes by Kind](#changes-by-kind-3)
- [Bug Fixes](#bug-fixes-3)
- [Others](#others-3)
- [Changes by Kind](#changes-by-kind-2)
- [Bug Fixes](#bug-fixes-2)
- [Others](#others-2)
- [v1.13.1](#v1131)
- [Downloads for v1.13.1](#downloads-for-v1131)
- [Changelog since v1.13.0](#changelog-since-v1130)
- [Changes by Kind](#changes-by-kind-4)
- [Bug Fixes](#bug-fixes-4)
- [Others](#others-4)
- [Changes by Kind](#changes-by-kind-3)
- [Bug Fixes](#bug-fixes-3)
- [Others](#others-3)
- [v1.13.0](#v1130)
- [Downloads for v1.13.0](#downloads-for-v1130)
- [Urgent Update Notes](#urgent-update-notes)
@ -44,7 +38,7 @@
- [Other Notable Changes](#other-notable-changes)
- [API Changes](#api-changes)
- [Deprecation](#deprecation)
- [Bug Fixes](#bug-fixes-5)
- [Bug Fixes](#bug-fixes-4)
- [Security](#security)
- [Features & Enhancements](#features--enhancements)
- [Other](#other)
@ -56,11 +50,11 @@
- [Downloads for v1.13.0-rc.0](#downloads-for-v1130-rc0)
- [Changelog since v1.13.0-beta.0](#changelog-since-v1130-beta0)
- [Urgent Update Notes](#urgent-update-notes-1)
- [Changes by Kind](#changes-by-kind-5)
- [Changes by Kind](#changes-by-kind-4)
- [API Changes](#api-changes-1)
- [Features & Enhancements](#features--enhancements-1)
- [Deprecation](#deprecation-1)
- [Bug Fixes](#bug-fixes-6)
- [Bug Fixes](#bug-fixes-5)
- [Security](#security-1)
- [Other](#other-1)
- [Dependencies](#dependencies-1)
@ -70,11 +64,11 @@
- [Downloads for v1.13.0-beta.0](#downloads-for-v1130-beta0)
- [Changelog since v1.13.0-alpha.2](#changelog-since-v1130-alpha2)
- [Urgent Update Notes](#urgent-update-notes-2)
- [Changes by Kind](#changes-by-kind-6)
- [Changes by Kind](#changes-by-kind-5)
- [API Changes](#api-changes-2)
- [Features & Enhancements](#features--enhancements-2)
- [Deprecation](#deprecation-2)
- [Bug Fixes](#bug-fixes-7)
- [Bug Fixes](#bug-fixes-6)
- [Security](#security-2)
- [Other](#other-2)
- [Dependencies](#dependencies-2)
@ -84,11 +78,11 @@
- [Downloads for v1.13.0-alpha.2](#downloads-for-v1130-alpha2)
- [Changelog since v1.13.0-alpha.1](#changelog-since-v1130-alpha1)
- [Urgent Update Notes](#urgent-update-notes-3)
- [Changes by Kind](#changes-by-kind-7)
- [Changes by Kind](#changes-by-kind-6)
- [API Changes](#api-changes-3)
- [Features & Enhancements](#features--enhancements-3)
- [Deprecation](#deprecation-3)
- [Bug Fixes](#bug-fixes-8)
- [Bug Fixes](#bug-fixes-7)
- [Security](#security-3)
- [Other](#other-3)
- [Dependencies](#dependencies-3)
@ -98,11 +92,11 @@
- [Downloads for v1.13.0-alpha.1](#downloads-for-v1130-alpha1)
- [Changelog since v1.12.0](#changelog-since-v1120)
- [Urgent Update Notes](#urgent-update-notes-4)
- [Changes by Kind](#changes-by-kind-8)
- [Changes by Kind](#changes-by-kind-7)
- [API Changes](#api-changes-4)
- [Features & Enhancements](#features--enhancements-4)
- [Deprecation](#deprecation-4)
- [Bug Fixes](#bug-fixes-9)
- [Bug Fixes](#bug-fixes-8)
- [Security](#security-4)
- [Other](#other-4)
- [Dependencies](#dependencies-4)
@ -111,20 +105,6 @@
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
# v1.13.5
## Downloads for v1.13.5
Download v1.13.5 in the [v1.13.5 release page](https://github.com/karmada-io/karmada/releases/tag/v1.13.5).
## Changelog since v1.13.4
### Changes by Kind
#### Bug Fixes
- `karmada-controller-manager`: Fixed the issue that resources will be recreated after being deleted on the cluster when resource is suspended for dispatching. ([#6537](https://github.com/karmada-io/karmada/pull/6537), @luyb177)
- `karmada-controller-manager`: Fixed the issue that EndpointSlice are deleted unexpectedly due to the EndpointSlice informer cache not being synced. ([#6584](https://github.com/karmada-io/karmada/pull/6584), @XiShanYongYe-Chang)
#### Others
- The base image `alpine` now has been promoted from 3.22.0 to 3.22.1. ([#6561](https://github.com/karmada-io/karmada/pull/6561))
# v1.13.4
## Downloads for v1.13.4

View File

@ -2,18 +2,12 @@
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
**Table of Contents** *generated with [DocToc](https://github.com/thlorenz/doctoc)*
- [v1.14.2](#v1142)
- [Downloads for v1.14.2](#downloads-for-v1142)
- [Changelog since v1.14.1](#changelog-since-v1141)
- [Changes by Kind](#changes-by-kind)
- [Bug Fixes](#bug-fixes)
- [Others](#others)
- [v1.14.1](#v1141)
- [Downloads for v1.14.1](#downloads-for-v1141)
- [Changelog since v1.14.0](#changelog-since-v1140)
- [Changes by Kind](#changes-by-kind-1)
- [Bug Fixes](#bug-fixes-1)
- [Others](#others-1)
- [Changes by Kind](#changes-by-kind)
- [Bug Fixes](#bug-fixes)
- [Others](#others)
- [v1.14.0](#v1140)
- [Downloads for v1.14.0](#downloads-for-v1140)
- [Urgent Update Notes](#urgent-update-notes)
@ -25,7 +19,7 @@
- [Other Notable Changes](#other-notable-changes)
- [API Changes](#api-changes)
- [Deprecation](#deprecation)
- [Bug Fixes](#bug-fixes-2)
- [Bug Fixes](#bug-fixes-1)
- [Security](#security)
- [Features & Enhancements](#features--enhancements)
- [Other](#other)
@ -37,11 +31,11 @@
- [Downloads for v1.14.0-rc.0](#downloads-for-v1140-rc0)
- [Changelog since v1.14.0-beta.0](#changelog-since-v1140-beta0)
- [Urgent Update Notes](#urgent-update-notes-1)
- [Changes by Kind](#changes-by-kind-2)
- [Changes by Kind](#changes-by-kind-1)
- [API Changes](#api-changes-1)
- [Features & Enhancements](#features--enhancements-1)
- [Deprecation](#deprecation-1)
- [Bug Fixes](#bug-fixes-3)
- [Bug Fixes](#bug-fixes-2)
- [Security](#security-1)
- [Other](#other-1)
- [Dependencies](#dependencies-1)
@ -52,11 +46,11 @@
- [Downloads for v1.14.0-beta.0](#downloads-for-v1140-beta0)
- [Changelog since v1.14.0-alpha.2](#changelog-since-v1140-alpha2)
- [Urgent Update Notes](#urgent-update-notes-2)
- [Changes by Kind](#changes-by-kind-3)
- [Changes by Kind](#changes-by-kind-2)
- [API Changes](#api-changes-2)
- [Features & Enhancements](#features--enhancements-2)
- [Deprecation](#deprecation-2)
- [Bug Fixes](#bug-fixes-4)
- [Bug Fixes](#bug-fixes-3)
- [Security](#security-2)
- [Other](#other-2)
- [Dependencies](#dependencies-2)
@ -67,11 +61,11 @@
- [Downloads for v1.14.0-alpha.2](#downloads-for-v1140-alpha2)
- [Changelog since v1.14.0-alpha.1](#changelog-since-v1140-alpha1)
- [Urgent Update Notes](#urgent-update-notes-3)
- [Changes by Kind](#changes-by-kind-4)
- [Changes by Kind](#changes-by-kind-3)
- [API Changes](#api-changes-3)
- [Features & Enhancements](#features--enhancements-3)
- [Deprecation](#deprecation-3)
- [Bug Fixes](#bug-fixes-5)
- [Bug Fixes](#bug-fixes-4)
- [Security](#security-3)
- [Other](#other-3)
- [Dependencies](#dependencies-3)
@ -82,11 +76,11 @@
- [Downloads for v1.14.0-alpha.1](#downloads-for-v1140-alpha1)
- [Changelog since v1.13.0](#changelog-since-v1130)
- [Urgent Update Notes](#urgent-update-notes-4)
- [Changes by Kind](#changes-by-kind-5)
- [Changes by Kind](#changes-by-kind-4)
- [API Changes](#api-changes-4)
- [Features & Enhancements](#features--enhancements-4)
- [Deprecation](#deprecation-4)
- [Bug Fixes](#bug-fixes-6)
- [Bug Fixes](#bug-fixes-5)
- [Security](#security-4)
- [Other](#other-4)
- [Dependencies](#dependencies-4)
@ -95,20 +89,6 @@
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
# v1.14.2
## Downloads for v1.14.2
Download v1.14.2 in the [v1.14.2 release page](https://github.com/karmada-io/karmada/releases/tag/v1.14.2).
## Changelog since v1.14.1
### Changes by Kind
#### Bug Fixes
- `karmada-controller-manager`: Fixed the issue that resources will be recreated after being deleted on the cluster when resource is suspended for dispatching. ([#6536](https://github.com/karmada-io/karmada/pull/6536), @luyb177)
- `karmada-controller-manager`: Fixed the issue that EndpointSlice are deleted unexpectedly due to the EndpointSlice informer cache not being synced. ([#6583](https://github.com/karmada-io/karmada/pull/6583), @XiShanYongYe-Chang)
#### Others
- The base image `alpine` now has been promoted from 3.22.0 to 3.22.1. ([#6559](https://github.com/karmada-io/karmada/pull/6559))
# v1.14.1
## Downloads for v1.14.1

View File

@ -2,9 +2,9 @@
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
**Table of Contents** *generated with [DocToc](https://github.com/thlorenz/doctoc)*
- [v1.15.0-beta.0](#v1150-beta0)
- [Downloads for v1.15.0-beta.0](#downloads-for-v1150-beta0)
- [Changelog since v1.15.0-alpha.2](#changelog-since-v1150-alpha2)
- [v1.15.0-alpha.2](#v1150-alpha2)
- [Downloads for v1.15.0-alpha.2](#downloads-for-v1150-alpha2)
- [Changelog since v1.15.0-alpha.1](#changelog-since-v1150-alpha1)
- [Urgent Update Notes](#urgent-update-notes)
- [Changes by Kind](#changes-by-kind)
- [API Changes](#api-changes)
@ -17,9 +17,9 @@
- [Helm Charts](#helm-charts)
- [Instrumentation](#instrumentation)
- [Performance](#performance)
- [v1.15.0-alpha.2](#v1150-alpha2)
- [Downloads for v1.15.0-alpha.2](#downloads-for-v1150-alpha2)
- [Changelog since v1.15.0-alpha.1](#changelog-since-v1150-alpha1)
- [v1.15.0-alpha.1](#v1150-alpha1)
- [Downloads for v1.15.0-alpha.1](#downloads-for-v1150-alpha1)
- [Changelog since v1.14.0](#changelog-since-v1140)
- [Urgent Update Notes](#urgent-update-notes-1)
- [Changes by Kind](#changes-by-kind-1)
- [API Changes](#api-changes-1)
@ -31,68 +31,9 @@
- [Dependencies](#dependencies-1)
- [Helm Charts](#helm-charts-1)
- [Instrumentation](#instrumentation-1)
- [Performance](#performance-1)
- [v1.15.0-alpha.1](#v1150-alpha1)
- [Downloads for v1.15.0-alpha.1](#downloads-for-v1150-alpha1)
- [Changelog since v1.14.0](#changelog-since-v1140)
- [Urgent Update Notes](#urgent-update-notes-2)
- [Changes by Kind](#changes-by-kind-2)
- [API Changes](#api-changes-2)
- [Features & Enhancements](#features--enhancements-2)
- [Deprecation](#deprecation-2)
- [Bug Fixes](#bug-fixes-2)
- [Security](#security-2)
- [Other](#other-2)
- [Dependencies](#dependencies-2)
- [Helm Charts](#helm-charts-2)
- [Instrumentation](#instrumentation-2)
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
# v1.15.0-beta.0
## Downloads for v1.15.0-beta.0
Download v1.15.0-beta.0 in the [v1.15.0-beta.0 release page](https://github.com/karmada-io/karmada/releases/tag/v1.15.0-beta.0).
## Changelog since v1.15.0-alpha.2
## Urgent Update Notes
None.
## Changes by Kind
### API Changes
None.
### Features & Enhancements
- `karmada-controller-manager`: Added resource interpreter support for OpenKruise SidecarSet. Karmada can now interpret and manage OpenKruise SidecarSet resources across clusters, including multi-cluster status aggregation, health checks, dependency resolution for ConfigMaps and Secrets, and comprehensive test coverage. ([#6524](https://github.com/karmada-io/karmada/pull/6524), @abhi0324)
- `karmada-controller-manager`: Added resource interpreter support for OpenKruise UnitedDeployment. Karmada can now interpret and manage OpenKruise UnitedDeployment resources across clusters, including multi-cluster status aggregation, health checks, dependency resolution for ConfigMaps and Secrets, and comprehensive test coverage. ([#6533](https://github.com/karmada-io/karmada/pull/6533), @abhi0324)
### Deprecation
- The deprecated label `propagation.karmada.io/instruction`, which was designed to suspend Work propagation, has now been removed. ([#6512](https://github.com/karmada-io/karmada/pull/6512), @XiShanYongYe-Chang)
### Bug Fixes
- `karmada-controller-manager`: Fixed the issue that EndpointSlice are deleted unexpectedly due to the EndpointSlice informer cache not being synced. ([#6434](https://github.com/karmada-io/karmada/pull/6434), @XiShanYongYe-Chang)
### Security
- Bump go version to 1.24.5 for addressing CVE-2025-4674 concern. ([#6557](https://github.com/karmada-io/karmada/pull/6557), @seanlaii)
## Other
### Dependencies
- Upgraded sigs.k8s.io/metrics-server to v0.8.0. ([#6548](https://github.com/karmada-io/karmada/pull/6548), @seanlaii)
- Upgraded sigs.k8s.io/kind to v0.29.0. ([#6549](https://github.com/karmada-io/karmada/pull/6549), @seanlaii)
- Upgraded vektra/mockery to v3.5.1, switching to a configuration-driven approach via mockery.yaml and removing deprecated v2 flags like --inpackage and --name. ([#6550](https://github.com/karmada-io/karmada/pull/6550), @liaolecheng)
- Upgraded controller-gen to v0.18.0. ([#6558](https://github.com/karmada-io/karmada/pull/6558), @seanlaii)
### Helm Charts
None.
### Instrumentation
None.
### Performance
None.
# v1.15.0-alpha.2
## Downloads for v1.15.0-alpha.2

View File

@ -97,7 +97,7 @@ func (ctrl *Controller) Reconcile(ctx context.Context, req controllerruntime.Req
}
if err := ctrl.validateKarmada(ctx, karmada); err != nil {
klog.ErrorS(err, "Validation failed for karmada", "name", karmada.Name)
klog.Errorf("Validation failed for karmada: %+v", err)
return controllerruntime.Result{}, nil
}
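
Many hunks in this comparison toggle between klog's printf-style calls (`Infof`/`Warningf`/`Errorf`) and the structured variants (`InfoS`/`ErrorS`). For reference, a minimal side-by-side sketch of the two styles with illustrative values (not code from this repository):

```go
package main

import (
	"errors"

	"k8s.io/klog/v2"
)

func main() {
	defer klog.Flush()
	err := errors.New("connection refused")

	// Printf-style: the message and its fields are rendered into one formatted string.
	klog.Errorf("Validation failed for karmada %s: %+v", "demo", err)

	// Structured: a constant message plus key/value pairs that log backends can
	// index and filter on; the error is passed explicitly as the first argument.
	klog.ErrorS(err, "Validation failed for karmada", "name", "demo")
}
```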

View File

@ -80,7 +80,7 @@ func validateETCD(etcd *operatorv1alpha1.Etcd, karmadaName string, fldPath *fiel
replicas := *etcd.Local.CommonSettings.Replicas
if (replicas % 2) == 0 {
klog.InfoS("Using an even number of etcd replicas is not recommended", "replicas", replicas)
klog.Warningf("invalid etcd replicas %d, expected an odd number", replicas)
}
}

View File

@ -56,17 +56,17 @@ type CronFHPAController struct {
// The Controller will requeue the Request to be processed again if an error is non-nil or
// Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
func (c *CronFHPAController) Reconcile(ctx context.Context, req controllerruntime.Request) (controllerruntime.Result, error) {
klog.V(4).InfoS("Reconciling CronFederatedHPA", "namespace", req.Namespace, "name", req.Name)
klog.V(4).Infof("Reconciling CronFederatedHPA %s", req.NamespacedName)
cronFHPA := &autoscalingv1alpha1.CronFederatedHPA{}
if err := c.Client.Get(ctx, req.NamespacedName, cronFHPA); err != nil {
if apierrors.IsNotFound(err) {
klog.V(4).InfoS("Begin to cleanup the cron jobs for CronFederatedHPA", "namespace", req.Namespace, "name", req.Name)
klog.V(4).Infof("Begin to cleanup the cron jobs for CronFederatedHPA:%s", req.NamespacedName)
c.CronHandler.StopCronFHPAExecutor(req.NamespacedName.String())
return controllerruntime.Result{}, nil
}
klog.ErrorS(err, "Fail to get CronFederatedHPA", "namespace", req.Namespace, "name", req.Name)
klog.Errorf("Fail to get CronFederatedHPA(%s):%v", req.NamespacedName, err)
return controllerruntime.Result{}, err
}
@ -140,7 +140,7 @@ func (c *CronFHPAController) processCronRule(ctx context.Context, cronFHPA *auto
if !helper.IsCronFederatedHPARuleSuspend(rule) {
if err := c.CronHandler.CreateCronJobForExecutor(cronFHPA, rule); err != nil {
c.EventRecorder.Event(cronFHPA, corev1.EventTypeWarning, "StartRuleFailed", err.Error())
klog.ErrorS(err, "Fail to start cron for CronFederatedHPA rule", "cronFederatedHPA", cronFHPAKey, "rule", rule.Name)
klog.Errorf("Fail to start cron for CronFederatedHPA(%s) rule(%s):%v", cronFHPAKey, rule.Name, err)
return err
}
}
@ -159,7 +159,8 @@ func (c *CronFHPAController) updateRuleHistory(ctx context.Context, cronFHPA *au
// If rule is not suspended, we should set the nextExecutionTime filed, or the nextExecutionTime will be nil
next, err := c.CronHandler.GetRuleNextExecuteTime(cronFHPA, rule.Name)
if err != nil {
klog.ErrorS(err, "Fail to get next execution time for CronFederatedHPA rule", "namespace", cronFHPA.Namespace, "name", cronFHPA.Name, "rule", rule.Name)
klog.Errorf("Fail to get next execution time for CronFederatedHPA(%s/%s) rule(%s):%v",
cronFHPA.Namespace, cronFHPA.Name, rule.Name, err)
return err
}
nextExecutionTime = &metav1.Time{Time: next}
@ -184,7 +185,8 @@ func (c *CronFHPAController) updateRuleHistory(ctx context.Context, cronFHPA *au
}
if err := c.Client.Status().Update(ctx, cronFHPA); err != nil {
klog.ErrorS(err, "Fail to update CronFederatedHPA rule's next execution time", "namespace", cronFHPA.Namespace, "name", cronFHPA.Name, "rule", rule.Name)
klog.Errorf("Fail to update CronFederatedHPA(%s/%s) rule(%s)'s next execution time:%v",
cronFHPA.Namespace, cronFHPA.Name, rule.Name, err)
return err
}
@ -208,7 +210,7 @@ func (c *CronFHPAController) removeCronFHPAHistory(ctx context.Context, cronFHPA
}
if err := c.Client.Status().Update(ctx, cronFHPA); err != nil {
c.EventRecorder.Event(cronFHPA, corev1.EventTypeWarning, "UpdateCronFederatedHPAFailed", err.Error())
klog.ErrorS(err, "Fail to remove CronFederatedHPA rule history", "namespace", cronFHPA.Namespace, "name", cronFHPA.Name, "rule", ruleName)
klog.Errorf("Fail to remove CronFederatedHPA(%s/%s) rule(%s) history:%v", cronFHPA.Namespace, cronFHPA.Name, ruleName, err)
return err
}
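
The Reconcile hunk above follows a common controller-runtime pattern: a NotFound error means the object is gone, so clean up anything keyed by it and drop the request, while any other Get error is returned so the request is requeued. A standalone sketch of that pattern, with a hypothetical `stopExecutor` hook and a stand-in resource type:

```go
package controllersketch

import (
	"context"

	appsv1 "k8s.io/api/apps/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// reconcileWithCleanup sketches the pattern: NotFound means the object was
// deleted, so tear down anything keyed by its name and stop retrying; other
// errors are returned so controller-runtime requeues the request.
func reconcileWithCleanup(ctx context.Context, c client.Client, req ctrl.Request, stopExecutor func(key string)) (ctrl.Result, error) {
	obj := &appsv1.Deployment{} // stand-in for the reconciled resource type
	if err := c.Get(ctx, req.NamespacedName, obj); err != nil {
		if apierrors.IsNotFound(err) {
			stopExecutor(req.NamespacedName.String()) // e.g. stop the cron executor
			return ctrl.Result{}, nil
		}
		return ctrl.Result{}, err
	}
	// ...continue reconciling obj...
	return ctrl.Result{}, nil
}
```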

View File

@ -142,7 +142,8 @@ func (c *CronHandler) CreateCronJobForExecutor(cronFHPA *autoscalingv1alpha1.Cro
timeZone, err = time.LoadLocation(*rule.TimeZone)
if err != nil {
// This should not happen because there is validation in webhook
klog.ErrorS(err, "Invalid CronFederatedHPA rule time zone", "namespace", cronFHPA.Namespace, "name", cronFHPA.Name, "rule", rule.Name, "timeZone", *rule.TimeZone)
klog.Errorf("Invalid CronFederatedHPA(%s/%s) rule(%s) time zone(%s):%v",
cronFHPA.Namespace, cronFHPA.Namespace, rule.Name, *rule.TimeZone, err)
return err
}
}
@ -150,7 +151,8 @@ func (c *CronHandler) CreateCronJobForExecutor(cronFHPA *autoscalingv1alpha1.Cro
scheduler := gocron.NewScheduler(timeZone)
cronJob := NewCronFederatedHPAJob(c.client, c.eventRecorder, scheduler, cronFHPA, rule)
if _, err := scheduler.Cron(rule.Schedule).Do(RunCronFederatedHPARule, cronJob); err != nil {
klog.ErrorS(err, "Create cron job for CronFederatedHPA rule error", "namespace", cronFHPA.Namespace, "name", cronFHPA.Name, "rule", rule.Name)
klog.Errorf("Create cron job for CronFederatedHPA(%s/%s) rule(%s) error:%v",
cronFHPA.Namespace, cronFHPA.Name, rule.Name, err)
return err
}
scheduler.StartAsync()

View File

@ -76,18 +76,18 @@ func RunCronFederatedHPARule(c *ScalingJob) {
err = c.client.Get(context.TODO(), c.namespaceName, cronFHPA)
if err != nil {
if apierrors.IsNotFound(err) {
klog.InfoS("CronFederatedHPA not found", "cronFederatedHPA", c.namespaceName)
klog.Infof("CronFederatedHPA(%s) not found", c.namespaceName)
} else {
// TODO: This may happen when the the network is down, we should do something here
// But we are not sure what to do(retry not solve the problem)
klog.ErrorS(err, "Get CronFederatedHPA failed", "cronFederatedHPA", c.namespaceName)
klog.Errorf("Get CronFederatedHPA(%s) failed: %v", c.namespaceName, err)
}
return
}
if helper.IsCronFederatedHPARuleSuspend(c.rule) {
// If the rule is suspended, this job will be stopped soon
klog.V(4).InfoS("CronFederatedHPA rule is suspended, skip it", "cronFederatedHPA", c.namespaceName, "rule", c.rule.Name)
klog.V(4).Infof("CronFederatedHPA(%s) Rule(%s) is suspended, skip it", c.namespaceName, c.rule.Name)
return
}
@ -153,14 +153,17 @@ func (c *ScalingJob) ScaleFHPA(cronFHPA *autoscalingv1alpha1.CronFederatedHPA) e
if update {
err := c.client.Update(context.TODO(), fhpa)
if err != nil {
klog.ErrorS(err, "CronFederatedHPA updates FederatedHPA failed", "namespace", c.namespaceName.Namespace, "cronFederatedHPA", c.namespaceName.Name, "federatedHPA", fhpa.Name)
klog.Errorf("CronFederatedHPA(%s) updates FederatedHPA(%s/%s) failed: %v",
c.namespaceName, fhpa.Namespace, fhpa.Name, err)
return err
}
klog.V(4).InfoS("CronFederatedHPA scales FederatedHPA successfully", "namespace", c.namespaceName.Namespace, "cronFederatedHPA", c.namespaceName.Name, "federatedHPA", fhpa.Name)
klog.V(4).Infof("CronFederatedHPA(%s) scales FederatedHPA(%s/%s) successfully",
c.namespaceName, fhpa.Namespace, fhpa.Name)
return nil
}
klog.V(4).InfoS("CronFederatedHPA find nothing updated for FederatedHPA, skip it", "namespace", c.namespaceName.Namespace, "cronFederatedHPA", c.namespaceName.Name, "federatedHPA", fhpa.Name)
klog.V(4).Infof("CronFederatedHPA(%s) find nothing updated for FederatedHPA(%s/%s), skip it",
c.namespaceName, fhpa.Namespace, fhpa.Name)
return nil
}
@ -172,7 +175,8 @@ func (c *ScalingJob) ScaleWorkloads(cronFHPA *autoscalingv1alpha1.CronFederatedH
targetGV, err := schema.ParseGroupVersion(cronFHPA.Spec.ScaleTargetRef.APIVersion)
if err != nil {
klog.ErrorS(err, "CronFederatedHPA parses GroupVersion failed", "cronFederatedHPA", c.namespaceName, "groupVersion", cronFHPA.Spec.ScaleTargetRef.APIVersion)
klog.Errorf("CronFederatedHPA(%s) parses GroupVersion(%s) failed: %v",
c.namespaceName, cronFHPA.Spec.ScaleTargetRef.APIVersion, err)
return err
}
targetGVK := schema.GroupVersionKind{
@ -184,35 +188,37 @@ func (c *ScalingJob) ScaleWorkloads(cronFHPA *autoscalingv1alpha1.CronFederatedH
targetResource.SetGroupVersionKind(targetGVK)
err = c.client.Get(ctx, types.NamespacedName{Namespace: cronFHPA.Namespace, Name: cronFHPA.Spec.ScaleTargetRef.Name}, targetResource)
if err != nil {
klog.ErrorS(err, "Get resource failed", "namespace", cronFHPA.Namespace, "name", cronFHPA.Spec.ScaleTargetRef.Name)
klog.Errorf("Get Resource(%s/%s) failed: %v", cronFHPA.Namespace, cronFHPA.Spec.ScaleTargetRef.Name, err)
return err
}
scaleObj := &unstructured.Unstructured{}
err = scaleClient.Get(ctx, targetResource, scaleObj)
if err != nil {
klog.ErrorS(err, "Get Scale for resource failed", "namespace", cronFHPA.Namespace, "name", cronFHPA.Spec.ScaleTargetRef.Name)
klog.Errorf("Get Scale for resource(%s/%s) failed: %v", cronFHPA.Namespace, cronFHPA.Spec.ScaleTargetRef.Name, err)
return err
}
scale := &autoscalingv1.Scale{}
err = helper.ConvertToTypedObject(scaleObj, scale)
if err != nil {
klog.ErrorS(err, "Convert Scale failed", "namespace", cronFHPA.Namespace, "name", cronFHPA.Spec.ScaleTargetRef.Name)
klog.Errorf("Convert Scale failed: %v", err)
return err
}
if scale.Spec.Replicas != *c.rule.TargetReplicas {
if err := helper.ApplyReplica(scaleObj, int64(*c.rule.TargetReplicas), util.ReplicasField); err != nil {
klog.ErrorS(err, "CronFederatedHPA applies Replicas failed", "cronFederatedHPA", c.namespaceName, "namespace", cronFHPA.Namespace, "name", cronFHPA.Spec.ScaleTargetRef.Name)
klog.Errorf("CronFederatedHPA(%s) applies Replicas for %s/%s failed: %v",
c.namespaceName, cronFHPA.Namespace, cronFHPA.Spec.ScaleTargetRef.Name, err)
return err
}
err := scaleClient.Update(ctx, targetResource, client.WithSubResourceBody(scaleObj))
if err != nil {
klog.ErrorS(err, "CronFederatedHPA updates scale resource failed", "cronFederatedHPA", c.namespaceName, "namespace", cronFHPA.Namespace, "name", cronFHPA.Spec.ScaleTargetRef.Name)
klog.Errorf("CronFederatedHPA(%s) updates scale resource failed: %v", c.namespaceName, err)
return err
}
klog.V(4).InfoS("CronFederatedHPA scales resource successfully", "cronFederatedHPA", c.namespaceName, "namespace", cronFHPA.Namespace, "name", cronFHPA.Spec.ScaleTargetRef.Name)
klog.V(4).Infof("CronFederatedHPA(%s) scales resource(%s/%s) successfully",
c.namespaceName, cronFHPA.Namespace, cronFHPA.Spec.ScaleTargetRef.Name)
return nil
}
return nil
@ -252,12 +258,12 @@ func (c *ScalingJob) addFailedExecutionHistory(
})
return err
}); err != nil {
klog.ErrorS(err, "Failed to add failed history record to CronFederatedHPA", "namespace", cronFHPA.Namespace, "name", cronFHPA.Name)
klog.Errorf("Failed to add failed history record to CronFederatedHPA(%s/%s): %v", cronFHPA.Namespace, cronFHPA.Name, err)
return err
}
if operationResult == controllerutil.OperationResultUpdatedStatusOnly {
klog.V(4).InfoS("CronFederatedHPA status has been updated successfully", "namespace", cronFHPA.Namespace, "name", cronFHPA.Name)
klog.V(4).Infof("CronFederatedHPA(%s/%s) status has been updated successfully", cronFHPA.Namespace, cronFHPA.Name)
}
return nil
@ -310,12 +316,12 @@ func (c *ScalingJob) addSuccessExecutionHistory(
})
return err
}); err != nil {
klog.ErrorS(err, "Failed to add success history record to CronFederatedHPA", "namespace", cronFHPA.Namespace, "name", cronFHPA.Name)
klog.Errorf("Failed to add success history record to CronFederatedHPA(%s/%s): %v", cronFHPA.Namespace, cronFHPA.Name, err)
return err
}
if operationResult == controllerutil.OperationResultUpdatedStatusOnly {
klog.V(4).InfoS("CronFederatedHPA status has been updated successfully", "namespace", cronFHPA.Namespace, "name", cronFHPA.Name)
klog.V(4).Infof("CronFederatedHPA(%s/%s) status has been updated successfully", cronFHPA.Namespace, cronFHPA.Name)
}
return nil

View File

@ -113,7 +113,7 @@ func (r *DeploymentReplicasSyncer) Reconcile(ctx context.Context, req controller
if err := r.Client.Get(ctx, client.ObjectKey{Namespace: req.Namespace, Name: bindingName}, binding); err != nil {
if apierrors.IsNotFound(err) {
klog.InfoS("no need to update deployment replicas as binding was not found",
"namespace", req.Namespace, "name", req.Name)
"namespace", req.Namespace, "name", bindingName)
return controllerruntime.Result{}, nil
}
return controllerruntime.Result{}, err
@ -165,12 +165,7 @@ func (r *DeploymentReplicasSyncer) Reconcile(ctx context.Context, req controller
// isDeploymentStatusCollected judge whether deployment modification in spec has taken effect and its status has been collected.
func isDeploymentStatusCollected(deployment *appsv1.Deployment, binding *workv1alpha2.ResourceBinding) bool {
// make sure the replicas change in deployment.spec can sync to binding.spec, otherwise retry
if deployment.Spec.Replicas == nil {
// should never happen, as karmada-apiserver defaults `deployment.spec.Replicas` to 1 if it is not set.
klog.ErrorS(nil, "deployment replicas is nil", "namespace", deployment.Namespace, "name", deployment.Name)
return false
}
if *deployment.Spec.Replicas != binding.Spec.Replicas {
if deployment.Spec.Replicas == nil || *deployment.Spec.Replicas != binding.Spec.Replicas {
klog.V(4).InfoS("wait until binding replicas are equal to deployment replicas",
"bindingReplicas", binding.Spec.Replicas, "deploymentReplicas", *deployment.Spec.Replicas,
"namespace", deployment.Namespace, "deploymentName", deployment.Name, "bindingName", binding.Name)

View File

@ -66,7 +66,7 @@ type QuotaEnforcementController struct {
// The SyncController will requeue the Request to be processed again if an error is non-nil or
// Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
func (c *QuotaEnforcementController) Reconcile(ctx context.Context, req controllerruntime.Request) (controllerruntime.Result, error) {
klog.V(4).InfoS("QuotaEnforcementController reconciling", "namespacedName", req.NamespacedName.String())
klog.V(4).Infof("QuotaEnforcementController reconciling %s", req.NamespacedName.String())
quota := &policyv1alpha1.FederatedResourceQuota{}
if err := c.Get(ctx, req.NamespacedName, quota); err != nil {
@ -74,7 +74,7 @@ func (c *QuotaEnforcementController) Reconcile(ctx context.Context, req controll
if apierrors.IsNotFound(err) {
return controllerruntime.Result{}, nil
}
klog.ErrorS(err, "Error fetching FederatedResourceQuota", "federatedResourceQuota", req.NamespacedName.String())
klog.Errorf("Error fetching FederatedResourceQuota %s: %v", req.NamespacedName.String(), err)
return controllerruntime.Result{}, err
}
@ -83,7 +83,7 @@ func (c *QuotaEnforcementController) Reconcile(ctx context.Context, req controll
}
if err := c.collectQuotaStatus(quota); err != nil {
klog.ErrorS(err, "Failed to collect status for FederatedResourceQuota", "federatedResourceQuota", req.NamespacedName.String())
klog.Errorf("Failed to collect status for FederatedResourceQuota(%s), error: %v", req.NamespacedName.String(), err)
c.EventRecorder.Eventf(quota, corev1.EventTypeWarning, events.EventReasonCollectFederatedResourceQuotaOverallStatusFailed, err.Error())
return controllerruntime.Result{}, err
}
@ -99,13 +99,13 @@ func (c *QuotaEnforcementController) SetupWithManager(mgr controllerruntime.Mana
func(ctx context.Context, obj client.Object) []reconcile.Request {
rb, ok := obj.(*workv1alpha2.ResourceBinding)
if !ok {
klog.ErrorS(fmt.Errorf("unexpected type: %T", obj), "Failed to convert object to ResourceBinding", "object", obj)
klog.Errorf("Failed to convert object %v to ResourceBinding", obj)
return []reconcile.Request{}
}
federatedResourceQuotaList := &policyv1alpha1.FederatedResourceQuotaList{}
if err := c.Client.List(ctx, federatedResourceQuotaList, &client.ListOptions{Namespace: rb.GetNamespace()}); err != nil {
klog.ErrorS(err, "Failed to list FederatedResourceQuota")
klog.Errorf("Failed to list FederatedResourceQuota, error: %v", err)
return []reconcile.Request{}
}
@ -129,7 +129,7 @@ func (c *QuotaEnforcementController) SetupWithManager(mgr controllerruntime.Mana
func(ctx context.Context, _ client.Object) []reconcile.Request {
federatedResourceQuotaList := &policyv1alpha1.FederatedResourceQuotaList{}
if err := c.Client.List(ctx, federatedResourceQuotaList); err != nil {
klog.ErrorS(err, "Failed to list FederatedResourceQuota")
klog.Errorf("Failed to list FederatedResourceQuota, error: %v", err)
return []reconcile.Request{}
}
@ -217,7 +217,7 @@ func (q *QuotaRecalculation) Start(ctx context.Context) error {
defer close(q.resyncEvent)
if q.ResyncPeriod.Duration > 0 {
klog.InfoS("Starting FederatedResourceQuota recalculation process with period", "duration", q.ResyncPeriod.Duration.String())
klog.Infof("Starting FederatedResourceQuota recalculation process with period %s", q.ResyncPeriod.Duration.String())
ticker := time.NewTicker(q.ResyncPeriod.Duration)
defer ticker.Stop()
for {
@ -242,7 +242,7 @@ func (c *QuotaEnforcementController) collectQuotaStatus(quota *policyv1alpha1.Fe
// TODO: Consider adding filtering step to ResourceBinding list once scope is added to the quota
bindingList, err := helper.GetResourceBindingsByNamespace(c.Client, quota.Namespace)
if err != nil {
klog.ErrorS(err, "Failed to list resourcebindings tracked by FederatedResourceQuota", "federatedResourceQuota", klog.KObj(quota).String())
klog.Errorf("Failed to list resourcebindings tracked by FederatedResourceQuota(%s), error: %v", klog.KObj(quota).String(), err)
return err
}
@ -251,7 +251,7 @@ func (c *QuotaEnforcementController) collectQuotaStatus(quota *policyv1alpha1.Fe
quotaStatus.OverallUsed = calculateUsedWithResourceBinding(bindingList.Items, quota.Spec.Overall)
if reflect.DeepEqual(quota.Status, *quotaStatus) {
klog.V(4).InfoS("New quotaStatus is equal with old federatedResourceQuota status, no update required.", "federatedResourceQuota", klog.KObj(quota).String())
klog.V(4).Infof("New quotaStatus is equal with old federatedResourceQuota(%s) status, no update required.", klog.KObj(quota).String())
return nil
}

View File

@ -66,7 +66,7 @@ type StatusController struct {
// The SyncController will requeue the Request to be processed again if an error is non-nil or
// Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
func (c *StatusController) Reconcile(ctx context.Context, req controllerruntime.Request) (controllerruntime.Result, error) {
klog.V(4).InfoS("FederatedResourceQuota status controller reconciling", "namespacedName", req.NamespacedName.String())
klog.V(4).Infof("FederatedResourceQuota status controller reconciling %s", req.NamespacedName.String())
quota := &policyv1alpha1.FederatedResourceQuota{}
if err := c.Get(ctx, req.NamespacedName, quota); err != nil {
@ -82,7 +82,7 @@ func (c *StatusController) Reconcile(ctx context.Context, req controllerruntime.
}
if err := c.collectQuotaStatus(ctx, quota); err != nil {
klog.ErrorS(err, "Failed to collect status from works to federatedResourceQuota", "federatedResourceQuota", req.NamespacedName.String())
klog.Errorf("Failed to collect status from works to federatedResourceQuota(%s), error: %v", req.NamespacedName.String(), err)
c.EventRecorder.Eventf(quota, corev1.EventTypeWarning, events.EventReasonCollectFederatedResourceQuotaStatusFailed, err.Error())
return controllerruntime.Result{}, err
}
@ -154,7 +154,7 @@ func (c *StatusController) collectQuotaStatus(ctx context.Context, quota *policy
util.FederatedResourceQuotaNameLabel: quota.Name,
})
if err != nil {
klog.ErrorS(err, "Failed to list workList created by federatedResourceQuota", "federatedResourceQuota", klog.KObj(quota).String())
klog.Errorf("Failed to list workList created by federatedResourceQuota(%s), error: %v", klog.KObj(quota).String(), err)
return err
}
@ -172,7 +172,7 @@ func (c *StatusController) collectQuotaStatus(ctx context.Context, quota *policy
}
if reflect.DeepEqual(quota.Status, *quotaStatus) {
klog.V(4).InfoS("New quotaStatus is equal with old federatedResourceQuota status, no update required.", "federatedResourceQuota", klog.KObj(quota).String())
klog.V(4).Infof("New quotaStatus are equal with old federatedResourceQuota(%s) status, no update required.", klog.KObj(quota).String())
return nil
}
@ -214,13 +214,13 @@ func aggregatedStatusFormWorks(works []workv1alpha1.Work) ([]policyv1alpha1.Clus
clusterName, err := names.GetClusterName(work.Namespace)
if err != nil {
klog.ErrorS(err, "Failed to get clusterName from work namespace.", "workNamespace", work.Namespace)
klog.Errorf("Failed to get clusterName from work namespace %s. Error: %v.", work.Namespace, err)
return nil, err
}
status := &corev1.ResourceQuotaStatus{}
if err := json.Unmarshal(work.Status.ManifestStatuses[0].Status.Raw, status); err != nil {
klog.ErrorS(err, "Failed to unmarshal work status to ResourceQuotaStatus", "work", klog.KObj(&work).String())
klog.Errorf("Failed to unmarshal work(%s) status to ResourceQuotaStatus", klog.KObj(&work).String())
return nil, err
}
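
The `aggregatedStatusFormWorks` hunk above unmarshals each Work's raw manifest status into a typed `ResourceQuotaStatus`. In isolation, that decoding step looks roughly like the sketch below (the Work API's field layout is simplified away):

```go
package quotasketch

import (
	"encoding/json"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

// decodeQuotaStatus turns the raw JSON carried in a Work's manifest status
// (a runtime.RawExtension) into a typed ResourceQuotaStatus for aggregation.
func decodeQuotaStatus(raw runtime.RawExtension) (*corev1.ResourceQuotaStatus, error) {
	status := &corev1.ResourceQuotaStatus{}
	if err := json.Unmarshal(raw.Raw, status); err != nil {
		return nil, fmt.Errorf("failed to unmarshal work status to ResourceQuotaStatus: %w", err)
	}
	return status, nil
}
```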

View File

@ -63,14 +63,14 @@ type SyncController struct {
// The SyncController will requeue the Request to be processed again if an error is non-nil or
// Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
func (c *SyncController) Reconcile(ctx context.Context, req controllerruntime.Request) (controllerruntime.Result, error) {
klog.V(4).InfoS("FederatedResourceQuota sync controller reconciling", "namespacedName", req.NamespacedName.String())
klog.V(4).Infof("FederatedResourceQuota sync controller reconciling %s", req.NamespacedName.String())
quota := &policyv1alpha1.FederatedResourceQuota{}
if err := c.Client.Get(ctx, req.NamespacedName, quota); err != nil {
if apierrors.IsNotFound(err) {
klog.V(4).InfoS("Begin to cleanup works created by federatedResourceQuota", "namespacedName", req.NamespacedName.String())
klog.V(4).Infof("Begin to cleanup works created by federatedResourceQuota(%s)", req.NamespacedName.String())
if err = c.cleanUpWorks(ctx, req.Namespace, req.Name); err != nil {
klog.ErrorS(err, "Failed to cleanup works created by federatedResourceQuota", "namespacedName", req.NamespacedName.String())
klog.Errorf("Failed to cleanup works created by federatedResourceQuota(%s)", req.NamespacedName.String())
return controllerruntime.Result{}, err
}
return controllerruntime.Result{}, nil
@ -79,18 +79,18 @@ func (c *SyncController) Reconcile(ctx context.Context, req controllerruntime.Re
}
if err := c.cleanUpOrphanWorks(ctx, quota); err != nil {
klog.ErrorS(err, "Failed to cleanup orphan works for federatedResourceQuota", "namespacedName", req.NamespacedName.String())
klog.Errorf("Failed to cleanup orphan works for federatedResourceQuota(%s), error: %v", req.NamespacedName.String(), err)
return controllerruntime.Result{}, err
}
clusterList := &clusterv1alpha1.ClusterList{}
if err := c.Client.List(ctx, clusterList); err != nil {
klog.ErrorS(err, "Failed to list clusters")
klog.Errorf("Failed to list clusters, error: %v", err)
return controllerruntime.Result{}, err
}
if err := c.buildWorks(ctx, quota, clusterList.Items); err != nil {
klog.ErrorS(err, "Failed to build works for federatedResourceQuota", "namespacedName", req.NamespacedName.String())
klog.Errorf("Failed to build works for federatedResourceQuota(%s), error: %v", req.NamespacedName.String(), err)
c.EventRecorder.Eventf(quota, corev1.EventTypeWarning, events.EventReasonSyncFederatedResourceQuotaFailed, err.Error())
return controllerruntime.Result{}, err
}
@ -114,7 +114,7 @@ func (c *SyncController) SetupWithManager(mgr controllerruntime.Manager) error {
FederatedResourceQuotaList := &policyv1alpha1.FederatedResourceQuotaList{}
if err := c.Client.List(ctx, FederatedResourceQuotaList); err != nil {
klog.ErrorS(err, "Failed to list FederatedResourceQuota")
klog.Errorf("Failed to list FederatedResourceQuota, error: %v", err)
}
for _, federatedResourceQuota := range FederatedResourceQuotaList.Items {
@ -189,14 +189,14 @@ func (c *SyncController) cleanUpWorks(ctx context.Context, namespace, name strin
util.FederatedResourceQuotaNamespaceLabel: namespace,
util.FederatedResourceQuotaNameLabel: name,
}); err != nil {
klog.ErrorS(err, "Failed to list works")
klog.Errorf("Failed to list works, err: %v", err)
return err
}
for index := range workList.Items {
work := &workList.Items[index]
if err := c.Delete(ctx, work); err != nil && !apierrors.IsNotFound(err) {
klog.ErrorS(err, "Failed to delete work", "work", klog.KObj(work).String())
klog.Errorf("Failed to delete work(%s): %v", klog.KObj(work).String(), err)
errs = append(errs, err)
}
}
@ -211,7 +211,7 @@ func (c *SyncController) cleanUpOrphanWorks(ctx context.Context, quota *policyv1
util.FederatedResourceQuotaNamespaceLabel: quota.GetNamespace(),
util.FederatedResourceQuotaNameLabel: quota.GetName(),
}); err != nil {
klog.ErrorS(err, "Failed to list works")
klog.Errorf("Failed to list works, err: %v", err)
return err
}
@ -221,7 +221,7 @@ func (c *SyncController) cleanUpOrphanWorks(ctx context.Context, quota *policyv1
continue
}
if err := c.Delete(ctx, work); err != nil && !apierrors.IsNotFound(err) {
klog.ErrorS(err, "Failed to delete work", "work", klog.KObj(work).String())
klog.Errorf("Failed to delete work(%s): %v", klog.KObj(work).String(), err)
errs = append(errs, err)
}
}
@ -246,7 +246,7 @@ func (c *SyncController) buildWorks(ctx context.Context, quota *policyv1alpha1.F
resourceQuotaObj, err := helper.ToUnstructured(resourceQuota)
if err != nil {
klog.ErrorS(err, "Failed to transform resourceQuota", "resourceQuota", klog.KObj(resourceQuota).String())
klog.Errorf("Failed to transform resourceQuota(%s), error: %v", klog.KObj(resourceQuota).String(), err)
errs = append(errs, err)
continue
}
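
The cleanup hunks above delete each matching Work, tolerate NotFound, and collect the remaining failures in an `errs` slice. One common way to fold such a slice into a single return value is `utilerrors.NewAggregate`; a sketch of that pattern (the actual controller may combine the errors differently):

```go
package cleanupsketch

import (
	"context"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	utilerrors "k8s.io/apimachinery/pkg/util/errors"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// deleteAll deletes every object, ignores NotFound (already gone), records the
// rest, and returns the failures as one aggregated error (nil if none failed).
func deleteAll(ctx context.Context, c client.Client, objs []client.Object) error {
	var errs []error
	for _, obj := range objs {
		if err := c.Delete(ctx, obj); err != nil && !apierrors.IsNotFound(err) {
			errs = append(errs, err)
		}
	}
	return utilerrors.NewAggregate(errs)
}
```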

View File

@ -52,7 +52,7 @@ type CRBGracefulEvictionController struct {
// The Controller will requeue the Request to be processed again if an error is non-nil or
// Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
func (c *CRBGracefulEvictionController) Reconcile(ctx context.Context, req controllerruntime.Request) (controllerruntime.Result, error) {
klog.V(4).InfoS("Reconciling ClusterResourceBinding", "name", req.NamespacedName.String())
klog.V(4).Infof("Reconciling ClusterResourceBinding %s.", req.NamespacedName.String())
binding := &workv1alpha2.ClusterResourceBinding{}
if err := c.Client.Get(ctx, req.NamespacedName, binding); err != nil {
@ -71,7 +71,7 @@ func (c *CRBGracefulEvictionController) Reconcile(ctx context.Context, req contr
return controllerruntime.Result{}, err
}
if retryDuration > 0 {
klog.V(4).InfoS("Retry to evict task after minutes", "retryAfterMinutes", retryDuration.Minutes())
klog.V(4).Infof("Retry to evict task after %v minutes.", retryDuration.Minutes())
return controllerruntime.Result{RequeueAfter: retryDuration}, nil
}
return controllerruntime.Result{}, nil
@ -97,8 +97,8 @@ func (c *CRBGracefulEvictionController) syncBinding(ctx context.Context, binding
}
for _, cluster := range evictedClusters {
klog.V(2).InfoS("Evicted cluster from ClusterResourceBinding gracefulEvictionTasks",
"cluster", cluster, "name", binding.Name)
klog.V(2).Infof("Success to evict Cluster(%s) from ClusterResourceBinding(%s) gracefulEvictionTasks",
cluster, binding.Name)
helper.EmitClusterEvictionEventForClusterResourceBinding(binding, cluster, c.EventRecorder, err)
}
return nextRetry(keptTask, c.GracefulEvictionTimeout, metav1.Now().Time), nil
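
The `retryDuration` branch above relies on controller-runtime's delayed requeue: returning `Result{RequeueAfter: d}` with a nil error re-enqueues the same key after the delay rather than treating it as a failure. A minimal sketch, assuming the duration has already been computed:

```go
package evictionsketch

import (
	"time"

	ctrl "sigs.k8s.io/controller-runtime"
)

// requeueIfPending returns a delayed requeue when graceful-eviction tasks are
// still waiting out their timeout, and a plain empty Result when nothing is left.
func requeueIfPending(retryDuration time.Duration) (ctrl.Result, error) {
	if retryDuration > 0 {
		// The request is processed again after retryDuration, without counting as an error.
		return ctrl.Result{RequeueAfter: retryDuration}, nil
	}
	return ctrl.Result{}, nil
}
```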

View File

@ -52,7 +52,7 @@ type RBGracefulEvictionController struct {
// The Controller will requeue the Request to be processed again if an error is non-nil or
// Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
func (c *RBGracefulEvictionController) Reconcile(ctx context.Context, req controllerruntime.Request) (controllerruntime.Result, error) {
klog.V(4).InfoS("Reconciling ResourceBinding", "namespace", req.Namespace, "name", req.Name)
klog.V(4).Infof("Reconciling ResourceBinding %s.", req.NamespacedName.String())
binding := &workv1alpha2.ResourceBinding{}
if err := c.Client.Get(ctx, req.NamespacedName, binding); err != nil {
@ -71,7 +71,7 @@ func (c *RBGracefulEvictionController) Reconcile(ctx context.Context, req contro
return controllerruntime.Result{}, err
}
if retryDuration > 0 {
klog.V(4).InfoS("Retry to evict task after minutes", "retryAfterMinutes", retryDuration.Minutes())
klog.V(4).Infof("Retry to evict task after %v minutes.", retryDuration.Minutes())
return controllerruntime.Result{RequeueAfter: retryDuration}, nil
}
return controllerruntime.Result{}, nil
@ -97,8 +97,8 @@ func (c *RBGracefulEvictionController) syncBinding(ctx context.Context, binding
}
for _, cluster := range evictedCluster {
klog.V(2).InfoS("Evicted cluster from ResourceBinding gracefulEvictionTasks", "cluster", cluster,
"namespace", binding.Namespace, "name", binding.Name)
klog.V(2).Infof("Success to evict Cluster(%s) from ResourceBinding(%s/%s) gracefulEvictionTasks",
cluster, binding.Namespace, binding.Name)
helper.EmitClusterEvictionEventForResourceBinding(binding, cluster, c.EventRecorder, err)
}
return nextRetry(keptTask, c.GracefulEvictionTimeout, metav1.Now().Time), nil

View File

@ -408,14 +408,6 @@ func (c *ServiceExportController) reportEndpointSliceWithServiceExportCreate(ctx
return nil
}
// Before retrieving EndpointSlice objects from the informer, ensure the informer cache is synced.
// This is necessary because the informer for EndpointSlice is created dynamically in the Reconcile() routine
// when a Work resource containing an ServiceExport is detected for the cluster. If the informer is not yet synced,
// return an error and wait a retry at the next time.
if !singleClusterManager.IsInformerSynced(endpointSliceGVR) {
return fmt.Errorf("the informer for cluster %s has not been synced, wait a retry at the next time", serviceExportKey.Cluster)
}
endpointSliceLister := singleClusterManager.Lister(endpointSliceGVR)
if endpointSliceObjects, err = endpointSliceLister.ByNamespace(serviceExportKey.Namespace).List(labels.SelectorFromSet(labels.Set{
discoveryv1.LabelServiceName: serviceExportKey.Name,
@ -491,14 +483,6 @@ func (c *ServiceExportController) reportEndpointSliceWithEndpointSliceCreateOrUp
return nil
}
// Before retrieving ServiceExport objects from the informer, ensure the informer cache is synced.
// This is necessary because the informer for ServiceExport is created dynamically in the Reconcile() routine
// when a Work resource containing an ServiceExport is detected for the cluster. If the informer is not yet synced,
// return an error and wait a retry at the next time.
if !singleClusterManager.IsInformerSynced(serviceExportGVR) {
return fmt.Errorf("the informer for cluster %s has not been synced, wait a retry at the next time", clusterName)
}
serviceExportLister := singleClusterManager.Lister(serviceExportGVR)
_, err := serviceExportLister.ByNamespace(endpointSlice.GetNamespace()).Get(relatedServiceName)
if err != nil {
@ -630,7 +614,6 @@ func cleanEndpointSliceWork(ctx context.Context, c client.Client, work *workv1al
klog.Errorf("Failed to update work(%s/%s): %v", work.Namespace, work.Name, err)
return err
}
klog.Infof("Successfully updated work(%s/%s)", work.Namespace, work.Name)
return nil
}
@ -638,7 +621,6 @@ func cleanEndpointSliceWork(ctx context.Context, c client.Client, work *workv1al
klog.Errorf("Failed to delete work(%s/%s), Error: %v", work.Namespace, work.Name, err)
return err
}
klog.Infof("Successfully deleted work(%s/%s)", work.Namespace, work.Name)
return nil
}
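
The comment block and `IsInformerSynced` guard in the hunk above cover a lazily created per-cluster informer: its cache must be synced before the lister is trusted, otherwise the caller returns an error so the work item is retried. A rough sketch of that guard against a simplified manager interface (the interface here is illustrative, not the actual `InformerManager` API):

```go
package informersketch

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/tools/cache"
)

// clusterManager is an illustrative stand-in for a per-cluster dynamic
// informer manager that can report whether a resource's cache has synced.
type clusterManager interface {
	IsInformerSynced(gvr schema.GroupVersionResource) bool
	Lister(gvr schema.GroupVersionResource) cache.GenericLister
}

var endpointSliceGVR = schema.GroupVersionResource{
	Group: "discovery.k8s.io", Version: "v1", Resource: "endpointslices",
}

// listEndpointSlices refuses to read from the lister until the EndpointSlice
// cache has synced, returning an error so the caller retries the work item later.
func listEndpointSlices(mgr clusterManager, cluster, namespace string) ([]runtime.Object, error) {
	if !mgr.IsInformerSynced(endpointSliceGVR) {
		return nil, fmt.Errorf("the informer for cluster %s has not been synced, retry later", cluster)
	}
	return mgr.Lister(endpointSliceGVR).ByNamespace(namespace).List(labels.Everything())
}
```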

View File

@ -18,7 +18,6 @@ package multiclusterservice
import (
"context"
"errors"
"fmt"
"reflect"
"strings"
@ -85,7 +84,7 @@ const EndpointSliceCollectControllerName = "endpointslice-collect-controller"
// Reconcile performs a full reconciliation for the object referred to by the Request.
func (c *EndpointSliceCollectController) Reconcile(ctx context.Context, req controllerruntime.Request) (controllerruntime.Result, error) {
klog.V(4).InfoS("Reconciling Work", "namespace", req.Namespace, "name", req.Name)
klog.V(4).Infof("Reconciling Work %s", req.NamespacedName.String())
work := &workv1alpha1.Work{}
if err := c.Client.Get(ctx, req.NamespacedName, work); err != nil {
@ -106,7 +105,7 @@ func (c *EndpointSliceCollectController) Reconcile(ctx context.Context, req cont
clusterName, err := names.GetClusterName(work.Namespace)
if err != nil {
klog.ErrorS(err, "Failed to get cluster name for work", "namespace", work.Namespace, "name", work.Name)
klog.Errorf("Failed to get cluster name for work %s/%s", work.Namespace, work.Name)
return controllerruntime.Result{}, err
}
@ -145,15 +144,14 @@ func (c *EndpointSliceCollectController) collectEndpointSlice(key util.QueueKey)
ctx := context.Background()
fedKey, ok := key.(keys.FederatedKey)
if !ok {
var ErrInvalidKey = errors.New("invalid key")
klog.ErrorS(ErrInvalidKey, "Failed to collect endpointslice as invalid key", "key", key)
klog.Errorf("Failed to collect endpointslice as invalid key: %v", key)
return fmt.Errorf("invalid key")
}
klog.V(4).InfoS("Begin to collect", "kind", fedKey.Kind, "namespaceKey", fedKey.NamespaceKey())
klog.V(4).Infof("Begin to collect %s %s.", fedKey.Kind, fedKey.NamespaceKey())
if err := c.handleEndpointSliceEvent(ctx, fedKey); err != nil {
klog.ErrorS(err, "Failed to handle endpointSlice event", "namespaceKey",
fedKey.NamespaceKey())
klog.Errorf("Failed to handle endpointSlice(%s) event, Error: %v",
fedKey.NamespaceKey(), err)
return err
}
@ -163,18 +161,17 @@ func (c *EndpointSliceCollectController) collectEndpointSlice(key util.QueueKey)
func (c *EndpointSliceCollectController) buildResourceInformers(clusterName string) error {
cluster, err := util.GetCluster(c.Client, clusterName)
if err != nil {
klog.ErrorS(err, "Failed to get the given member cluster", "cluster", clusterName)
klog.Errorf("Failed to get the given member cluster %s", clusterName)
return err
}
if !util.IsClusterReady(&cluster.Status) {
var ErrClusterNotReady = errors.New("cluster not ready")
klog.ErrorS(ErrClusterNotReady, "Stop collect endpointslice for cluster as cluster not ready.", "cluster", cluster.Name)
klog.Errorf("Stop collect endpointslice for cluster(%s) as cluster not ready.", cluster.Name)
return fmt.Errorf("cluster(%s) not ready", cluster.Name)
}
if err := c.registerInformersAndStart(cluster); err != nil {
klog.ErrorS(err, "Failed to register informer for Cluster", "cluster", cluster.Name)
klog.Errorf("Failed to register informer for Cluster %s. Error: %v.", cluster.Name, err)
return err
}
@ -188,7 +185,7 @@ func (c *EndpointSliceCollectController) registerInformersAndStart(cluster *clus
if singleClusterInformerManager == nil {
dynamicClusterClient, err := c.ClusterDynamicClientSetFunc(cluster.Name, c.Client, c.ClusterClientOption)
if err != nil {
klog.ErrorS(err, "Failed to build dynamic cluster client for cluster", "cluster", cluster.Name)
klog.Errorf("Failed to build dynamic cluster client for cluster %s.", cluster.Name)
return err
}
singleClusterInformerManager = c.InformerManager.ForCluster(dynamicClusterClient.ClusterName, dynamicClusterClient.DynamicClientSet, 0)
@ -223,7 +220,7 @@ func (c *EndpointSliceCollectController) registerInformersAndStart(cluster *clus
}
return nil
}(); err != nil {
klog.ErrorS(err, "Failed to sync cache for cluster", "cluster", cluster.Name)
klog.Errorf("Failed to sync cache for cluster: %s, error: %v", cluster.Name, err)
c.InformerManager.Stop(cluster.Name)
return err
}
@ -248,7 +245,7 @@ func (c *EndpointSliceCollectController) genHandlerAddFunc(clusterName string) f
curObj := obj.(runtime.Object)
key, err := keys.FederatedKeyFunc(clusterName, curObj)
if err != nil {
klog.ErrorS(err, "Failed to generate key for obj", "gvk", curObj.GetObjectKind().GroupVersionKind())
klog.Warningf("Failed to generate key for obj: %s", curObj.GetObjectKind().GroupVersionKind())
return
}
c.worker.Add(key)
@ -261,7 +258,7 @@ func (c *EndpointSliceCollectController) genHandlerUpdateFunc(clusterName string
if !reflect.DeepEqual(oldObj, newObj) {
key, err := keys.FederatedKeyFunc(clusterName, curObj)
if err != nil {
klog.ErrorS(err, "Failed to generate key for obj", "gvk", curObj.GetObjectKind().GroupVersionKind())
klog.Warningf("Failed to generate key for obj: %s", curObj.GetObjectKind().GroupVersionKind())
return
}
c.worker.Add(key)
@ -281,7 +278,7 @@ func (c *EndpointSliceCollectController) genHandlerDeleteFunc(clusterName string
oldObj := obj.(runtime.Object)
key, err := keys.FederatedKeyFunc(clusterName, oldObj)
if err != nil {
klog.ErrorS(err, "Failed to generate key for obj", "gvk", oldObj.GetObjectKind().GroupVersionKind())
klog.Warningf("Failed to generate key for obj: %s", oldObj.GetObjectKind().GroupVersionKind())
return
}
c.worker.Add(key)
@ -311,7 +308,7 @@ func (c *EndpointSliceCollectController) handleEndpointSliceEvent(ctx context.Co
util.MultiClusterServiceNamespaceLabel: endpointSliceKey.Namespace,
util.MultiClusterServiceNameLabel: util.GetLabelValue(endpointSliceObj.GetLabels(), discoveryv1.LabelServiceName),
})}); err != nil {
klog.ErrorS(err, "Failed to list workList reported by endpointSlice", "namespace", endpointSliceKey.Namespace, "name", endpointSliceKey.Name)
klog.Errorf("Failed to list workList reported by endpointSlice(%s/%s), error: %v", endpointSliceKey.Namespace, endpointSliceKey.Name, err)
return err
}
@ -327,8 +324,8 @@ func (c *EndpointSliceCollectController) handleEndpointSliceEvent(ctx context.Co
}
if err = c.reportEndpointSliceWithEndpointSliceCreateOrUpdate(ctx, endpointSliceKey.Cluster, endpointSliceObj); err != nil {
klog.ErrorS(err, "Failed to handle endpointSlice event", "namespaceKey",
endpointSliceKey.NamespaceKey())
klog.Errorf("Failed to handle endpointSlice(%s) event, Error: %v",
endpointSliceKey.NamespaceKey(), err)
return err
}
@ -339,7 +336,7 @@ func (c *EndpointSliceCollectController) collectTargetEndpointSlice(ctx context.
manager := c.InformerManager.GetSingleClusterManager(clusterName)
if manager == nil {
err := fmt.Errorf("failed to get informer manager for cluster %s", clusterName)
klog.ErrorS(err, "Failed to get informer manager for cluster")
klog.Errorf("%v", err)
return err
}
@ -350,13 +347,13 @@ func (c *EndpointSliceCollectController) collectTargetEndpointSlice(ctx context.
})
epsList, err := manager.Lister(discoveryv1.SchemeGroupVersion.WithResource("endpointslices")).ByNamespace(svcNamespace).List(selector)
if err != nil {
klog.ErrorS(err, "Failed to list EndpointSlice for Service in a cluster", "namespace", svcNamespace, "name", svcName, "cluster", clusterName)
klog.Errorf("Failed to list EndpointSlice for Service(%s/%s) in cluster(%s), Error: %v", svcNamespace, svcName, clusterName, err)
return err
}
for _, epsObj := range epsList {
eps := &discoveryv1.EndpointSlice{}
if err = helper.ConvertToTypedObject(epsObj, eps); err != nil {
klog.ErrorS(err, "Failed to convert object to EndpointSlice")
klog.Errorf("Failed to convert object to EndpointSlice, error: %v", err)
return err
}
if util.GetLabelValue(eps.GetLabels(), discoveryv1.LabelManagedBy) == util.EndpointSliceDispatchControllerLabelValue {
@ -364,7 +361,7 @@ func (c *EndpointSliceCollectController) collectTargetEndpointSlice(ctx context.
}
epsUnstructured, err := helper.ToUnstructured(eps)
if err != nil {
klog.ErrorS(err, "Failed to convert EndpointSlice to unstructured", "namespace", eps.GetNamespace(), "name", eps.GetName())
klog.Errorf("Failed to convert EndpointSlice %s/%s to unstructured, error: %v", eps.GetNamespace(), eps.GetName(), err)
return err
}
if err = c.reportEndpointSliceWithEndpointSliceCreateOrUpdate(ctx, clusterName, epsUnstructured); err != nil {
@ -397,7 +394,7 @@ func reportEndpointSlice(ctx context.Context, c client.Client, endpointSlice *un
// indicate the Work should not be propagated since it's a collected resource.
if err := ctrlutil.CreateOrUpdateWork(ctx, c, workMeta, endpointSlice, ctrlutil.WithSuspendDispatching(true)); err != nil {
klog.ErrorS(err, "Failed to create or update work", "namespace", workMeta.Namespace, "name", workMeta.Name)
klog.Errorf("Failed to create or update work(%s/%s), Error: %v", workMeta.Namespace, workMeta.Name, err)
return err
}
@ -411,7 +408,7 @@ func getEndpointSliceWorkMeta(ctx context.Context, c client.Client, ns string, w
Namespace: ns,
Name: workName,
}, existWork); err != nil && !apierrors.IsNotFound(err) {
klog.ErrorS(err, "Get EndpointSlice work", "namespace", ns, "name", workName)
klog.Errorf("Get EndpointSlice work(%s/%s) error:%v", ns, workName, err)
return metav1.ObjectMeta{}, err
}
@ -452,7 +449,7 @@ func cleanupWorkWithEndpointSliceDelete(ctx context.Context, c client.Client, en
if apierrors.IsNotFound(err) {
return nil
}
klog.ErrorS(err, "Failed to get work in executionSpace", "namespaceKey", workNamespaceKey.String(), "executionSpace", executionSpace)
klog.Errorf("Failed to get work(%s) in executionSpace(%s): %v", workNamespaceKey.String(), executionSpace, err)
return err
}
@ -475,14 +472,14 @@ func cleanProviderClustersEndpointSliceWork(ctx context.Context, c client.Client
work.Labels[util.EndpointSliceWorkManagedByLabel] = strings.Join(controllerSet.UnsortedList(), ".")
if err := c.Update(ctx, work); err != nil {
klog.ErrorS(err, "Failed to update work", "namespace", work.Namespace, "name", work.Name)
klog.Errorf("Failed to update work(%s/%s): %v", work.Namespace, work.Name, err)
return err
}
return nil
}
if err := c.Delete(ctx, work); err != nil {
klog.ErrorS(err, "Failed to delete work", "namespace", work.Namespace, "name", work.Name)
klog.Errorf("Failed to delete work(%s/%s): %v", work.Namespace, work.Name, err)
return err
}
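
For quick reference, here is a minimal, self-contained sketch (not part of the diff above; the error value and names are made up) contrasting the two klog call styles these hunks toggle between: printf-style Errorf and structured ErrorS with key/value pairs.

package main

import (
	"errors"

	"k8s.io/klog/v2"
)

func main() {
	err := errors.New("cluster not ready") // hypothetical error
	clusterName := "member1"               // hypothetical cluster name

	// Printf-style: everything is rendered into one formatted string.
	klog.Errorf("Failed to sync cluster %s: %v", clusterName, err)

	// Structured style: the error and key/value pairs stay machine-parsable.
	klog.ErrorS(err, "Failed to sync cluster", "cluster", clusterName)

	klog.Flush()
}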

View File

@ -66,7 +66,7 @@ type EndpointsliceDispatchController struct {
// Reconcile performs a full reconciliation for the object referred to by the Request.
func (c *EndpointsliceDispatchController) Reconcile(ctx context.Context, req controllerruntime.Request) (controllerruntime.Result, error) {
klog.V(4).InfoS("Reconciling Work", "namespacedName", req.NamespacedName.String())
klog.V(4).Infof("Reconciling Work %s", req.NamespacedName.String())
work := &workv1alpha1.Work{}
if err := c.Client.Get(ctx, req.NamespacedName, work); err != nil {
@ -83,7 +83,7 @@ func (c *EndpointsliceDispatchController) Reconcile(ctx context.Context, req con
mcsName := util.GetLabelValue(work.Labels, util.MultiClusterServiceNameLabel)
if !work.DeletionTimestamp.IsZero() || mcsName == "" {
if err := c.cleanupEndpointSliceFromConsumerClusters(ctx, work); err != nil {
klog.ErrorS(err, "Failed to cleanup EndpointSlice from consumer clusters for work", "namespace", work.Namespace, "name", work.Name)
klog.Errorf("Failed to cleanup EndpointSlice from consumer clusters for work %s/%s:%v", work.Namespace, work.Name, err)
return controllerruntime.Result{}, err
}
return controllerruntime.Result{}, nil
@ -93,7 +93,7 @@ func (c *EndpointsliceDispatchController) Reconcile(ctx context.Context, req con
mcs := &networkingv1alpha1.MultiClusterService{}
if err := c.Client.Get(ctx, types.NamespacedName{Namespace: mcsNS, Name: mcsName}, mcs); err != nil {
if apierrors.IsNotFound(err) {
klog.ErrorS(err, "MultiClusterService is not found", "namespace", mcsNS, "name", mcsName)
klog.Warningf("MultiClusterService %s/%s is not found", mcsNS, mcsName)
return controllerruntime.Result{}, nil
}
return controllerruntime.Result{}, err
@ -185,7 +185,7 @@ func (c *EndpointsliceDispatchController) newClusterFunc() handler.MapFunc {
mcsList := &networkingv1alpha1.MultiClusterServiceList{}
if err := c.Client.List(ctx, mcsList, &client.ListOptions{}); err != nil {
klog.ErrorS(err, "Failed to list MultiClusterService")
klog.Errorf("Failed to list MultiClusterService, error: %v", err)
return nil
}
@ -193,7 +193,7 @@ func (c *EndpointsliceDispatchController) newClusterFunc() handler.MapFunc {
for _, mcs := range mcsList.Items {
clusterSet, err := helper.GetConsumerClusters(c.Client, mcs.DeepCopy())
if err != nil {
klog.ErrorS(err, "Failed to get provider clusters")
klog.Errorf("Failed to get provider clusters, error: %v", err)
continue
}
@ -203,7 +203,7 @@ func (c *EndpointsliceDispatchController) newClusterFunc() handler.MapFunc {
workList, err := c.getClusterEndpointSliceWorks(ctx, mcs.Namespace, mcs.Name)
if err != nil {
klog.ErrorS(err, "Failed to list work")
klog.Errorf("Failed to list work, error: %v", err)
continue
}
for _, work := range workList {
@ -229,7 +229,7 @@ func (c *EndpointsliceDispatchController) getClusterEndpointSliceWorks(ctx conte
util.MultiClusterServiceNamespaceLabel: mcsNamespace,
}),
}); err != nil {
klog.ErrorS(err, "Failed to list work")
klog.Errorf("Failed to list work, error: %v", err)
return nil, err
}
@ -249,7 +249,7 @@ func (c *EndpointsliceDispatchController) newMultiClusterServiceFunc() handler.M
workList, err := c.getClusterEndpointSliceWorks(ctx, mcsNamespace, mcsName)
if err != nil {
klog.ErrorS(err, "Failed to list work")
klog.Errorf("Failed to list work, error: %v", err)
return nil
}
@ -273,7 +273,7 @@ func (c *EndpointsliceDispatchController) cleanOrphanDispatchedEndpointSlice(ctx
util.MultiClusterServiceNameLabel: mcs.Name,
util.MultiClusterServiceNamespaceLabel: mcs.Namespace,
})}); err != nil {
klog.ErrorS(err, "Failed to list works")
klog.Errorf("Failed to list works, error is: %v", err)
return err
}
@ -285,13 +285,13 @@ func (c *EndpointsliceDispatchController) cleanOrphanDispatchedEndpointSlice(ctx
consumerClusters, err := helper.GetConsumerClusters(c.Client, mcs)
if err != nil {
klog.ErrorS(err, "Failed to get consumer clusters")
klog.Errorf("Failed to get consumer clusters, error is: %v", err)
return err
}
cluster, err := names.GetClusterName(work.Namespace)
if err != nil {
klog.ErrorS(err, "Failed to get cluster name for work", "namespace", work.Namespace, "name", work.Name)
klog.Errorf("Failed to get cluster name for work %s/%s", work.Namespace, work.Name)
return err
}
@ -300,7 +300,7 @@ func (c *EndpointsliceDispatchController) cleanOrphanDispatchedEndpointSlice(ctx
}
if err = c.Client.Delete(ctx, work.DeepCopy()); err != nil {
klog.ErrorS(err, "Failed to delete work", "namespace", work.Namespace, "name", work.Name)
klog.Errorf("Failed to delete work %s/%s, error is: %v", work.Namespace, work.Name, err)
return err
}
}
@ -311,13 +311,13 @@ func (c *EndpointsliceDispatchController) cleanOrphanDispatchedEndpointSlice(ctx
func (c *EndpointsliceDispatchController) dispatchEndpointSlice(ctx context.Context, work *workv1alpha1.Work, mcs *networkingv1alpha1.MultiClusterService) error {
epsSourceCluster, err := names.GetClusterName(work.Namespace)
if err != nil {
klog.ErrorS(err, "Failed to get EndpointSlice source cluster name for work", "namespace", work.Namespace, "name", work.Name)
klog.Errorf("Failed to get EndpointSlice source cluster name for work %s/%s", work.Namespace, work.Name)
return err
}
consumerClusters, err := helper.GetConsumerClusters(c.Client, mcs)
if err != nil {
klog.ErrorS(err, "Failed to get consumer clusters")
klog.Errorf("Failed to get consumer clusters, error is: %v", err)
return err
}
for clusterName := range consumerClusters {
@ -330,7 +330,7 @@ func (c *EndpointsliceDispatchController) dispatchEndpointSlice(ctx context.Cont
c.EventRecorder.Eventf(mcs, corev1.EventTypeWarning, events.EventReasonClusterNotFound, "Consumer cluster %s is not found", clusterName)
continue
}
klog.ErrorS(err, "Failed to get cluster", "cluster", clusterName)
klog.Errorf("Failed to get cluster %s, error is: %v", clusterName, err)
return err
}
if !util.IsClusterReady(&clusterObj.Status) {
@ -361,13 +361,13 @@ func (c *EndpointsliceDispatchController) ensureEndpointSliceWork(ctx context.Co
manifest := work.Spec.Workload.Manifests[0]
unstructuredObj := &unstructured.Unstructured{}
if err := unstructuredObj.UnmarshalJSON(manifest.Raw); err != nil {
klog.ErrorS(err, "Failed to unmarshal work manifest")
klog.Errorf("Failed to unmarshal work manifest, error is: %v", err)
return err
}
endpointSlice := &discoveryv1.EndpointSlice{}
if err := helper.ConvertToTypedObject(unstructuredObj, endpointSlice); err != nil {
klog.ErrorS(err, "Failed to convert unstructured object to typed object")
klog.Errorf("Failed to convert unstructured object to typed object, error is: %v", err)
return err
}
@ -397,12 +397,12 @@ func (c *EndpointsliceDispatchController) ensureEndpointSliceWork(ctx context.Co
}
unstructuredEPS, err := helper.ToUnstructured(endpointSlice)
if err != nil {
klog.ErrorS(err, "Failed to convert typed object to unstructured object")
klog.Errorf("Failed to convert typed object to unstructured object, error is: %v", err)
return err
}
if err := ctrlutil.CreateOrUpdateWork(ctx, c.Client, workMeta, unstructuredEPS); err != nil {
klog.ErrorS(err, "Failed to dispatch EndpointSlice",
"namespace", work.GetNamespace(), "name", work.GetName(), "providerCluster", providerCluster, "consumerCluster", consumerCluster)
klog.Errorf("Failed to dispatch EndpointSlice %s/%s from %s to cluster %s:%v",
work.GetNamespace(), work.GetName(), providerCluster, consumerCluster, err)
return err
}
@ -414,13 +414,13 @@ func (c *EndpointsliceDispatchController) cleanupEndpointSliceFromConsumerCluste
workList := &workv1alpha1.WorkList{}
err := c.Client.List(ctx, workList)
if err != nil {
klog.ErrorS(err, "Failed to list works")
klog.Errorf("Failed to list works serror: %v", err)
return err
}
epsSourceCluster, err := names.GetClusterName(work.Namespace)
if err != nil {
klog.ErrorS(err, "Failed to get EndpointSlice provider cluster name for work", "namespace", work.Namespace, "name", work.Name)
klog.Errorf("Failed to get EndpointSlice provider cluster name for work %s/%s", work.Namespace, work.Name)
return err
}
for _, item := range workList.Items {
@ -434,7 +434,7 @@ func (c *EndpointsliceDispatchController) cleanupEndpointSliceFromConsumerCluste
if controllerutil.RemoveFinalizer(work, util.MCSEndpointSliceDispatchControllerFinalizer) {
if err := c.Client.Update(ctx, work); err != nil {
klog.ErrorS(err, "Failed to remove finalizer for work", "finalizer", util.MCSEndpointSliceDispatchControllerFinalizer, "namespace", work.Namespace, "name", work.Name)
klog.Errorf("Failed to remove %s finalizer for work %s/%s:%v", util.MCSEndpointSliceDispatchControllerFinalizer, work.Namespace, work.Name, err)
return err
}
}

View File

@ -69,7 +69,7 @@ type MCSController struct {
// The Controller will requeue the Request to be processed again if an error is non-nil or
// Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
func (c *MCSController) Reconcile(ctx context.Context, req controllerruntime.Request) (controllerruntime.Result, error) {
klog.V(4).InfoS("Reconciling MultiClusterService", "namespace", req.Namespace, "name", req.Name)
klog.V(4).Infof("Reconciling MultiClusterService(%s/%s)", req.Namespace, req.Name)
mcs := &networkingv1alpha1.MultiClusterService{}
if err := c.Client.Get(ctx, req.NamespacedName, mcs); err != nil {
@ -77,7 +77,7 @@ func (c *MCSController) Reconcile(ctx context.Context, req controllerruntime.Req
// The mcs no longer exist, in which case we stop processing.
return controllerruntime.Result{}, nil
}
klog.ErrorS(err, "Failed to get MultiClusterService object", "namespacedName", req.NamespacedName)
klog.Errorf("Failed to get MultiClusterService object(%s):%v", req.NamespacedName, err)
return controllerruntime.Result{}, err
}
@ -103,7 +103,7 @@ func (c *MCSController) Reconcile(ctx context.Context, req controllerruntime.Req
}
func (c *MCSController) handleMultiClusterServiceDelete(ctx context.Context, mcs *networkingv1alpha1.MultiClusterService) (controllerruntime.Result, error) {
klog.V(4).InfoS("Begin to handle MultiClusterService delete event", "namespace", mcs.Namespace, "name", mcs.Name)
klog.V(4).Infof("Begin to handle MultiClusterService(%s/%s) delete event", mcs.Namespace, mcs.Name)
if err := c.retrieveService(ctx, mcs); err != nil {
c.EventRecorder.Event(mcs, corev1.EventTypeWarning, events.EventReasonSyncServiceFailed,
@ -120,12 +120,12 @@ func (c *MCSController) handleMultiClusterServiceDelete(ctx context.Context, mcs
if controllerutil.RemoveFinalizer(mcs, util.MCSControllerFinalizer) {
err := c.Client.Update(ctx, mcs)
if err != nil {
klog.ErrorS(err, "Failed to update MultiClusterService with finalizer", "namespace", mcs.Namespace, "name", mcs.Name)
klog.Errorf("Failed to update MultiClusterService(%s/%s) with finalizer:%v", mcs.Namespace, mcs.Name, err)
return controllerruntime.Result{}, err
}
}
klog.V(4).InfoS("Success to delete MultiClusterService", "namespace", mcs.Namespace, "name", mcs.Name)
klog.V(4).Infof("Success to delete MultiClusterService(%s/%s)", mcs.Namespace, mcs.Name)
return controllerruntime.Result{}, nil
}
@ -135,7 +135,7 @@ func (c *MCSController) retrieveMultiClusterService(ctx context.Context, mcs *ne
networkingv1alpha1.MultiClusterServicePermanentIDLabel: mcsID,
})
if err != nil {
klog.ErrorS(err, "Failed to list work by MultiClusterService", "namespace", mcs.Namespace, "name", mcs.Name)
klog.Errorf("Failed to list work by MultiClusterService(%s/%s): %v", mcs.Namespace, mcs.Name, err)
return err
}
@ -145,7 +145,7 @@ func (c *MCSController) retrieveMultiClusterService(ctx context.Context, mcs *ne
}
clusterName, err := names.GetClusterName(work.Namespace)
if err != nil {
klog.ErrorS(err, "Failed to get member cluster name for work", "namespace", work.Namespace, "name", work.Name)
klog.Errorf("Failed to get member cluster name for work %s/%s:%v", work.Namespace, work.Name, work)
continue
}
@ -154,17 +154,17 @@ func (c *MCSController) retrieveMultiClusterService(ctx context.Context, mcs *ne
}
if err = c.cleanProviderEndpointSliceWork(ctx, work.DeepCopy()); err != nil {
klog.ErrorS(err, "Failed to clean provider EndpointSlice work", "namespace", work.Namespace, "name", work.Name)
klog.Errorf("Failed to clean provider EndpointSlice work(%s/%s):%v", work.Namespace, work.Name, err)
return err
}
if err = c.Client.Delete(ctx, work.DeepCopy()); err != nil && !apierrors.IsNotFound(err) {
klog.ErrorS(err, "Error while deleting work", "namespace", work.Namespace, "name", work.Name)
klog.Errorf("Error while deleting work(%s/%s): %v", work.Namespace, work.Name, err)
return err
}
}
klog.V(4).InfoS("Success to clean up MultiClusterService", "namespace", mcs.Namespace, "name", mcs.Name)
klog.V(4).Infof("Success to clean up MultiClusterService(%s/%s) work: %v", mcs.Namespace, mcs.Name, err)
return nil
}
@ -177,7 +177,7 @@ func (c *MCSController) cleanProviderEndpointSliceWork(ctx context.Context, work
util.MultiClusterServiceNamespaceLabel: util.GetLabelValue(work.Labels, util.MultiClusterServiceNamespaceLabel),
}),
}); err != nil {
klog.ErrorS(err, "Failed to list workList reported by work(MultiClusterService)", "namespace", work.Namespace, "name", work.Name)
klog.Errorf("Failed to list workList reported by work(MultiClusterService)(%s/%s): %v", work.Namespace, work.Name, err)
return err
}
@ -204,16 +204,16 @@ func (c *MCSController) cleanProviderEndpointSliceWork(ctx context.Context, work
}
func (c *MCSController) handleMultiClusterServiceCreateOrUpdate(ctx context.Context, mcs *networkingv1alpha1.MultiClusterService) error {
klog.V(4).InfoS("Begin to handle MultiClusterService create or update event", "namespace", mcs.Namespace, "name", mcs.Name)
klog.V(4).Infof("Begin to handle MultiClusterService(%s/%s) create or update event", mcs.Namespace, mcs.Name)
providerClusters, err := helper.GetProviderClusters(c.Client, mcs)
if err != nil {
klog.ErrorS(err, "Failed to get provider clusters by MultiClusterService", "namespace", mcs.Namespace, "name", mcs.Name)
klog.Errorf("Failed to get provider clusters by MultiClusterService(%s/%s):%v", mcs.Namespace, mcs.Name, err)
return err
}
consumerClusters, err := helper.GetConsumerClusters(c.Client, mcs)
if err != nil {
klog.ErrorS(err, "Failed to get consumer clusters by MultiClusterService", "namespace", mcs.Namespace, "name", mcs.Name)
klog.Errorf("Failed to get consumer clusters by MultiClusterService(%s/%s):%v", mcs.Namespace, mcs.Name, err)
return err
}
@ -228,7 +228,7 @@ func (c *MCSController) handleMultiClusterServiceCreateOrUpdate(ctx context.Cont
if controllerutil.RemoveFinalizer(mcs, util.MCSControllerFinalizer) {
err := c.Client.Update(ctx, mcs)
if err != nil {
klog.ErrorS(err, "Failed to remove finalizer from MultiClusterService", "finalizer", util.MCSControllerFinalizer, "namespace", mcs.Namespace, "name", mcs.Name)
klog.Errorf("Failed to remove finalizer(%s) from MultiClusterService(%s/%s):%v", util.MCSControllerFinalizer, mcs.Namespace, mcs.Name, err)
return err
}
}
@ -239,7 +239,7 @@ func (c *MCSController) handleMultiClusterServiceCreateOrUpdate(ctx context.Cont
if controllerutil.AddFinalizer(mcs, util.MCSControllerFinalizer) {
err = c.Client.Update(ctx, mcs)
if err != nil {
klog.ErrorS(err, "Failed to add finalizer to MultiClusterService", "finalizer", util.MCSControllerFinalizer, "namespace", mcs.Namespace, "name", mcs.Name)
klog.Errorf("Failed to add finalizer(%s) to MultiClusterService(%s/%s): %v ", util.MCSControllerFinalizer, mcs.Namespace, mcs.Name, err)
return err
}
}
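
As a reading aid for the finalizer handling above, a hedged sketch of the controllerutil add/remove-finalizer pattern; the finalizer name, object type, and function names are placeholders, not Karmada's real values.

package demo

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

const demoFinalizer = "example.io/demo-finalizer" // hypothetical finalizer name

// ensureFinalizer only issues an Update when AddFinalizer actually mutated the object.
func ensureFinalizer(ctx context.Context, c client.Client, svc *corev1.Service) error {
	if controllerutil.AddFinalizer(svc, demoFinalizer) {
		return c.Update(ctx, svc)
	}
	return nil
}

// removeFinalizer mirrors the cleanup path: drop the finalizer, then persist the change.
func removeFinalizer(ctx context.Context, c client.Client, svc *corev1.Service) error {
	if controllerutil.RemoveFinalizer(svc, demoFinalizer) {
		return c.Update(ctx, svc)
	}
	return nil
}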
@ -259,7 +259,7 @@ func (c *MCSController) handleMultiClusterServiceCreateOrUpdate(ctx context.Cont
err = c.Client.Get(ctx, types.NamespacedName{Namespace: mcs.Namespace, Name: mcs.Name}, svc)
// If the Service is deleted, the Service's ResourceBinding will be cleaned by GC
if err != nil {
klog.ErrorS(err, "Failed to get service", "namespace", mcs.Namespace, "name", mcs.Name)
klog.Errorf("Failed to get service(%s/%s):%v", mcs.Namespace, mcs.Name, err)
return err
}
@ -268,7 +268,7 @@ func (c *MCSController) handleMultiClusterServiceCreateOrUpdate(ctx context.Cont
return err
}
klog.V(4).InfoS("Success to reconcile MultiClusterService", "namespace", mcs.Namespace, "name", mcs.Name)
klog.V(4).Infof("Success to reconcile MultiClusterService(%s/%s)", mcs.Namespace, mcs.Name)
return nil
}
@ -280,7 +280,7 @@ func (c *MCSController) propagateMultiClusterService(ctx context.Context, mcs *n
c.EventRecorder.Eventf(mcs, corev1.EventTypeWarning, events.EventReasonClusterNotFound, "Provider cluster %s is not found", clusterName)
continue
}
klog.ErrorS(err, "Failed to get cluster", "cluster", clusterName)
klog.Errorf("Failed to get cluster %s, error is: %v", clusterName, err)
return err
}
if !util.IsClusterReady(&clusterObj.Status) {
@ -306,12 +306,12 @@ func (c *MCSController) propagateMultiClusterService(ctx context.Context, mcs *n
mcsObj, err := helper.ToUnstructured(mcs)
if err != nil {
klog.ErrorS(err, "Failed to convert MultiClusterService to unstructured object", "namespace", mcs.Namespace, "name", mcs.Name)
klog.Errorf("Failed to convert MultiClusterService(%s/%s) to unstructured object, err is %v", mcs.Namespace, mcs.Name, err)
return err
}
if err = ctrlutil.CreateOrUpdateWork(ctx, c, workMeta, mcsObj, ctrlutil.WithSuspendDispatching(true)); err != nil {
klog.ErrorS(err, "Failed to create or update MultiClusterService work in the given member cluster",
"namespace", mcs.Namespace, "name", mcs.Name, "cluster", clusterName)
klog.Errorf("Failed to create or update MultiClusterService(%s/%s) work in the given member cluster %s, err is %v",
mcs.Namespace, mcs.Name, clusterName, err)
return err
}
}
@ -323,7 +323,7 @@ func (c *MCSController) retrieveService(ctx context.Context, mcs *networkingv1al
svc := &corev1.Service{}
err := c.Client.Get(ctx, types.NamespacedName{Namespace: mcs.Namespace, Name: mcs.Name}, svc)
if err != nil && !apierrors.IsNotFound(err) {
klog.ErrorS(err, "Failed to get service", "namespace", mcs.Namespace, "name", mcs.Name)
klog.Errorf("Failed to get service(%s/%s):%v", mcs.Namespace, mcs.Name, err)
return err
}
@ -338,7 +338,7 @@ func (c *MCSController) retrieveService(ctx context.Context, mcs *networkingv1al
}
if err = c.Client.Update(ctx, svcCopy); err != nil {
klog.ErrorS(err, "Failed to update service", "namespace", mcs.Namespace, "name", mcs.Name)
klog.Errorf("Failed to update service(%s/%s):%v", mcs.Namespace, mcs.Name, err)
return err
}
@ -348,7 +348,7 @@ func (c *MCSController) retrieveService(ctx context.Context, mcs *networkingv1al
if apierrors.IsNotFound(err) {
return nil
}
klog.ErrorS(err, "Failed to get ResourceBinding", "namespace", mcs.Namespace, "name", names.GenerateBindingName(svc.Kind, svc.Name))
klog.Errorf("Failed to get ResourceBinding(%s/%s):%v", mcs.Namespace, names.GenerateBindingName(svc.Kind, svc.Name), err)
return err
}
@ -364,7 +364,7 @@ func (c *MCSController) retrieveService(ctx context.Context, mcs *networkingv1al
delete(rbCopy.Labels, networkingv1alpha1.MultiClusterServicePermanentIDLabel)
}
if err := c.Client.Update(ctx, rbCopy); err != nil {
klog.ErrorS(err, "Failed to update ResourceBinding", "namespace", mcs.Namespace, "name", names.GenerateBindingName(svc.Kind, svc.Name))
klog.Errorf("Failed to update ResourceBinding(%s/%s):%v", mcs.Namespace, names.GenerateBindingName(svc.Kind, svc.Name), err)
return err
}
@ -374,13 +374,13 @@ func (c *MCSController) retrieveService(ctx context.Context, mcs *networkingv1al
func (c *MCSController) propagateService(ctx context.Context, mcs *networkingv1alpha1.MultiClusterService, svc *corev1.Service,
providerClusters, consumerClusters sets.Set[string]) error {
if err := c.claimMultiClusterServiceForService(ctx, svc, mcs); err != nil {
klog.ErrorS(err, "Failed to claim for Service", "namespace", svc.Namespace, "name", svc.Name)
klog.Errorf("Failed to claim for Service(%s/%s), err is %v", svc.Namespace, svc.Name, err)
return err
}
binding, err := c.buildResourceBinding(svc, mcs, providerClusters, consumerClusters)
if err != nil {
klog.ErrorS(err, "Failed to build ResourceBinding for Service", "namespace", svc.Namespace, "name", svc.Name)
klog.Errorf("Failed to build ResourceBinding for Service(%s/%s), err is %v", svc.Namespace, svc.Name, err)
return err
}
@ -417,17 +417,17 @@ func (c *MCSController) propagateService(ctx context.Context, mcs *networkingv1a
return nil
})
if err != nil {
klog.ErrorS(err, "Failed to create/update ResourceBinding", "namespace", bindingCopy.Namespace, "name", bindingCopy.Name)
klog.Errorf("Failed to create/update ResourceBinding(%s/%s):%v", bindingCopy.Namespace, bindingCopy.Name, err)
return err
}
switch operationResult {
case controllerutil.OperationResultCreated:
klog.InfoS("Create ResourceBinding successfully.", "namespace", binding.GetNamespace(), "name", binding.GetName())
klog.Infof("Create ResourceBinding(%s/%s) successfully.", binding.GetNamespace(), binding.GetName())
case controllerutil.OperationResultUpdated:
klog.InfoS("Update ResourceBinding successfully.", "namespace", binding.GetNamespace(), "name", binding.GetName())
klog.Infof("Update ResourceBinding(%s/%s) successfully.", binding.GetNamespace(), binding.GetName())
default:
klog.V(2).InfoS("ResourceBinding is up to date.", "namespace", binding.GetNamespace(), "name", binding.GetName())
klog.V(2).Infof("ResourceBinding(%s/%s) is up to date.", binding.GetNamespace(), binding.GetName())
}
return nil
@ -500,7 +500,7 @@ func (c *MCSController) claimMultiClusterServiceForService(ctx context.Context,
svcCopy.Annotations[networkingv1alpha1.MultiClusterServiceNamespaceAnnotation] = mcs.Namespace
if err := c.Client.Update(ctx, svcCopy); err != nil {
klog.ErrorS(err, "Failed to update service", "namespace", svc.Namespace, "name", svc.Name)
klog.Errorf("Failed to update service(%s/%s):%v ", svc.Namespace, svc.Name, err)
return err
}
@ -608,7 +608,7 @@ func (c *MCSController) serviceHasCrossClusterMultiClusterService(svc *corev1.Se
if err := c.Client.Get(context.Background(),
types.NamespacedName{Namespace: svc.Namespace, Name: svc.Name}, mcs); err != nil {
if !apierrors.IsNotFound(err) {
klog.ErrorS(err, "Failed to get MultiClusterService", "namespace", svc.Namespace, "name", svc.Name)
klog.Errorf("Failed to get MultiClusterService(%s/%s):%v", svc.Namespace, svc.Name, err)
}
return false
}
@ -626,10 +626,10 @@ func (c *MCSController) clusterMapFunc() handler.MapFunc {
return nil
}
klog.V(4).InfoS("Begin to sync mcs with cluster", "cluster", clusterName)
klog.V(4).Infof("Begin to sync mcs with cluster %s.", clusterName)
mcsList := &networkingv1alpha1.MultiClusterServiceList{}
if err := c.Client.List(ctx, mcsList, &client.ListOptions{}); err != nil {
klog.ErrorS(err, "Failed to list MultiClusterService")
klog.Errorf("Failed to list MultiClusterService, error: %v", err)
return nil
}
@ -658,7 +658,7 @@ func (c *MCSController) needSyncMultiClusterService(mcs *networkingv1alpha1.Mult
providerClusters, err := helper.GetProviderClusters(c.Client, mcs)
if err != nil {
klog.ErrorS(err, "Failed to get provider clusters by MultiClusterService", "namespace", mcs.Namespace, "name", mcs.Name)
klog.Errorf("Failed to get provider clusters by MultiClusterService(%s/%s):%v", mcs.Namespace, mcs.Name, err)
return false, err
}
if providerClusters.Has(clusterName) {
@ -667,7 +667,7 @@ func (c *MCSController) needSyncMultiClusterService(mcs *networkingv1alpha1.Mult
consumerClusters, err := helper.GetConsumerClusters(c.Client, mcs)
if err != nil {
klog.ErrorS(err, "Failed to get consumer clusters by MultiClusterService", "namespace", mcs.Namespace, "name", mcs.Name)
klog.Errorf("Failed to get consumer clusters by MultiClusterService(%s/%s):%v", mcs.Namespace, mcs.Name, err)
return false, err
}
if consumerClusters.Has(clusterName) {

View File

@ -125,7 +125,7 @@ type ClusterStatusController struct {
// The Controller will requeue the Request to be processed again if an error is non-nil or
// Result.Requeue is true, otherwise upon completion it will requeue the reconcile key after the duration.
func (c *ClusterStatusController) Reconcile(ctx context.Context, req controllerruntime.Request) (controllerruntime.Result, error) {
klog.V(4).InfoS("Syncing cluster status", "cluster", req.NamespacedName.Name)
klog.V(4).Infof("Syncing cluster status: %s", req.NamespacedName.Name)
cluster := &clusterv1alpha1.Cluster{}
if err := c.Client.Get(ctx, req.NamespacedName, cluster); err != nil {
@ -151,9 +151,9 @@ func (c *ClusterStatusController) Reconcile(ctx context.Context, req controllerr
}
// start syncing status only when the finalizer is present on the given Cluster to
// avoid conflict with the cluster controller.
// avoid conflict with cluster controller.
if !controllerutil.ContainsFinalizer(cluster, util.ClusterControllerFinalizer) {
klog.V(2).InfoS("Waiting finalizer present for member cluster", "cluster", cluster.Name)
klog.V(2).Infof("Waiting finalizer present for member cluster: %s", cluster.Name)
return controllerruntime.Result{Requeue: true}, nil
}
@ -190,7 +190,7 @@ func (c *ClusterStatusController) syncClusterStatus(ctx context.Context, cluster
// create a ClusterClient for the given member cluster
clusterClient, err := c.ClusterClientSetFunc(cluster.Name, c.Client, c.ClusterClientOption)
if err != nil {
klog.ErrorS(err, "Failed to create a ClusterClient for the given member cluster", "cluster", cluster.Name)
klog.Errorf("Failed to create a ClusterClient for the given member cluster: %v, err is : %v", cluster.Name, err)
return setStatusCollectionFailedCondition(ctx, c.Client, cluster, fmt.Sprintf("failed to create a ClusterClient: %v", err))
}
@ -200,8 +200,8 @@ func (c *ClusterStatusController) syncClusterStatus(ctx context.Context, cluster
// cluster is offline after retry timeout, update cluster status immediately and return.
if !online && readyCondition.Status != metav1.ConditionTrue {
klog.V(2).InfoS("Cluster still offline after ensuring offline is set",
"cluster", cluster.Name, "duration", c.ClusterFailureThreshold.Duration)
klog.V(2).Infof("Cluster(%s) still offline after %s, ensuring offline is set.",
cluster.Name, c.ClusterFailureThreshold.Duration)
return updateStatusCondition(ctx, c.Client, cluster, *readyCondition)
}
@ -235,7 +235,7 @@ func (c *ClusterStatusController) setCurrentClusterStatus(clusterClient *util.Cl
var conditions []metav1.Condition
clusterVersion, err := getKubernetesVersion(clusterClient)
if err != nil {
klog.ErrorS(err, "Failed to get Kubernetes version for Cluster", "cluster", cluster.GetName())
klog.Errorf("Failed to get Kubernetes version for Cluster %s. Error: %v.", cluster.GetName(), err)
}
currentClusterStatus.KubernetesVersion = clusterVersion
@ -245,11 +245,11 @@ func (c *ClusterStatusController) setCurrentClusterStatus(clusterClient *util.Cl
if len(apiEnables) == 0 {
apiEnablementCondition = util.NewCondition(clusterv1alpha1.ClusterConditionCompleteAPIEnablements,
apiEnablementEmptyAPIEnablements, "collected empty APIEnablements from the cluster", metav1.ConditionFalse)
klog.ErrorS(err, "Failed to get any APIs installed in Cluster", "cluster", cluster.GetName())
klog.Errorf("Failed to get any APIs installed in Cluster %s. Error: %v.", cluster.GetName(), err)
} else if err != nil {
apiEnablementCondition = util.NewCondition(clusterv1alpha1.ClusterConditionCompleteAPIEnablements,
apiEnablementPartialAPIEnablements, fmt.Sprintf("might collect partial APIEnablements(%d) from the cluster", len(apiEnables)), metav1.ConditionFalse)
klog.ErrorS(err, "Collected partial number of APIs installed in Cluster", "numApiEnablements", len(apiEnables), "cluster", cluster.GetName())
klog.Warningf("Maybe get partial(%d) APIs installed in Cluster %s. Error: %v.", len(apiEnables), cluster.GetName(), err)
} else {
apiEnablementCondition = util.NewCondition(clusterv1alpha1.ClusterConditionCompleteAPIEnablements,
apiEnablementsComplete, "collected complete APIEnablements from the cluster", metav1.ConditionTrue)
@ -261,19 +261,19 @@ func (c *ClusterStatusController) setCurrentClusterStatus(clusterClient *util.Cl
// get or create informer for pods and nodes in member cluster
clusterInformerManager, err := c.buildInformerForCluster(clusterClient)
if err != nil {
klog.ErrorS(err, "Failed to get or create informer for Cluster", "cluster", cluster.GetName())
klog.Errorf("Failed to get or create informer for Cluster %s. Error: %v.", cluster.GetName(), err)
// in large-scale clusters, a timeout may occur.
// if clusterInformerManager fails to be built, the error should be returned; otherwise, it may cause a nil pointer dereference
return nil, err
}
nodes, err := listNodes(clusterInformerManager)
if err != nil {
klog.ErrorS(err, "Failed to list nodes for Cluster", "cluster", cluster.GetName())
klog.Errorf("Failed to list nodes for Cluster %s. Error: %v.", cluster.GetName(), err)
}
pods, err := listPods(clusterInformerManager)
if err != nil {
klog.ErrorS(err, "Failed to list pods for Cluster", "cluster", cluster.GetName())
klog.Errorf("Failed to list pods for Cluster %s. Error: %v.", cluster.GetName(), err)
}
currentClusterStatus.NodeSummary = getNodeSummary(nodes)
currentClusterStatus.ResourceSummary = getResourceSummary(nodes, pods)
@ -296,7 +296,7 @@ func (c *ClusterStatusController) updateStatusIfNeeded(ctx context.Context, clus
meta.SetStatusCondition(&currentClusterStatus.Conditions, condition)
}
if !equality.Semantic.DeepEqual(cluster.Status, currentClusterStatus) {
klog.V(4).InfoS("Start to update cluster status", "cluster", cluster.Name)
klog.V(4).Infof("Start to update cluster status: %s", cluster.Name)
err := retry.RetryOnConflict(retry.DefaultRetry, func() (err error) {
_, err = helper.UpdateStatus(ctx, c.Client, cluster, func() error {
cluster.Status.KubernetesVersion = currentClusterStatus.KubernetesVersion
@ -311,7 +311,7 @@ func (c *ClusterStatusController) updateStatusIfNeeded(ctx context.Context, clus
return err
})
if err != nil {
klog.ErrorS(err, "Failed to update health status of the member cluster", "cluster", cluster.Name)
klog.Errorf("Failed to update health status of the member cluster: %v, err is : %v", cluster.Name, err)
return err
}
}
@ -320,7 +320,7 @@ func (c *ClusterStatusController) updateStatusIfNeeded(ctx context.Context, clus
}
func updateStatusCondition(ctx context.Context, c client.Client, cluster *clusterv1alpha1.Cluster, conditions ...metav1.Condition) error {
klog.V(4).InfoS("Start to update cluster status condition", "cluster", cluster.Name)
klog.V(4).Infof("Start to update cluster(%s) status condition", cluster.Name)
err := retry.RetryOnConflict(retry.DefaultRetry, func() (err error) {
_, err = helper.UpdateStatus(ctx, c, cluster, func() error {
for _, condition := range conditions {
@ -331,7 +331,7 @@ func updateStatusCondition(ctx context.Context, c client.Client, cluster *cluste
return err
})
if err != nil {
klog.ErrorS(err, "Failed to update status condition of the member cluster", "cluster", cluster.Name)
klog.Errorf("Failed to update status condition of the member cluster: %v, err is : %v", cluster.Name, err)
return err
}
return nil
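
For context, a hedged sketch of the conflict-retried status update pattern used in the two hunks above, written directly against client-go's retry helper; the function name and the field being set are illustrative only.

package demo

import (
	"context"

	clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
	"k8s.io/client-go/util/retry"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// updateClusterVersion retries the status update on optimistic-concurrency conflicts.
func updateClusterVersion(ctx context.Context, c client.Client, name, version string) error {
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		cluster := &clusterv1alpha1.Cluster{}
		// Re-read the latest object on every attempt so a conflict can be resolved.
		if err := c.Get(ctx, client.ObjectKey{Name: name}, cluster); err != nil {
			return err
		}
		cluster.Status.KubernetesVersion = version
		return c.Status().Update(ctx, cluster)
	})
}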
@ -344,7 +344,7 @@ func (c *ClusterStatusController) initializeGenericInformerManagerForCluster(clu
dynamicClient, err := c.ClusterDynamicClientSetFunc(clusterClient.ClusterName, c.Client, c.ClusterClientOption)
if err != nil {
klog.ErrorS(err, "Failed to build dynamic cluster client", "cluster", clusterClient.ClusterName)
klog.Errorf("Failed to build dynamic cluster client for cluster %s.", clusterClient.ClusterName)
return
}
c.GenericInformerManager.ForCluster(clusterClient.ClusterName, dynamicClient.DynamicClientSet, 0)
@ -366,7 +366,7 @@ func (c *ClusterStatusController) buildInformerForCluster(clusterClient *util.Cl
if !singleClusterInformerManager.IsInformerSynced(gvr) {
allSynced = false
if _, err := singleClusterInformerManager.Lister(gvr); err != nil {
klog.ErrorS(err, "Failed to get the lister for gvr", "gvr", gvr.String())
klog.Errorf("Failed to get the lister for gvr %s: %v", gvr.String(), err)
}
}
}
@ -389,7 +389,7 @@ func (c *ClusterStatusController) buildInformerForCluster(clusterClient *util.Cl
}
return nil
}(); err != nil {
klog.ErrorS(err, "Failed to sync cache for cluster", "cluster", clusterClient.ClusterName)
klog.Errorf("Failed to sync cache for cluster: %s, error: %v", clusterClient.ClusterName, err)
c.TypedInformerManager.Stop(clusterClient.ClusterName)
return nil, err
}
@ -422,12 +422,12 @@ func (c *ClusterStatusController) initLeaseController(cluster *clusterv1alpha1.C
// start syncing lease
go func() {
klog.InfoS("Starting syncing lease for cluster", "cluster", cluster.Name)
klog.Infof("Starting syncing lease for cluster: %s", cluster.Name)
// lease controller will keep running until the stop channel is closed (context is canceled)
clusterLeaseController.Run(ctx)
klog.InfoS("Stop syncing lease for cluster", "cluster", cluster.Name)
klog.Infof("Stop syncing lease for cluster: %s", cluster.Name)
c.ClusterLeaseControllers.Delete(cluster.Name) // ensure the cache is clean
}()
}
@ -440,12 +440,12 @@ func getClusterHealthStatus(clusterClient *util.ClusterClient) (online, healthy
}
if err != nil {
klog.ErrorS(err, "Failed to do cluster health check for cluster", "cluster", clusterClient.ClusterName)
klog.Errorf("Failed to do cluster health check for cluster %v, err is : %v ", clusterClient.ClusterName, err)
return false, false
}
if healthStatus != http.StatusOK {
klog.InfoS("Member cluster isn't healthy", "cluster", clusterClient.ClusterName)
klog.Infof("Member cluster %v isn't healthy", clusterClient.ClusterName)
return true, false
}
@ -627,8 +627,7 @@ func getNodeAvailable(allocatable corev1.ResourceList, podResources *util.Resour
// When too many pods have been created, scheduling will fail, so the number of allocating pods may be huge.
// If allowedPodNumber is less than or equal to 0, we don't allow more pods to be created.
if allowedPodNumber <= 0 {
klog.InfoS("The number of schedulable Pods on the node is less than or equal to 0, " +
"so we won't add the node to cluster resource models.")
klog.Warningf("The number of schedulable Pods on the node is less than or equal to 0, so we won't add the node to cluster resource models.")
return nil
}
@ -648,7 +647,7 @@ func getAllocatableModelings(cluster *clusterv1alpha1.Cluster, nodes []*corev1.N
}
modelingSummary, err := modeling.InitSummary(cluster.Spec.ResourceModels)
if err != nil {
klog.ErrorS(err, "Failed to init cluster summary from cluster resource models for Cluster", "cluster", cluster.GetName())
klog.Errorf("Failed to init cluster summary from cluster resource models for Cluster %s. Error: %v.", cluster.GetName(), err)
return nil
}

View File

@ -58,7 +58,7 @@ type CRBStatusController struct {
// The Controller will requeue the Request to be processed again if an error is non-nil or
// Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
func (c *CRBStatusController) Reconcile(ctx context.Context, req controllerruntime.Request) (controllerruntime.Result, error) {
klog.V(4).InfoS("Reconciling ClusterResourceBinding", "name", req.NamespacedName.Name)
klog.V(4).Infof("Reconciling ClusterResourceBinding %s.", req.NamespacedName.String())
binding := &workv1alpha2.ClusterResourceBinding{}
if err := c.Client.Get(ctx, req.NamespacedName, binding); err != nil {
@ -112,7 +112,8 @@ func (c *CRBStatusController) SetupWithManager(mgr controllerruntime.Manager) er
func (c *CRBStatusController) syncBindingStatus(ctx context.Context, binding *workv1alpha2.ClusterResourceBinding) error {
err := helper.AggregateClusterResourceBindingWorkStatus(ctx, c.Client, binding, c.EventRecorder)
if err != nil {
klog.ErrorS(err, "Failed to aggregate workStatues to clusterResourceBinding", "name", binding.Name)
klog.Errorf("Failed to aggregate workStatues to clusterResourceBinding(%s), Error: %v",
binding.Name, err)
return err
}

View File

@ -58,7 +58,7 @@ type RBStatusController struct {
// The Controller will requeue the Request to be processed again if an error is non-nil or
// Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
func (c *RBStatusController) Reconcile(ctx context.Context, req controllerruntime.Request) (controllerruntime.Result, error) {
klog.V(4).InfoS("Reconciling ResourceBinding", "namespace", req.Namespace, "name", req.Name)
klog.V(4).Infof("Reconciling ResourceBinding %s.", req.NamespacedName.String())
binding := &workv1alpha2.ResourceBinding{}
if err := c.Client.Get(ctx, req.NamespacedName, binding); err != nil {
@ -114,7 +114,8 @@ func (c *RBStatusController) SetupWithManager(mgr controllerruntime.Manager) err
func (c *RBStatusController) syncBindingStatus(ctx context.Context, binding *workv1alpha2.ResourceBinding) error {
err := helper.AggregateResourceBindingWorkStatus(ctx, c.Client, binding, c.EventRecorder)
if err != nil {
klog.ErrorS(err, "Failed to aggregate workStatues to ResourceBinding", "namespace", binding.Namespace, "name", binding.Name)
klog.Errorf("Failed to aggregate workStatus to resourceBinding(%s/%s), Error: %v",
binding.Namespace, binding.Name, err)
return err
}

View File

@ -82,7 +82,7 @@ type WorkStatusController struct {
// The Controller will requeue the Request to be processed again if an error is non-nil or
// Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
func (c *WorkStatusController) Reconcile(ctx context.Context, req controllerruntime.Request) (controllerruntime.Result, error) {
klog.V(4).InfoS("Reconciling status of Work.", "namespace", req.Namespace, "name", req.Name)
klog.V(4).Infof("Reconciling status of Work %s.", req.NamespacedName.String())
work := &workv1alpha1.Work{}
if err := c.Client.Get(ctx, req.NamespacedName, work); err != nil {
@ -104,20 +104,19 @@ func (c *WorkStatusController) Reconcile(ctx context.Context, req controllerrunt
clusterName, err := names.GetClusterName(work.GetNamespace())
if err != nil {
klog.ErrorS(err, "Failed to get member cluster name from Work.", "namespace", work.GetNamespace())
klog.Errorf("Failed to get member cluster name by %s. Error: %v.", work.GetNamespace(), err)
return controllerruntime.Result{}, err
}
cluster, err := util.GetCluster(c.Client, clusterName)
if err != nil {
klog.ErrorS(err, "Failed to get the given member cluster", "cluster", clusterName)
klog.Errorf("Failed to the get given member cluster %s", clusterName)
return controllerruntime.Result{}, err
}
if !util.IsClusterReady(&cluster.Status) {
err := fmt.Errorf("cluster(%s) not ready", cluster.Name)
klog.ErrorS(err, "Stop syncing the Work to the cluster as not ready.", "namespace", work.Namespace, "name", work.Name, "cluster", cluster.Name)
return controllerruntime.Result{}, err
klog.Errorf("Stop syncing the Work(%s/%s) to the cluster(%s) as not ready.", work.Namespace, work.Name, cluster.Name)
return controllerruntime.Result{}, fmt.Errorf("cluster(%s) not ready", cluster.Name)
}
return c.buildResourceInformers(cluster, work)
@ -128,7 +127,7 @@ func (c *WorkStatusController) Reconcile(ctx context.Context, req controllerrunt
func (c *WorkStatusController) buildResourceInformers(cluster *clusterv1alpha1.Cluster, work *workv1alpha1.Work) (controllerruntime.Result, error) {
err := c.registerInformersAndStart(cluster, work)
if err != nil {
klog.ErrorS(err, "Failed to register informer for Work.", "namespace", work.GetNamespace(), "name", work.GetName())
klog.Errorf("Failed to register informer for Work %s/%s. Error: %v.", work.GetNamespace(), work.GetName(), err)
return controllerruntime.Result{}, err
}
return controllerruntime.Result{}, nil
@ -172,13 +171,13 @@ func generateKey(obj interface{}) (util.QueueKey, error) {
func getClusterNameFromAnnotation(resource *unstructured.Unstructured) (string, error) {
workNamespace, exist := resource.GetAnnotations()[workv1alpha2.WorkNamespaceAnnotation]
if !exist {
klog.V(5).InfoS("Ignore resource which is not managed by Karmada.", "kind", resource.GetKind(), "namespace", resource.GetNamespace(), "name", resource.GetName())
klog.V(5).Infof("Ignore resource(kind=%s, %s/%s) which is not managed by Karmada.", resource.GetKind(), resource.GetNamespace(), resource.GetName())
return "", nil
}
cluster, err := names.GetClusterName(workNamespace)
if err != nil {
klog.ErrorS(err, "Failed to get cluster name from Work.", "namespace", workNamespace)
klog.Errorf("Failed to get cluster name from work namespace: %s, error: %v.", workNamespace, err)
return "", err
}
return cluster, nil
@ -189,9 +188,8 @@ func (c *WorkStatusController) syncWorkStatus(key util.QueueKey) error {
ctx := context.Background()
fedKey, ok := key.(keys.FederatedKey)
if !ok {
err := fmt.Errorf("invalid key")
klog.ErrorS(err, "Failed to sync status", "key", key)
return err
klog.Errorf("Failed to sync status as invalid key: %v", key)
return fmt.Errorf("invalid key")
}
observedObj, err := helper.GetObjectFromCache(c.RESTMapper, c.InformerManager, fedKey)
@ -206,7 +204,7 @@ func (c *WorkStatusController) syncWorkStatus(key util.QueueKey) error {
workNamespace, nsExist := observedAnnotations[workv1alpha2.WorkNamespaceAnnotation]
workName, nameExist := observedAnnotations[workv1alpha2.WorkNameAnnotation]
if !nsExist || !nameExist {
klog.InfoS("Ignoring object which is not managed by Karmada.", "object", fedKey.String())
klog.Infof("Ignore object(%s) which not managed by Karmada.", fedKey.String())
return nil
}
@ -217,7 +215,7 @@ func (c *WorkStatusController) syncWorkStatus(key util.QueueKey) error {
return nil
}
klog.ErrorS(err, "Failed to get Work from cache", "namespace", workNamespace, "name", workName)
klog.Errorf("Failed to get Work(%s/%s) from cache: %v", workNamespace, workName, err)
return err
}
@ -230,7 +228,7 @@ func (c *WorkStatusController) syncWorkStatus(key util.QueueKey) error {
return err
}
klog.InfoS("Reflecting resource status to Work.", "kind", observedObj.GetKind(), "resource", observedObj.GetNamespace()+"/"+observedObj.GetName(), "namespace", workNamespace, "name", workName)
klog.Infof("Reflecting the resource(kind=%s, %s/%s) status to the Work(%s/%s).", observedObj.GetKind(), observedObj.GetNamespace(), observedObj.GetName(), workNamespace, workName)
return c.reflectStatus(ctx, workObject, observedObj)
}
@ -246,7 +244,7 @@ func (c *WorkStatusController) updateResource(ctx context.Context, observedObj *
clusterName, err := names.GetClusterName(workObject.Namespace)
if err != nil {
klog.ErrorS(err, "Failed to get member cluster name", "cluster", workObject.Namespace)
klog.Errorf("Failed to get member cluster name: %v", err)
return err
}
@ -257,7 +255,7 @@ func (c *WorkStatusController) updateResource(ctx context.Context, observedObj *
operationResult, updateErr := c.ObjectWatcher.Update(ctx, clusterName, desiredObj, observedObj)
metrics.CountUpdateResourceToCluster(updateErr, desiredObj.GetAPIVersion(), desiredObj.GetKind(), clusterName, string(operationResult))
if updateErr != nil {
klog.ErrorS(updateErr, "Updating resource failed", "resource", fedKey.String())
klog.Errorf("Updating %s failed: %v", fedKey.String(), updateErr)
return updateErr
}
// We can't return even after a successful update, because that might lose the chance to collect status.
@ -284,7 +282,7 @@ func (c *WorkStatusController) handleDeleteEvent(ctx context.Context, key keys.F
return nil
}
klog.ErrorS(err, "Failed to get Work from cache")
klog.Errorf("Failed to get Work from cache: %v", err)
return err
}
@ -318,7 +316,7 @@ func (c *WorkStatusController) recreateResourceIfNeeded(ctx context.Context, wor
if reflect.DeepEqual(desiredGVK, workloadKey.GroupVersionKind()) &&
manifest.GetNamespace() == workloadKey.Namespace &&
manifest.GetName() == workloadKey.Name {
klog.InfoS("Recreating resource.", "resource", workloadKey.String())
klog.Infof("Recreating resource(%s).", workloadKey.String())
err := c.ObjectWatcher.Create(ctx, workloadKey.Cluster, manifest)
metrics.CountCreateResourceToCluster(err, workloadKey.GroupVersion().String(), workloadKey.Kind, workloadKey.Cluster, true)
if err != nil {
@ -351,7 +349,7 @@ func (c *WorkStatusController) updateAppliedCondition(ctx context.Context, work
})
if err != nil {
klog.ErrorS(err, "Failed to update condition of work.", "namespace", work.Namespace, "name", work.Name)
klog.Errorf("Failed to update condition of work %s/%s: %s", work.Namespace, work.Name, err.Error())
}
}
@ -359,7 +357,8 @@ func (c *WorkStatusController) updateAppliedCondition(ctx context.Context, work
func (c *WorkStatusController) reflectStatus(ctx context.Context, work *workv1alpha1.Work, clusterObj *unstructured.Unstructured) error {
statusRaw, err := c.ResourceInterpreter.ReflectStatus(clusterObj)
if err != nil {
klog.ErrorS(err, "Failed to reflect status for object with resourceInterpreter", "kind", clusterObj.GetKind(), "resource", clusterObj.GetNamespace()+"/"+clusterObj.GetName())
klog.Errorf("Failed to reflect status for object(%s/%s/%s) with resourceInterpreter, err: %v",
clusterObj.GetKind(), clusterObj.GetNamespace(), clusterObj.GetName(), err)
c.EventRecorder.Eventf(work, corev1.EventTypeWarning, events.EventReasonReflectStatusFailed, "Reflect status for object(%s/%s/%s) failed, err: %s.", clusterObj.GetKind(), clusterObj.GetNamespace(), clusterObj.GetName(), err.Error())
return err
}
@ -390,7 +389,7 @@ func (c *WorkStatusController) reflectStatus(ctx context.Context, work *workv1al
func (c *WorkStatusController) interpretHealth(clusterObj *unstructured.Unstructured, work *workv1alpha1.Work) workv1alpha1.ResourceHealth {
// For kinds that don't have a health check, we treat them as healthy.
if !c.ResourceInterpreter.HookEnabled(clusterObj.GroupVersionKind(), configv1alpha1.InterpreterOperationInterpretHealth) {
klog.V(5).InfoS("skipping health assessment for object as customization missing; will treat it as healthy.", "kind", clusterObj.GroupVersionKind(), "resource", clusterObj.GetNamespace()+"/"+clusterObj.GetName())
klog.V(5).Infof("skipping health assessment for object: %v %s/%s as missing customization and will treat it as healthy.", clusterObj.GroupVersionKind(), clusterObj.GetNamespace(), clusterObj.GetName())
return workv1alpha1.ResourceHealthy
}
@ -497,7 +496,7 @@ func (c *WorkStatusController) registerInformersAndStart(cluster *clusterv1alpha
}
return nil
}(); err != nil {
klog.ErrorS(err, "Failed to sync cache for cluster", "cluster", cluster.Name)
klog.Errorf("Failed to sync cache for cluster: %s, error: %v", cluster.Name, err)
c.InformerManager.Stop(cluster.Name)
return err
}
@ -512,12 +511,12 @@ func (c *WorkStatusController) getGVRsFromWork(work *workv1alpha1.Work) (map[sch
workload := &unstructured.Unstructured{}
err := workload.UnmarshalJSON(manifest.Raw)
if err != nil {
klog.ErrorS(err, "Failed to unmarshal workload.")
klog.Errorf("Failed to unmarshal workload. Error: %v.", err)
return nil, err
}
gvr, err := restmapper.GetGroupVersionResource(c.RESTMapper, workload.GroupVersionKind())
if err != nil {
klog.ErrorS(err, "Failed to get GVR from GVK for resource.", "namespace", workload.GetNamespace(), "name", workload.GetName())
klog.Errorf("Failed to get GVR from GVK for resource %s/%s. Error: %v.", workload.GetNamespace(), workload.GetName(), err)
return nil, err
}
gvrTargets[gvr] = true
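
A small hedged sketch of the manifest-decoding step shown above: unmarshal raw Work manifest bytes into an Unstructured object and read its GroupVersionKind. The function name and the printing are illustrative, not part of the controller.

package demo

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

// manifestGVK decodes one raw manifest and reports the workload it describes.
func manifestGVK(raw []byte) error {
	workload := &unstructured.Unstructured{}
	if err := workload.UnmarshalJSON(raw); err != nil {
		return err
	}
	fmt.Printf("workload %s %s/%s\n", workload.GroupVersionKind(), workload.GetNamespace(), workload.GetName())
	return nil
}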
@ -534,7 +533,7 @@ func (c *WorkStatusController) getSingleClusterManager(cluster *clusterv1alpha1.
if singleClusterInformerManager == nil {
dynamicClusterClient, err := c.ClusterDynamicClientSetFunc(cluster.Name, c.Client, c.ClusterClientOption)
if err != nil {
klog.ErrorS(err, "Failed to build dynamic cluster client for cluster.", "cluster", cluster.Name)
klog.Errorf("Failed to build dynamic cluster client for cluster %s.", cluster.Name)
return nil, err
}
singleClusterInformerManager = c.InformerManager.ForCluster(dynamicClusterClient.ClusterName, dynamicClusterClient.DynamicClientSet, 0)
@ -561,7 +560,7 @@ func (c *WorkStatusController) SetupWithManager(mgr controllerruntime.Manager) e
func (c *WorkStatusController) eventf(object *unstructured.Unstructured, eventType, reason, messageFmt string, args ...interface{}) {
ref, err := util.GenEventRef(object)
if err != nil {
klog.ErrorS(err, "Ignoring event. Failed to build event reference.", "reason", reason, "kind", object.GetKind(), "reference", klog.KObj(object))
klog.Errorf("Ignore event(%s) as failing to build event reference for: kind=%s, %s due to %v", reason, object.GetKind(), klog.KObj(object), err)
return
}
c.EventRecorder.Eventf(ref, eventType, reason, messageFmt, args...)

View File

@ -76,13 +76,13 @@ func (c *RebalancerController) SetupWithManager(mgr controllerruntime.Manager) e
// The Controller will requeue the Request to be processed again if an error is non-nil or
// Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
func (c *RebalancerController) Reconcile(ctx context.Context, req controllerruntime.Request) (controllerruntime.Result, error) {
klog.V(4).InfoS("Reconciling for WorkloadRebalancer %s", req.Name)
klog.V(4).Infof("Reconciling for WorkloadRebalancer %s", req.Name)
// 1. get latest WorkloadRebalancer
rebalancer := &appsv1alpha1.WorkloadRebalancer{}
if err := c.Client.Get(ctx, req.NamespacedName, rebalancer); err != nil {
if apierrors.IsNotFound(err) {
klog.InfoS("no need to reconcile WorkloadRebalancer for it not found")
klog.Infof("no need to reconcile WorkloadRebalancer for it not found")
return controllerruntime.Result{}, nil
}
return controllerruntime.Result{}, err
@ -203,7 +203,7 @@ func (c *RebalancerController) triggerReschedule(ctx context.Context, metadata m
if resource.Workload.Namespace != "" {
binding := &workv1alpha2.ResourceBinding{}
if err := c.Client.Get(ctx, client.ObjectKey{Namespace: resource.Workload.Namespace, Name: bindingName}, binding); err != nil {
klog.ErrorS(err, "get binding for resource failed", "resource", resource.Workload)
klog.Errorf("get binding for resource %+v failed: %+v", resource.Workload, err)
c.recordAndCountRebalancerFailed(&newStatus.ObservedWorkloads[i], &retryNum, err)
continue
}
@ -212,7 +212,7 @@ func (c *RebalancerController) triggerReschedule(ctx context.Context, metadata m
binding.Spec.RescheduleTriggeredAt = &metadata.CreationTimestamp
if err := c.Client.Update(ctx, binding); err != nil {
klog.ErrorS(err, "update binding for resource failed", "resource", resource.Workload)
klog.Errorf("update binding for resource %+v failed: %+v", resource.Workload, err)
c.recordAndCountRebalancerFailed(&newStatus.ObservedWorkloads[i], &retryNum, err)
continue
}
@ -221,7 +221,7 @@ func (c *RebalancerController) triggerReschedule(ctx context.Context, metadata m
} else {
clusterbinding := &workv1alpha2.ClusterResourceBinding{}
if err := c.Client.Get(ctx, client.ObjectKey{Name: bindingName}, clusterbinding); err != nil {
klog.ErrorS(err, "get cluster binding for resource failed", "resource", resource.Workload)
klog.Errorf("get cluster binding for resource %+v failed: %+v", resource.Workload, err)
c.recordAndCountRebalancerFailed(&newStatus.ObservedWorkloads[i], &retryNum, err)
continue
}
@ -230,7 +230,7 @@ func (c *RebalancerController) triggerReschedule(ctx context.Context, metadata m
clusterbinding.Spec.RescheduleTriggeredAt = &metadata.CreationTimestamp
if err := c.Client.Update(ctx, clusterbinding); err != nil {
klog.ErrorS(err, "update cluster binding for resource failed", "resource", resource.Workload)
klog.Errorf("update cluster binding for resource %+v failed: %+v", resource.Workload, err)
c.recordAndCountRebalancerFailed(&newStatus.ObservedWorkloads[i], &retryNum, err)
continue
}
@ -239,8 +239,8 @@ func (c *RebalancerController) triggerReschedule(ctx context.Context, metadata m
}
}
klog.V(4).InfoS(fmt.Sprintf("Finish handling WorkloadRebalancer, %d/%d resource success in all, while %d resource need retry",
successNum, len(newStatus.ObservedWorkloads), retryNum), "workloadRebalancer", metadata.Name, "successNum", successNum, "totalNum", len(newStatus.ObservedWorkloads), "retryNum", retryNum)
klog.V(4).Infof("Finish handling WorkloadRebalancer (%s), %d/%d resource success in all, while %d resource need retry",
metadata.Name, successNum, len(newStatus.ObservedWorkloads), retryNum)
return newStatus, retryNum
}
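All four triggerReschedule hunks above follow the same get, stamp, update sequence against the binding that backs a workload, differing only in whether the binding is namespaced. Below is a minimal sketch of that sequence as a hypothetical helper, assuming the karmada work/v1alpha2 types visible in the diff; the real controller also records per-workload failures and a retry count, omitted here.

```go
package sketch

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"

	workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
)

// triggerRescheduleSketch stamps RescheduleTriggeredAt on the binding named
// bindingName so the scheduler re-evaluates placement. Hypothetical helper,
// not part of the controller.
func triggerRescheduleSketch(ctx context.Context, c client.Client, namespace, bindingName string, triggeredAt metav1.Time) error {
	if namespace != "" {
		// Namespaced workloads are backed by a ResourceBinding.
		binding := &workv1alpha2.ResourceBinding{}
		if err := c.Get(ctx, client.ObjectKey{Namespace: namespace, Name: bindingName}, binding); err != nil {
			return err
		}
		binding.Spec.RescheduleTriggeredAt = &triggeredAt
		return c.Update(ctx, binding)
	}

	// Cluster-scoped workloads are backed by a ClusterResourceBinding instead.
	clusterBinding := &workv1alpha2.ClusterResourceBinding{}
	if err := c.Get(ctx, client.ObjectKey{Name: bindingName}, clusterBinding); err != nil {
		return err
	}
	clusterBinding.Spec.RescheduleTriggeredAt = &triggeredAt
	return c.Update(ctx, clusterBinding)
}
```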
@ -269,26 +269,26 @@ func (c *RebalancerController) updateWorkloadRebalancerStatus(ctx context.Contex
modifiedRebalancer.Status = *newStatus
return retry.RetryOnConflict(retry.DefaultRetry, func() (err error) {
klog.V(4).InfoS("Start to patch WorkloadRebalancer status", "workloadRebalancer", rebalancer.Name)
klog.V(4).Infof("Start to patch WorkloadRebalancer(%s) status", rebalancer.Name)
if err = c.Client.Status().Patch(ctx, modifiedRebalancer, client.MergeFrom(rebalancer)); err != nil {
klog.ErrorS(err, "Failed to patch WorkloadRebalancer status", "workloadRebalancer", rebalancer.Name)
klog.Errorf("Failed to patch WorkloadRebalancer (%s) status, err: %+v", rebalancer.Name, err)
return err
}
klog.V(4).InfoS("Patch WorkloadRebalancer successful", "workloadRebalancer", rebalancer.Name)
klog.V(4).Infof("Patch WorkloadRebalancer(%s) successful", rebalancer.Name)
return nil
})
}
func (c *RebalancerController) deleteWorkloadRebalancer(ctx context.Context, rebalancer *appsv1alpha1.WorkloadRebalancer) error {
klog.V(4).InfoS("Start to clean up WorkloadRebalancer", "workloadRebalancer", rebalancer.Name)
klog.V(4).Infof("Start to clean up WorkloadRebalancer(%s)", rebalancer.Name)
options := &client.DeleteOptions{Preconditions: &metav1.Preconditions{ResourceVersion: &rebalancer.ResourceVersion}}
if err := c.Client.Delete(ctx, rebalancer, options); err != nil {
klog.ErrorS(err, "Cleaning up WorkloadRebalancer failed", "workloadRebalancer", rebalancer.Name)
klog.Errorf("Cleaning up WorkloadRebalancer(%s) failed: %+v.", rebalancer.Name, err)
return err
}
klog.V(4).InfoS("Cleaning up WorkloadRebalancer successful", "workloadRebalancer", rebalancer.Name)
klog.V(4).Infof("Cleaning up WorkloadRebalancer(%s) successful", rebalancer.Name)
return nil
}
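The two functions in the hunk above lean on standard client-go/controller-runtime idioms: a status merge patch built with client.MergeFrom wrapped in retry.RetryOnConflict, and a delete guarded by a ResourceVersion precondition. The sketch below condenses the patch half, with hypothetical names and the WorkloadRebalancer type from the appsv1alpha1 package already imported above:

```go
package sketch

import (
	"context"

	"k8s.io/client-go/util/retry"
	"sigs.k8s.io/controller-runtime/pkg/client"

	appsv1alpha1 "github.com/karmada-io/karmada/pkg/apis/apps/v1alpha1"
)

// patchStatusSketch is a hypothetical condensation of updateWorkloadRebalancerStatus.
func patchStatusSketch(ctx context.Context, c client.Client, rebalancer *appsv1alpha1.WorkloadRebalancer, newStatus appsv1alpha1.WorkloadRebalancerStatus) error {
	modified := rebalancer.DeepCopy()
	modified.Status = newStatus

	// RetryOnConflict re-runs the closure with default backoff whenever the
	// API server rejects the write with a conflict.
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		// MergeFrom diffs the modified copy against the original object, so
		// only the status change is sent to the API server.
		return c.Status().Patch(ctx, modified, client.MergeFrom(rebalancer))
	})
}
```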
@ -296,6 +296,6 @@ func timeLeft(r *appsv1alpha1.WorkloadRebalancer) time.Duration {
expireAt := r.Status.FinishTime.Add(time.Duration(*r.Spec.TTLSecondsAfterFinished) * time.Second)
remainingTTL := time.Until(expireAt)
klog.V(4).InfoS("Check remaining TTL", "workloadRebalancer", r.Name, "FinishTime", r.Status.FinishTime.UTC(), "remainingTTL", remainingTTL)
klog.V(4).Infof("Found Rebalancer(%s) finished at: %+v, remainingTTL: %+v", r.Name, r.Status.FinishTime.UTC(), remainingTTL)
return remainingTTL
}
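The timeLeft hunk is plain time arithmetic: add the TTL to the finish time and measure against now. Stripped of the API types, the calculation might look like the sketch below (function name and signature are illustrative):

```go
package sketch

import "time"

// remainingTTL reports how long a finished object should be kept around:
// finishTime + ttlSeconds, measured against the current time. A non-positive
// result means the object is already eligible for cleanup.
func remainingTTL(finishTime time.Time, ttlSeconds int32) time.Duration {
	expireAt := finishTime.Add(time.Duration(ttlSeconds) * time.Second)
	return time.Until(expireAt)
}
```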

View File

@ -21,9 +21,7 @@ import (
"crypto/rsa"
"crypto/x509"
"encoding/pem"
"fmt"
"math/big"
"net"
"time"
)
@ -71,37 +69,3 @@ func GenerateTestCACertificate() (string, string, error) {
return string(certPEMData), string(privKeyPEMData), nil
}
// GetFreePorts attempts to find n available TCP ports on the specified host. It
// returns a slice of allocated port numbers or an error if it fails to acquire
// them.
func GetFreePorts(host string, n int) ([]int, error) {
ports := make([]int, 0, n)
listeners := make([]net.Listener, 0, n)
// Make sure we close all listeners if there's an error.
defer func() {
for _, l := range listeners {
l.Close()
}
}()
for i := 0; i < n; i++ {
listener, err := net.Listen("tcp", fmt.Sprintf("%s:0", host))
if err != nil {
return nil, err
}
listeners = append(listeners, listener)
tcpAddr, ok := listener.Addr().(*net.TCPAddr)
if !ok {
return nil, fmt.Errorf("listener address is not a *net.TCPAddr")
}
ports = append(ports, tcpAddr.Port)
}
// At this point we have all ports, so we can close the listeners.
for _, l := range listeners {
l.Close()
}
return ports, nil
}
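GetFreePorts is built on the usual "listen on port 0" trick: the kernel hands out an unused port, the listener is closed, and the number is returned to the caller. That also explains its main caveat: once the listener is closed there is a short window in which another process can claim the port, so the result is best-effort rather than reserved. A standalone illustration of the trick (not part of the package above):

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	// Asking for port 0 tells the kernel to pick any currently free port.
	listener, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}

	// The chosen port is visible on the listener's address.
	port := listener.Addr().(*net.TCPAddr).Port
	fmt.Println("allocated port:", port)

	// Closing the listener releases the port for the caller to bind again,
	// which is exactly where the best-effort window opens up.
	listener.Close()
}
```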