Update controller-runtime to v0.12.2 matching the kubernetes dependencies v0.24.2

Signed-off-by: RainbowMango <qdurenhongcai@gmail.com>
RainbowMango 2022-06-28 20:18:26 +08:00
parent 4a243f447a
commit 9834e6419a
56 changed files with 704 additions and 232 deletions

go.mod (4 changed lines)

@ -11,7 +11,7 @@ require (
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6
github.com/olekukonko/tablewriter v0.0.4
github.com/onsi/ginkgo/v2 v2.1.3
github.com/onsi/gomega v1.17.0
github.com/onsi/gomega v1.18.1
github.com/opensearch-project/opensearch-go v1.1.0
github.com/prometheus/client_golang v1.12.1
github.com/spf13/cobra v1.4.0
@ -38,7 +38,7 @@ require (
k8s.io/kubectl v0.24.2
k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9
sigs.k8s.io/cluster-api v1.0.1
sigs.k8s.io/controller-runtime v0.11.1
sigs.k8s.io/controller-runtime v0.12.2
sigs.k8s.io/kind v0.12.0
sigs.k8s.io/mcs-api v0.1.0
sigs.k8s.io/structured-merge-diff/v4 v4.2.1

go.sum (8 changed lines)

@ -660,6 +660,7 @@ github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
github.com/onsi/ginkgo/v2 v2.1.3 h1:e/3Cwtogj0HA+25nMP1jCMDIf8RtRYbGwGGuBIFztkc=
github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
@ -669,8 +670,9 @@ github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoT
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0=
github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
github.com/onsi/gomega v1.17.0 h1:9Luw4uT5HTjHTN8+aNcSThgH1vdXnmdJ8xIfZ4wyTRE=
github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE=
github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
@ -1584,8 +1586,8 @@ sigs.k8s.io/cluster-api v1.0.1 h1:0YXQoemI4WnZF8RzT9T2vCtnXAi22rD4Fx1Tj2hhCEM=
sigs.k8s.io/cluster-api v1.0.1/go.mod h1:/LkJXtsvhxTV4U0z1Y2Y1Gr2xebJ0/ce09Ab2M0XU/U=
sigs.k8s.io/controller-runtime v0.6.1/go.mod h1:XRYBPdbf5XJu9kpS84VJiZ7h/u1hF3gEORz0efEja7A=
sigs.k8s.io/controller-runtime v0.10.3/go.mod h1:CQp8eyUQZ/Q7PJvnIrB6/hgfTC1kBkGylwsLgOQi1WY=
sigs.k8s.io/controller-runtime v0.11.1 h1:7YIHT2QnHJArj/dk9aUkYhfqfK5cIxPOX5gPECfdZLU=
sigs.k8s.io/controller-runtime v0.11.1/go.mod h1:KKwLiTooNGu+JmLZGn9Sl3Gjmfj66eMbCQznLP5zcqA=
sigs.k8s.io/controller-runtime v0.12.2 h1:nqV02cvhbAj7tbt21bpPpTByrXGn2INHRsi39lXy9sE=
sigs.k8s.io/controller-runtime v0.12.2/go.mod h1:qKsk4WE6zW2Hfj0G4v10EnNB2jMG1C+NTb8h+DwCoU0=
sigs.k8s.io/controller-tools v0.3.0/go.mod h1:enhtKGfxZD1GFEoMgP8Fdbu+uKQ/cq1/WGJhdVChfvI=
sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 h1:kDi4JBNAsJWfz1aEXhO8Jg87JJaPNLh5tIzYHgStQ9Y=
sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY=


@ -1,18 +0,0 @@
language: go
arch:
- amd64
- ppc64le
go:
- gotip
- 1.16.x
- 1.15.x
env:
- GO111MODULE=on
install: skip
script:
- go mod tidy && git diff --exit-code go.mod go.sum
- make test


@ -1,3 +1,25 @@
## 1.18.1
## Fixes
- Add pointer support to HaveField matcher (#495) [79e41a3]
## 1.18.0
## Features
- Docs now live on the master branch in the docs folder which will make for easier PRs. The docs also use Ginkgo 2.0's new docs html/css/js. [2570272]
- New HaveValue matcher can handle actuals that are either values (in which case they are passed on unscathed) or pointers (in which case they are indirected). [Docs here.](https://onsi.github.io/gomega/#working-with-values) (#485) [bdc087c]
- Gmeasure has been declared GA [360db9d]
## Fixes
- Gomega now uses ioutil for Go 1.15 and lower (#492) - official support is only for the most recent two major versions of Go but this will unblock users who need to stay on older unsupported versions of Go. [c29c1c0]
## Maintenance
- Remove Travis workflow (#491) [72e6040]
- Upgrade to Ginkgo 2.0.0 GA [f383637]
- chore: fix description of HaveField matcher (#487) [2b4b2c0]
- use tools.go to ensure Ginkgo cli dependencies are included [f58a52b]
- remove dockerfile and simplify github actions to match ginkgo's actions [3f8160d]
## 1.17.0
### Features


@ -1 +0,0 @@
FROM golang:1.15


@ -1,33 +0,0 @@
###### Help ###################################################################
.DEFAULT_GOAL = help
.PHONY: help
help: ## list Makefile targets
@grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
###### Targets ################################################################
test: version download fmt vet ginkgo ## Runs all build, static analysis, and test steps
download: ## Download dependencies
go mod download
vet: ## Run static code analysis
go vet ./...
ginkgo: ## Run tests using Ginkgo
go run github.com/onsi/ginkgo/ginkgo -p -r --randomizeAllSpecs --failOnPending --randomizeSuites --race
fmt: ## Checks that the code is formatted correcty
@@if [ -n "$$(gofmt -s -e -l -d .)" ]; then \
echo "gofmt check failed: run 'gofmt -s -e -l -w .'"; \
exit 1; \
fi
docker_test: ## Run tests in a container via docker-compose
docker-compose build test && docker-compose run --rm test make test
version: ## Display the version of Go
@@go version


@ -1,10 +0,0 @@
version: '3.0'
services:
test:
build:
dockerfile: Dockerfile
context: .
working_dir: /app
volumes:
- ${PWD}:/app


@ -22,7 +22,7 @@ import (
"github.com/onsi/gomega/types"
)
const GOMEGA_VERSION = "1.17.0"
const GOMEGA_VERSION = "1.18.1"
const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler.
If you're using Ginkgo then you probably forgot to put your assertion in an It().


@ -0,0 +1,48 @@
//go:build go1.16
// +build go1.16
// Package gutil is a replacement for ioutil, which should not be used in new
// code as of Go 1.16. With Go 1.16 and higher, this implementation
// uses the ioutil replacement functions in "io" and "os" with some
// Gomega specifics. This means that we should not get deprecation warnings
// for ioutil when they are added.
package gutil
import (
"io"
"os"
)
func NopCloser(r io.Reader) io.ReadCloser {
return io.NopCloser(r)
}
func ReadAll(r io.Reader) ([]byte, error) {
return io.ReadAll(r)
}
func ReadDir(dirname string) ([]string, error) {
entries, err := os.ReadDir(dirname)
if err != nil {
return nil, err
}
var names []string
for _, entry := range entries {
names = append(names, entry.Name())
}
return names, nil
}
func ReadFile(filename string) ([]byte, error) {
return os.ReadFile(filename)
}
func MkdirTemp(dir, pattern string) (string, error) {
return os.MkdirTemp(dir, pattern)
}
func WriteFile(filename string, data []byte) error {
return os.WriteFile(filename, data, 0644)
}


@ -0,0 +1,47 @@
//go:build !go1.16
// +build !go1.16
// Package gutil is a replacement for ioutil, which should not be used in new
// code as of Go 1.16. With Go 1.15 and lower, this implementation
// uses the ioutil functions, meaning that although Gomega is not officially
// supported on these versions, it is still likely to work.
package gutil
import (
"io"
"io/ioutil"
)
func NopCloser(r io.Reader) io.ReadCloser {
return ioutil.NopCloser(r)
}
func ReadAll(r io.Reader) ([]byte, error) {
return ioutil.ReadAll(r)
}
func ReadDir(dirname string) ([]string, error) {
files, err := ioutil.ReadDir(dirname)
if err != nil {
return nil, err
}
var names []string
for _, file := range files {
names = append(names, file.Name())
}
return names, nil
}
func ReadFile(filename string) ([]byte, error) {
return ioutil.ReadFile(filename)
}
func MkdirTemp(dir, pattern string) (string, error) {
return ioutil.TempDir(dir, pattern)
}
func WriteFile(filename string, data []byte) error {
return ioutil.WriteFile(filename, data, 0644)
}


@ -357,12 +357,12 @@ func HaveKeyWithValue(key interface{}, value interface{}) types.GomegaMatcher {
// type Person struct {
// FirstName string
// LastName string
// DOB time.Time
// DOB time.Time
// }
// Expect(book).To(HaveField("Title", "Les Miserables"))
// Expect(book).To(HaveField("Title", ContainSubstring("Les"))
// Expect(book).To(HaveField("Person.FirstName", Equal("Victor"))
// Expect(book).To(HaveField("Person.DOB.Year()", BeNumerically("<", 1900))
// Expect(book).To(HaveField("Author.FirstName", Equal("Victor"))
// Expect(book).To(HaveField("Author.DOB.Year()", BeNumerically("<", 1900))
func HaveField(field string, expected interface{}) types.GomegaMatcher {
return &matchers.HaveFieldMatcher{
Field: field,
@ -370,6 +370,26 @@ func HaveField(field string, expected interface{}) types.GomegaMatcher {
}
}
// HaveValue applies the given matcher to the value of actual, optionally and
// repeatedly dereferencing pointers or taking the concrete value of interfaces.
// Thus, the matcher will always be applied to non-pointer and non-interface
// values only. HaveValue will fail with an error if a pointer or interface is
// nil. It will also fail for more than 31 pointer or interface dereferences to
// guard against mistakenly applying it to arbitrarily deep linked pointers.
//
// HaveValue differs from gstruct.PointTo in that it does not expect actual to
// be a pointer (as gstruct.PointTo does) but instead also accepts non-pointer
// and even interface values.
//
// actual := 42
// Expect(actual).To(HaveValue(42))
// Expect(&actual).To(HaveValue(42))
func HaveValue(matcher types.GomegaMatcher) types.GomegaMatcher {
return &matchers.HaveValueMatcher{
Matcher: matcher,
}
}
//BeNumerically performs numerical assertions in a type-agnostic way.
//Actual and expected should be numbers, though the specific type of
//number is irrelevant (float32, float64, uint8, etc...).
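For orientation, a small hedged sketch of how the new HaveValue matcher and the now pointer-aware HaveField behave in a test; the Book/Person types are illustrative stand-ins for the doc comment above, not part of this change.

```go
package example_test

import (
	"testing"
	"time"

	. "github.com/onsi/gomega"
)

type Person struct {
	FirstName string
	LastName  string
	DOB       time.Time
}

type Book struct {
	Title  string
	Author Person
}

func TestHaveValueAndHaveField(t *testing.T) {
	g := NewWithT(t)

	// HaveValue dereferences pointers (and interfaces) before applying the inner matcher.
	answer := 42
	g.Expect(answer).To(HaveValue(Equal(42)))
	g.Expect(&answer).To(HaveValue(Equal(42)))

	// Since gomega 1.18.1 (#495), HaveField also accepts a pointer to a struct.
	book := &Book{
		Title:  "Les Miserables",
		Author: Person{FirstName: "Victor", DOB: time.Date(1802, 2, 26, 0, 0, 0, 0, time.UTC)},
	}
	g.Expect(book).To(HaveField("Author.FirstName", Equal("Victor")))
	g.Expect(book).To(HaveField("Author.DOB.Year()", BeNumerically("<", 1900)))
}
```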


@ -12,6 +12,13 @@ func extractField(actual interface{}, field string) (interface{}, error) {
fields := strings.SplitN(field, ".", 2)
actualValue := reflect.ValueOf(actual)
if actualValue.Kind() == reflect.Ptr {
actualValue = actualValue.Elem()
}
if actualValue == (reflect.Value{}) {
return nil, fmt.Errorf("HaveField encountered nil while dereferencing a pointer of type %T.", actual)
}
if actualValue.Kind() != reflect.Struct {
return nil, fmt.Errorf("HaveField encountered:\n%s\nWhich is not a struct.", format.Object(actual, 1))
}


@ -2,11 +2,11 @@ package matchers
import (
"fmt"
"io"
"net/http"
"net/http/httptest"
"github.com/onsi/gomega/format"
"github.com/onsi/gomega/internal/gutil"
"github.com/onsi/gomega/types"
)
@ -81,7 +81,7 @@ func (matcher *HaveHTTPBodyMatcher) body(actual interface{}) ([]byte, error) {
if a.Body != nil {
defer a.Body.Close()
var err error
matcher.cachedBody, err = io.ReadAll(a.Body)
matcher.cachedBody, err = gutil.ReadAll(a.Body)
if err != nil {
return nil, fmt.Errorf("error reading response body: %w", err)
}


@ -2,13 +2,13 @@ package matchers
import (
"fmt"
"io"
"net/http"
"net/http/httptest"
"reflect"
"strings"
"github.com/onsi/gomega/format"
"github.com/onsi/gomega/internal/gutil"
)
type HaveHTTPStatusMatcher struct {
@ -78,7 +78,7 @@ func formatHttpResponse(input interface{}) string {
body := "<nil>"
if resp.Body != nil {
defer resp.Body.Close()
data, err := io.ReadAll(resp.Body)
data, err := gutil.ReadAll(resp.Body)
if err != nil {
data = []byte("<error reading body>")
}

vendor/github.com/onsi/gomega/matchers/have_value.go (new vendored file, 54 lines)

@ -0,0 +1,54 @@
package matchers
import (
"errors"
"reflect"
"github.com/onsi/gomega/format"
"github.com/onsi/gomega/types"
)
const maxIndirections = 31
type HaveValueMatcher struct {
Matcher types.GomegaMatcher // the matcher to apply to the "resolved" actual value.
resolvedActual interface{} // the ("resolved") value.
}
func (m *HaveValueMatcher) Match(actual interface{}) (bool, error) {
val := reflect.ValueOf(actual)
for allowedIndirs := maxIndirections; allowedIndirs > 0; allowedIndirs-- {
// return an error if value isn't valid. Please note that we cannot
// check for nil here, as we might not deal with a pointer or interface
// at this point.
if !val.IsValid() {
return false, errors.New(format.Message(
actual, "not to be <nil>"))
}
switch val.Kind() {
case reflect.Ptr, reflect.Interface:
// resolve pointers and interfaces to their values, then rinse and
// repeat.
if val.IsNil() {
return false, errors.New(format.Message(
actual, "not to be <nil>"))
}
val = val.Elem()
continue
default:
// forward the final value to the specified matcher.
m.resolvedActual = val.Interface()
return m.Matcher.Match(m.resolvedActual)
}
}
// too many indirections: extreme star gazing, indeed...?
return false, errors.New(format.Message(actual, "too many indirections"))
}
func (m *HaveValueMatcher) FailureMessage(_ interface{}) (message string) {
return m.Matcher.FailureMessage(m.resolvedActual)
}
func (m *HaveValueMatcher) NegatedFailureMessage(_ interface{}) (message string) {
return m.Matcher.NegatedFailureMessage(m.resolvedActual)
}

vendor/github.com/onsi/gomega/tools (new vendored file, 8 lines)

@ -0,0 +1,8 @@
//go:build tools
// +build tools
package main
import (
_ "github.com/onsi/ginkgo/v2/ginkgo"
)

vendor/modules.txt (vendored, 6 changed lines)

@ -291,11 +291,12 @@ github.com/onsi/ginkgo/v2/internal/parallel_support
github.com/onsi/ginkgo/v2/internal/testingtproxy
github.com/onsi/ginkgo/v2/reporters
github.com/onsi/ginkgo/v2/types
# github.com/onsi/gomega v1.17.0
# github.com/onsi/gomega v1.18.1
## explicit; go 1.16
github.com/onsi/gomega
github.com/onsi/gomega/format
github.com/onsi/gomega/internal
github.com/onsi/gomega/internal/gutil
github.com/onsi/gomega/matchers
github.com/onsi/gomega/matchers/support/goraph/bipartitegraph
github.com/onsi/gomega/matchers/support/goraph/edge
@ -1459,13 +1460,14 @@ sigs.k8s.io/cluster-api/feature
sigs.k8s.io/cluster-api/util/certs
sigs.k8s.io/cluster-api/util/secret
sigs.k8s.io/cluster-api/util/version
# sigs.k8s.io/controller-runtime v0.11.1
# sigs.k8s.io/controller-runtime v0.12.2
## explicit; go 1.17
sigs.k8s.io/controller-runtime
sigs.k8s.io/controller-runtime/pkg/builder
sigs.k8s.io/controller-runtime/pkg/cache
sigs.k8s.io/controller-runtime/pkg/cache/internal
sigs.k8s.io/controller-runtime/pkg/certwatcher
sigs.k8s.io/controller-runtime/pkg/certwatcher/metrics
sigs.k8s.io/controller-runtime/pkg/client
sigs.k8s.io/controller-runtime/pkg/client/apiutil
sigs.k8s.io/controller-runtime/pkg/client/config


@ -7,6 +7,7 @@ linters:
- depguard
- dogsled
- errcheck
- errorlint
- exportloopref
- goconst
- gocritic
@ -34,6 +35,7 @@ linters:
- typecheck
- unconvert
- unparam
- unused
- varcheck
- whitespace
@ -59,9 +61,13 @@ linters-settings:
- pkg: sigs.k8s.io/controller-runtime
alias: ctrl
staticcheck:
go: "1.17"
go: "1.18"
stylecheck:
go: "1.17"
go: "1.18"
depguard:
include-go-root: true
packages:
- io/ioutil # https://go.dev/doc/go1.16#ioutil
issues:
max-same-issues: 0
@ -121,6 +127,11 @@ issues:
- linters:
- gocritic
text: "singleCaseSwitch: should rewrite switch statement to if statement"
# It considers all file access to a filename that comes from a variable problematic,
# which is naiv at best.
- linters:
- gosec
text: "G304: Potential file inclusion via variable"
run:
timeout: 10m


@ -30,13 +30,13 @@ on your situation.
take this approach: the StatefulSet controller appends a specific number
to each pod that it creates, while the Deployment controller hashes the
pod template spec and appends that.
- In the few cases when you cannot take advantage of deterministic names
(e.g. when using generateName), it may be useful in to track which
actions you took, and assume that they need to be repeated if they don't
occur after a given time (e.g. using a requeue result). This is what
the ReplicaSet controller does.
In general, write your controller with the assumption that information
will eventually be correct, but may be slightly out of date. Make sure
that your reconcile function enforces the entire state of the world each
@ -48,9 +48,9 @@ generally cover most circumstances.
### Q: Where's the fake client? How do I use it?
**A**: The fake client
[exists](https://godoc.org/sigs.k8s.io/controller-runtime/pkg/client/fake),
[exists](https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/client/fake),
but we generally recommend using
[envtest.Environment](https://godoc.org/sigs.k8s.io/controller-runtime/pkg/envtest#Environment)
[envtest.Environment](https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/envtest#Environment)
to test against a real API server. In our experience, tests using fake
clients gradually re-implement poorly-written impressions of a real API
server, which leads to hard-to-maintain, complex test code.
@ -58,7 +58,7 @@ server, which leads to hard-to-maintain, complex test code.
### Q: How should I write tests? Any suggestions for getting started?
- Use the aforementioned
[envtest.Environment](https://godoc.org/sigs.k8s.io/controller-runtime/pkg/envtest#Environment)
[envtest.Environment](https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/envtest#Environment)
to spin up a real API server instead of trying to mock one out.
- Structure your tests to check that the state of the world is as you
@ -77,5 +77,5 @@ mapping between Go types and group-version-kinds in Kubernetes. In
general, your application should have its own Scheme containing the types
from the API groups that it needs (be they Kubernetes types or your own).
See the [scheme builder
docs](https://godoc.org/sigs.k8s.io/controller-runtime/pkg/scheme) for
docs](https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/scheme) for
more information.
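A minimal hedged sketch of the envtest setup the FAQ recommends over the fake client; it assumes the envtest control-plane binaries (etcd, kube-apiserver) are available locally.

```go
package controllers_test

import (
	"testing"

	"k8s.io/client-go/kubernetes/scheme"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/envtest"
)

func TestAgainstRealAPIServer(t *testing.T) {
	// Spin up a real API server + etcd instead of mocking the client.
	testEnv := &envtest.Environment{}
	cfg, err := testEnv.Start()
	if err != nil {
		t.Fatalf("failed to start envtest: %v", err)
	}
	defer testEnv.Stop() //nolint:errcheck // best-effort teardown in tests

	// Build a client against the test API server and run assertions with it.
	c, err := client.New(cfg, client.Options{Scheme: scheme.Scheme})
	if err != nil {
		t.Fatalf("failed to create client: %v", err)
	}
	_ = c
}
```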


@ -27,6 +27,7 @@ aliases:
- vincepri
- alexeldeib
- varshaprasad96
- fillzpp
# folks to can approve things in the directly-ported
# testing_frameworks portions of the codebase


@ -1,5 +1,5 @@
[![Go Report Card](https://goreportcard.com/badge/sigs.k8s.io/controller-runtime)](https://goreportcard.com/report/sigs.k8s.io/controller-runtime)
[![godoc](https://godoc.org/sigs.k8s.io/controller-runtime?status.svg)](https://godoc.org/sigs.k8s.io/controller-runtime)
[![godoc](https://pkg.go.dev/badge/sigs.k8s.io/controller-runtime)](https://pkg.go.dev/sigs.k8s.io/controller-runtime)
# Kubernetes controller-runtime Project


@ -107,7 +107,7 @@ limitations under the License.
//
// Logging (pkg/log) in controller-runtime is done via structured logs, using a
// log set of interfaces called logr
// (https://godoc.org/github.com/go-logr/logr). While controller-runtime
// (https://pkg.go.dev/github.com/go-logr/logr). While controller-runtime
// provides easy setup for using Zap (https://go.uber.org/zap, pkg/log/zap),
// you can provide any implementation of logr as the base logger for
// controller-runtime.


@ -23,6 +23,7 @@ import (
"github.com/go-logr/logr"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
@ -148,9 +149,9 @@ func (blder *Builder) WithOptions(options controller.Options) *Builder {
return blder
}
// WithLogger overrides the controller options's logger used.
func (blder *Builder) WithLogger(log logr.Logger) *Builder {
blder.ctrlOptions.Log = log
// WithLogConstructor overrides the controller options's LogConstructor.
func (blder *Builder) WithLogConstructor(logConstructor func(*reconcile.Request) logr.Logger) *Builder {
blder.ctrlOptions.LogConstructor = logConstructor
return blder
}
@ -304,13 +305,31 @@ func (blder *Builder) doController(r reconcile.Reconciler) error {
ctrlOptions.CacheSyncTimeout = *globalOpts.CacheSyncTimeout
}
controllerName := blder.getControllerName(gvk)
// Setup the logger.
if ctrlOptions.Log.GetSink() == nil {
ctrlOptions.Log = blder.mgr.GetLogger()
if ctrlOptions.LogConstructor == nil {
log := blder.mgr.GetLogger().WithValues(
"controller", controllerName,
"controllerGroup", gvk.Group,
"controllerKind", gvk.Kind,
)
lowerCamelCaseKind := strings.ToLower(gvk.Kind[:1]) + gvk.Kind[1:]
ctrlOptions.LogConstructor = func(req *reconcile.Request) logr.Logger {
log := log
if req != nil {
log = log.WithValues(
lowerCamelCaseKind, klog.KRef(req.Namespace, req.Name),
"namespace", req.Namespace, "name", req.Name,
)
}
return log
}
}
ctrlOptions.Log = ctrlOptions.Log.WithValues("reconciler group", gvk.Group, "reconciler kind", gvk.Kind)
// Build the controller and return.
blder.ctrl, err = newController(blder.getControllerName(gvk), blder.mgr, ctrlOptions)
blder.ctrl, err = newController(controllerName, blder.mgr, ctrlOptions)
return err
}
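The builder change above replaces WithLogger with WithLogConstructor. A hedged sketch of a caller migrating to the new option; the Pod controller and reconciler are assumed, not part of this diff.

```go
import (
	"github.com/go-logr/logr"
	corev1 "k8s.io/api/core/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/builder"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// setupPodController wires a hypothetical reconciler with the new
// per-request log constructor (v0.12 drops Builder.WithLogger).
func setupPodController(mgr ctrl.Manager, r reconcile.Reconciler) error {
	return builder.ControllerManagedBy(mgr).
		For(&corev1.Pod{}).
		WithLogConstructor(func(req *reconcile.Request) logr.Logger {
			log := mgr.GetLogger().WithValues("controller", "pod")
			// The request may be nil, e.g. when the controller logs at startup.
			if req != nil {
				log = log.WithValues("namespace", req.Namespace, "name", req.Name)
			}
			return log
		}).
		Complete(r)
}
```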


@ -107,6 +107,29 @@ var (
// metav1.PartialObjectMetadata to the client when fetching objects in your
// reconciler, otherwise you'll end up with a duplicate structured or
// unstructured cache.
//
// When watching a resource with OnlyMetadata, for example the v1.Pod, you
// should not Get and List using the v1.Pod type. Instead, you should use
// the special metav1.PartialObjectMetadata type.
//
// ❌ Incorrect:
//
// pod := &v1.Pod{}
// mgr.GetClient().Get(ctx, nsAndName, pod)
//
// ✅ Correct:
//
// pod := &metav1.PartialObjectMetadata{}
// pod.SetGroupVersionKind(schema.GroupVersionKind{
// Group: "",
// Version: "v1",
// Kind: "Pod",
// })
// mgr.GetClient().Get(ctx, nsAndName, pod)
//
// In the first case, controller-runtime will create another cache for the
// concrete type on top of the metadata cache; this increases memory
// consumption and leads to race conditions as caches are not in sync.
OnlyMetadata = projectAs(projectAsMetadata)
_ ForOption = OnlyMetadata
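A hedged sketch of wiring a metadata-only watch through the builder, consistent with the doc comment above; the reconciler and helper names are illustrative.

```go
import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/builder"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// watchPodsMetadataOnly caches and reconciles only Pod metadata.
func watchPodsMetadataOnly(mgr ctrl.Manager, r reconcile.Reconciler) error {
	return builder.ControllerManagedBy(mgr).
		For(&corev1.Pod{}, builder.OnlyMetadata).
		Complete(r)
}

// Inside the reconciler, fetch the object as PartialObjectMetadata, never as
// &corev1.Pod{}, to avoid creating a second structured cache.
func getPodMetadata(ctx context.Context, c client.Client, key client.ObjectKey) (*metav1.PartialObjectMetadata, error) {
	pod := &metav1.PartialObjectMetadata{}
	pod.SetGroupVersionKind(schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"})
	return pod, c.Get(ctx, key, pod)
}
```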


@ -128,6 +128,18 @@ type Options struct {
// Be very careful with this, when enabled you must DeepCopy any object before mutating it,
// otherwise you will mutate the object in the cache.
UnsafeDisableDeepCopyByObject DisableDeepCopyByObject
// TransformByObject is a map from GVKs to transformer functions which
// get applied when objects of the transformation are about to be committed
// to cache.
//
// This function is called both for new objects to enter the cache,
// and for updated objects.
TransformByObject TransformByObject
// DefaultTransform is the transform used for all GVKs which do
// not have an explicit transform func set in TransformByObject
DefaultTransform toolscache.TransformFunc
}
var defaultResyncTime = 10 * time.Hour
@ -146,7 +158,12 @@ func New(config *rest.Config, opts Options) (Cache, error) {
if err != nil {
return nil, err
}
im := internal.NewInformersMap(config, opts.Scheme, opts.Mapper, *opts.Resync, opts.Namespace, selectorsByGVK, disableDeepCopyByGVK)
transformByGVK, err := convertToTransformByKindAndGVK(opts.TransformByObject, opts.DefaultTransform, opts.Scheme)
if err != nil {
return nil, err
}
im := internal.NewInformersMap(config, opts.Scheme, opts.Mapper, *opts.Resync, opts.Namespace, selectorsByGVK, disableDeepCopyByGVK, transformByGVK)
return &informerCache{InformersMap: im}, nil
}
@ -241,3 +258,18 @@ func convertToDisableDeepCopyByGVK(disableDeepCopyByObject DisableDeepCopyByObje
}
return disableDeepCopyByGVK, nil
}
// TransformByObject associate a client.Object's GVK to a transformer function
// to be applied when storing the object into the cache.
type TransformByObject map[client.Object]toolscache.TransformFunc
func convertToTransformByKindAndGVK(t TransformByObject, defaultTransform toolscache.TransformFunc, scheme *runtime.Scheme) (internal.TransformFuncByObject, error) {
result := internal.NewTransformFuncByObject()
for obj, transformation := range t {
if err := result.Set(obj, scheme, transformation); err != nil {
return nil, err
}
}
result.SetDefault(defaultTransform)
return result, nil
}
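A hedged sketch of plugging the new transform hooks into a manager via cache.BuilderWithOptions; the specific transform (dropping Pod managed fields before caching) is only an illustration of what a TransformFunc might do.

```go
import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/rest"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/cache"
)

// newManagerWithTransforms trims Pod managed fields before objects are
// committed to the cache; all other kinds pass through DefaultTransform.
func newManagerWithTransforms(cfg *rest.Config) (ctrl.Manager, error) {
	stripPodManagedFields := func(obj interface{}) (interface{}, error) {
		if pod, ok := obj.(*corev1.Pod); ok {
			pod.SetManagedFields(nil) // drop bookkeeping data to shrink the cache
		}
		return obj, nil
	}
	return ctrl.NewManager(cfg, ctrl.Options{
		NewCache: cache.BuilderWithOptions(cache.Options{
			TransformByObject: cache.TransformByObject{&corev1.Pod{}: stripPodManagedFields},
			DefaultTransform:  func(obj interface{}) (interface{}, error) { return obj, nil },
		}),
	})
}
```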


@ -96,11 +96,11 @@ func (ip *informerCache) objectTypeForListObject(list client.ObjectList) (*schem
return nil, nil, err
}
if !strings.HasSuffix(gvk.Kind, "List") {
return nil, nil, fmt.Errorf("non-list type %T (kind %q) passed as output", list, gvk)
}
// we need the non-list GVK, so chop off the "List" from the end of the kind
gvk.Kind = gvk.Kind[:len(gvk.Kind)-4]
if strings.HasSuffix(gvk.Kind, "List") && apimeta.IsListType(list) {
gvk.Kind = gvk.Kind[:len(gvk.Kind)-4]
}
_, isUnstructured := list.(*unstructured.UnstructuredList)
var cacheTypeObj runtime.Object
if isUnstructured {
@ -193,8 +193,8 @@ func indexByField(indexer Informer, field string, extractor client.IndexerFunc)
rawVals := extractor(obj)
var vals []string
if ns == "" {
// if we're not doubling the keys for the namespaced case, just re-use what was returned to us
vals = rawVals
// if we're not doubling the keys for the namespaced case, just create a new slice with same length
vals = make([]string, len(rawVals))
} else {
// if we need to add non-namespaced versions too, double the length
vals = make([]string, len(rawVals)*2)


@ -52,11 +52,12 @@ func NewInformersMap(config *rest.Config,
namespace string,
selectors SelectorsByGVK,
disableDeepCopy DisableDeepCopyByGVK,
transformers TransformFuncByObject,
) *InformersMap {
return &InformersMap{
structured: newStructuredInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy),
unstructured: newUnstructuredInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy),
metadata: newMetadataInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy),
structured: newStructuredInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, transformers),
unstructured: newUnstructuredInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, transformers),
metadata: newMetadataInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, transformers),
Scheme: scheme,
}
@ -108,18 +109,18 @@ func (m *InformersMap) Get(ctx context.Context, gvk schema.GroupVersionKind, obj
// newStructuredInformersMap creates a new InformersMap for structured objects.
func newStructuredInformersMap(config *rest.Config, scheme *runtime.Scheme, mapper meta.RESTMapper, resync time.Duration,
namespace string, selectors SelectorsByGVK, disableDeepCopy DisableDeepCopyByGVK) *specificInformersMap {
return newSpecificInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, createStructuredListWatch)
namespace string, selectors SelectorsByGVK, disableDeepCopy DisableDeepCopyByGVK, transformers TransformFuncByObject) *specificInformersMap {
return newSpecificInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, transformers, createStructuredListWatch)
}
// newUnstructuredInformersMap creates a new InformersMap for unstructured objects.
func newUnstructuredInformersMap(config *rest.Config, scheme *runtime.Scheme, mapper meta.RESTMapper, resync time.Duration,
namespace string, selectors SelectorsByGVK, disableDeepCopy DisableDeepCopyByGVK) *specificInformersMap {
return newSpecificInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, createUnstructuredListWatch)
namespace string, selectors SelectorsByGVK, disableDeepCopy DisableDeepCopyByGVK, transformers TransformFuncByObject) *specificInformersMap {
return newSpecificInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, transformers, createUnstructuredListWatch)
}
// newMetadataInformersMap creates a new InformersMap for metadata-only objects.
func newMetadataInformersMap(config *rest.Config, scheme *runtime.Scheme, mapper meta.RESTMapper, resync time.Duration,
namespace string, selectors SelectorsByGVK, disableDeepCopy DisableDeepCopyByGVK) *specificInformersMap {
return newSpecificInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, createMetadataListWatch)
namespace string, selectors SelectorsByGVK, disableDeepCopy DisableDeepCopyByGVK, transformers TransformFuncByObject) *specificInformersMap {
return newSpecificInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, transformers, createMetadataListWatch)
}


@ -54,7 +54,9 @@ func newSpecificInformersMap(config *rest.Config,
namespace string,
selectors SelectorsByGVK,
disableDeepCopy DisableDeepCopyByGVK,
createListWatcher createListWatcherFunc) *specificInformersMap {
transformers TransformFuncByObject,
createListWatcher createListWatcherFunc,
) *specificInformersMap {
ip := &specificInformersMap{
config: config,
Scheme: scheme,
@ -68,6 +70,7 @@ func newSpecificInformersMap(config *rest.Config,
namespace: namespace,
selectors: selectors.forGVK,
disableDeepCopy: disableDeepCopy,
transformers: transformers,
}
return ip
}
@ -135,6 +138,9 @@ type specificInformersMap struct {
// disableDeepCopy indicates not to deep copy objects during get or list objects.
disableDeepCopy DisableDeepCopyByGVK
// transform funcs are applied to objects before they are committed to the cache
transformers TransformFuncByObject
}
// Start calls Run on each of the informers and sets started to true. Blocks on the context.
@ -227,6 +233,12 @@ func (ip *specificInformersMap) addInformerToMap(gvk schema.GroupVersionKind, ob
ni := cache.NewSharedIndexInformer(lw, obj, resyncPeriod(ip.resync)(), cache.Indexers{
cache.NamespaceIndex: cache.MetaNamespaceIndexFunc,
})
// Check to see if there is a transformer for this gvk
if err := ni.SetTransform(ip.transformers.Get(gvk)); err != nil {
return nil, false, err
}
rm, err := ip.mapper.RESTMapping(gvk.GroupKind(), gvk.Version)
if err != nil {
return nil, false, err


@ -0,0 +1,50 @@
package internal
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/tools/cache"
"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
)
// TransformFuncByObject provides access to the correct transform function for
// any given GVK.
type TransformFuncByObject interface {
Set(runtime.Object, *runtime.Scheme, cache.TransformFunc) error
Get(schema.GroupVersionKind) cache.TransformFunc
SetDefault(transformer cache.TransformFunc)
}
type transformFuncByGVK struct {
defaultTransform cache.TransformFunc
transformers map[schema.GroupVersionKind]cache.TransformFunc
}
// NewTransformFuncByObject creates a new TransformFuncByObject instance.
func NewTransformFuncByObject() TransformFuncByObject {
return &transformFuncByGVK{
transformers: make(map[schema.GroupVersionKind]cache.TransformFunc),
defaultTransform: nil,
}
}
func (t *transformFuncByGVK) SetDefault(transformer cache.TransformFunc) {
t.defaultTransform = transformer
}
func (t *transformFuncByGVK) Set(obj runtime.Object, scheme *runtime.Scheme, transformer cache.TransformFunc) error {
gvk, err := apiutil.GVKForObject(obj, scheme)
if err != nil {
return err
}
t.transformers[gvk] = transformer
return nil
}
func (t transformFuncByGVK) Get(gvk schema.GroupVersionKind) cache.TransformFunc {
if val, ok := t.transformers[gvk]; ok {
return val
}
return t.defaultTransform
}


@ -55,7 +55,7 @@ func MultiNamespacedCacheBuilder(namespaces []string) NewCacheFunc {
// create a cache for cluster scoped resources
gCache, err := New(config, opts)
if err != nil {
return nil, fmt.Errorf("error creating global cache %v", err)
return nil, fmt.Errorf("error creating global cache: %w", err)
}
for _, ns := range namespaces {
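The %v → %w switch above (repeated across several files in this commit) matters for callers: only %w participates in the Go 1.13 error chain, so errors.Is and errors.As can see the wrapped cause. A small hedged illustration:

```go
import (
	"errors"
	"fmt"
	"io/fs"
)

func demoWrapping() {
	cause := fs.ErrNotExist

	wrapped := fmt.Errorf("error creating global cache: %w", cause)
	flattened := fmt.Errorf("error creating global cache %v", cause)

	_ = errors.Is(wrapped, fs.ErrNotExist)   // true: %w preserves the error chain
	_ = errors.Is(flattened, fs.ErrNotExist) // false: %v only formats the text
}
```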


@ -22,6 +22,7 @@ import (
"sync"
"github.com/fsnotify/fsnotify"
"sigs.k8s.io/controller-runtime/pkg/certwatcher/metrics"
logf "sigs.k8s.io/controller-runtime/pkg/internal/log"
)
@ -116,8 +117,10 @@ func (cw *CertWatcher) Watch() {
// and updates the current certificate on the watcher. If a callback is set, it
// is invoked with the new certificate.
func (cw *CertWatcher) ReadCertificate() error {
metrics.ReadCertificateTotal.Inc()
cert, err := tls.LoadX509KeyPair(cw.certPath, cw.keyPath)
if err != nil {
metrics.ReadCertificateErrors.Inc()
return err
}


@ -0,0 +1,45 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metrics
import (
"github.com/prometheus/client_golang/prometheus"
"sigs.k8s.io/controller-runtime/pkg/metrics"
)
var (
// ReadCertificateTotal is a prometheus counter metrics which holds the total
// number of certificate reads.
ReadCertificateTotal = prometheus.NewCounter(prometheus.CounterOpts{
Name: "certwatcher_read_certificate_total",
Help: "Total number of certificate reads",
})
// ReadCertificateErrors is a prometheus counter metrics which holds the total
// number of errors from certificate read.
ReadCertificateErrors = prometheus.NewCounter(prometheus.CounterOpts{
Name: "certwatcher_read_certificate_errors_total",
Help: "Total number of certificate read errors",
})
)
func init() {
metrics.Registry.MustRegister(
ReadCertificateTotal,
ReadCertificateErrors,
)
}


@ -19,6 +19,7 @@ package apiutil
import (
"errors"
"sync"
"sync/atomic"
"golang.org/x/time/rate"
"k8s.io/apimachinery/pkg/api/meta"
@ -38,7 +39,8 @@ type dynamicRESTMapper struct {
lazy bool
// Used for lazy init.
initOnce sync.Once
inited uint32
initMtx sync.Mutex
}
// DynamicRESTMapperOption is a functional option on the dynamicRESTMapper.
@ -125,11 +127,18 @@ func (drm *dynamicRESTMapper) setStaticMapper() error {
// init initializes drm only once if drm is lazy.
func (drm *dynamicRESTMapper) init() (err error) {
drm.initOnce.Do(func() {
if drm.lazy {
err = drm.setStaticMapper()
// skip init if drm is not lazy or has initialized
if !drm.lazy || atomic.LoadUint32(&drm.inited) != 0 {
return nil
}
drm.initMtx.Lock()
defer drm.initMtx.Unlock()
if drm.inited == 0 {
if err = drm.setStaticMapper(); err == nil {
atomic.StoreUint32(&drm.inited, 1)
}
})
}
return err
}


@ -123,7 +123,7 @@ func loadConfig(context string) (*rest.Config, error) {
if _, ok := os.LookupEnv("HOME"); !ok {
u, err := user.Current()
if err != nil {
return nil, fmt.Errorf("could not get current user: %v", err)
return nil, fmt.Errorf("could not get current user: %w", err)
}
loadingRules.Precedence = append(loadingRules.Precedence, filepath.Join(u.HomeDir, clientcmd.RecommendedHomeDir, clientcmd.RecommendedFileName))
}


@ -92,6 +92,7 @@ type ClientBuilder struct {
initObject []client.Object
initLists []client.ObjectList
initRuntimeObjects []runtime.Object
objectTracker testing.ObjectTracker
}
// WithScheme sets this builder's internal scheme.
@ -128,6 +129,12 @@ func (f *ClientBuilder) WithRuntimeObjects(initRuntimeObjs ...runtime.Object) *C
return f
}
// WithObjectTracker can be optionally used to initialize this fake client with testing.ObjectTracker.
func (f *ClientBuilder) WithObjectTracker(ot testing.ObjectTracker) *ClientBuilder {
f.objectTracker = ot
return f
}
// Build builds and returns a new fake client.
func (f *ClientBuilder) Build() client.WithWatch {
if f.scheme == nil {
@ -137,7 +144,14 @@ func (f *ClientBuilder) Build() client.WithWatch {
f.restMapper = meta.NewDefaultRESTMapper([]schema.GroupVersion{})
}
tracker := versionedTracker{ObjectTracker: testing.NewObjectTracker(f.scheme, scheme.Codecs.UniversalDecoder()), scheme: f.scheme}
var tracker versionedTracker
if f.objectTracker == nil {
tracker = versionedTracker{ObjectTracker: testing.NewObjectTracker(f.scheme, scheme.Codecs.UniversalDecoder()), scheme: f.scheme}
} else {
tracker = versionedTracker{ObjectTracker: f.objectTracker, scheme: f.scheme}
}
for _, obj := range f.initObject {
if err := tracker.Add(obj); err != nil {
panic(fmt.Errorf("failed to add object %v to fake client: %w", obj, err))
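A hedged sketch of the new WithObjectTracker hook, useful when a test needs to pre-seed or drive the underlying client-go tracker directly; the scheme and seeded object are illustrative.

```go
import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/scheme"
	clienttesting "k8s.io/client-go/testing"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
)

func newFakeClientWithTracker() client.WithWatch {
	// Create the tracker explicitly so the test can also inspect or seed it.
	tracker := clienttesting.NewObjectTracker(scheme.Scheme, scheme.Codecs.UniversalDecoder())
	cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "seed"}}
	_ = tracker.Add(cm) // pre-seed the tracker before building the client

	return fake.NewClientBuilder().
		WithScheme(scheme.Scheme).
		WithObjectTracker(tracker).
		Build()
}
```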
@ -201,7 +215,7 @@ func (t versionedTracker) Add(obj runtime.Object) error {
func (t versionedTracker) Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error {
accessor, err := meta.Accessor(obj)
if err != nil {
return fmt.Errorf("failed to get accessor for object: %v", err)
return fmt.Errorf("failed to get accessor for object: %w", err)
}
if accessor.GetName() == "" {
return apierrors.NewInvalid(
@ -227,7 +241,7 @@ func (t versionedTracker) Create(gvr schema.GroupVersionResource, obj runtime.Ob
// convertFromUnstructuredIfNecessary will convert *unstructured.Unstructured for a GVK that is recocnized
// by the schema into the whatever the schema produces with New() for said GVK.
// This is required because the tracker unconditionally saves on manipulations, but it's List() implementation
// This is required because the tracker unconditionally saves on manipulations, but its List() implementation
// tries to assign whatever it finds into a ListType it gets from schema.New() - Thus we have to ensure
// we save as the very same type, otherwise subsequent List requests will fail.
func convertFromUnstructuredIfNecessary(s *runtime.Scheme, o runtime.Object) (runtime.Object, error) {
@ -255,7 +269,7 @@ func convertFromUnstructuredIfNecessary(s *runtime.Scheme, o runtime.Object) (ru
func (t versionedTracker) Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error {
accessor, err := meta.Accessor(obj)
if err != nil {
return fmt.Errorf("failed to get accessor for object: %v", err)
return fmt.Errorf("failed to get accessor for object: %w", err)
}
if accessor.GetName() == "" {
@ -301,7 +315,7 @@ func (t versionedTracker) Update(gvr schema.GroupVersionResource, obj runtime.Ob
}
intResourceVersion, err := strconv.ParseUint(oldAccessor.GetResourceVersion(), 10, 64)
if err != nil {
return fmt.Errorf("can not convert resourceVersion %q to int: %v", oldAccessor.GetResourceVersion(), err)
return fmt.Errorf("can not convert resourceVersion %q to int: %w", oldAccessor.GetResourceVersion(), err)
}
intResourceVersion++
accessor.SetResourceVersion(strconv.FormatUint(intResourceVersion, 10))
@ -352,9 +366,7 @@ func (c *fakeClient) Watch(ctx context.Context, list client.ObjectList, opts ...
return nil, err
}
if strings.HasSuffix(gvk.Kind, "List") {
gvk.Kind = gvk.Kind[:len(gvk.Kind)-4]
}
gvk.Kind = strings.TrimSuffix(gvk.Kind, "List")
listOpts := client.ListOptions{}
listOpts.ApplyOptions(opts)
@ -371,9 +383,7 @@ func (c *fakeClient) List(ctx context.Context, obj client.ObjectList, opts ...cl
originalKind := gvk.Kind
if strings.HasSuffix(gvk.Kind, "List") {
gvk.Kind = gvk.Kind[:len(gvk.Kind)-4]
}
gvk.Kind = strings.TrimSuffix(gvk.Kind, "List")
if _, isUnstructuredList := obj.(*unstructured.UnstructuredList); isUnstructuredList && !c.scheme.Recognizes(gvk) {
// We need to register the ListKind with UnstructuredList:
@ -477,6 +487,12 @@ func (c *fakeClient) Delete(ctx context.Context, obj client.Object, opts ...clie
delOptions := client.DeleteOptions{}
delOptions.ApplyOptions(opts)
for _, dryRunOpt := range delOptions.DryRun {
if dryRunOpt == metav1.DryRunAll {
return nil
}
}
// Check the ResourceVersion if that Precondition was specified.
if delOptions.Preconditions != nil && delOptions.Preconditions.ResourceVersion != nil {
name := accessor.GetName()
@ -511,6 +527,12 @@ func (c *fakeClient) DeleteAllOf(ctx context.Context, obj client.Object, opts ..
dcOptions := client.DeleteAllOfOptions{}
dcOptions.ApplyOptions(opts)
for _, dryRunOpt := range dcOptions.DryRun {
if dryRunOpt == metav1.DryRunAll {
return nil
}
}
gvr, _ := meta.UnsafeGuessKindToResource(gvk)
o, err := c.tracker.List(gvr, gvk, dcOptions.Namespace)
if err != nil {


@ -146,9 +146,7 @@ func (mc *metadataClient) List(ctx context.Context, obj ObjectList, opts ...List
}
gvk := metadata.GroupVersionKind()
if strings.HasSuffix(gvk.Kind, "List") {
gvk.Kind = gvk.Kind[:len(gvk.Kind)-4]
}
gvk.Kind = strings.TrimSuffix(gvk.Kind, "List")
listOpts := ListOptions{}
listOpts.ApplyOptions(opts)


@ -56,7 +56,7 @@ func (n *namespacedClient) RESTMapper() meta.RESTMapper {
func (n *namespacedClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error {
isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper())
if err != nil {
return fmt.Errorf("error finding the scope of the object: %v", err)
return fmt.Errorf("error finding the scope of the object: %w", err)
}
objectNamespace := obj.GetNamespace()
@ -74,7 +74,7 @@ func (n *namespacedClient) Create(ctx context.Context, obj Object, opts ...Creat
func (n *namespacedClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error {
isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper())
if err != nil {
return fmt.Errorf("error finding the scope of the object: %v", err)
return fmt.Errorf("error finding the scope of the object: %w", err)
}
objectNamespace := obj.GetNamespace()
@ -92,7 +92,7 @@ func (n *namespacedClient) Update(ctx context.Context, obj Object, opts ...Updat
func (n *namespacedClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error {
isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper())
if err != nil {
return fmt.Errorf("error finding the scope of the object: %v", err)
return fmt.Errorf("error finding the scope of the object: %w", err)
}
objectNamespace := obj.GetNamespace()
@ -110,7 +110,7 @@ func (n *namespacedClient) Delete(ctx context.Context, obj Object, opts ...Delet
func (n *namespacedClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error {
isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper())
if err != nil {
return fmt.Errorf("error finding the scope of the object: %v", err)
return fmt.Errorf("error finding the scope of the object: %w", err)
}
if isNamespaceScoped {
@ -123,7 +123,7 @@ func (n *namespacedClient) DeleteAllOf(ctx context.Context, obj Object, opts ...
func (n *namespacedClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error {
isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper())
if err != nil {
return fmt.Errorf("error finding the scope of the object: %v", err)
return fmt.Errorf("error finding the scope of the object: %w", err)
}
objectNamespace := obj.GetNamespace()
@ -141,7 +141,7 @@ func (n *namespacedClient) Patch(ctx context.Context, obj Object, patch Patch, o
func (n *namespacedClient) Get(ctx context.Context, key ObjectKey, obj Object) error {
isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper())
if err != nil {
return fmt.Errorf("error finding the scope of the object: %v", err)
return fmt.Errorf("error finding the scope of the object: %w", err)
}
if isNamespaceScoped {
if key.Namespace != "" && key.Namespace != n.namespace {
@ -179,7 +179,7 @@ func (nsw *namespacedClientStatusWriter) Update(ctx context.Context, obj Object,
isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, nsw.namespacedclient.Scheme(), nsw.namespacedclient.RESTMapper())
if err != nil {
return fmt.Errorf("error finding the scope of the object: %v", err)
return fmt.Errorf("error finding the scope of the object: %w", err)
}
objectNamespace := obj.GetNamespace()
@ -198,7 +198,7 @@ func (nsw *namespacedClientStatusWriter) Patch(ctx context.Context, obj Object,
isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, nsw.namespacedclient.Scheme(), nsw.namespacedclient.RESTMapper())
if err != nil {
return fmt.Errorf("error finding the scope of the object: %v", err)
return fmt.Errorf("error finding the scope of the object: %w", err)
}
objectNamespace := obj.GetNamespace()


@ -318,7 +318,7 @@ func (p PropagationPolicy) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) {
// pre-parsed selectors (since generally, selectors will be executed
// against the cache).
type ListOptions struct {
// LabelSelector filters results by label. Use SetLabelSelector to
// LabelSelector filters results by label. Use labels.Parse() to
// set from raw string form.
LabelSelector labels.Selector
// FieldSelector filters results by a particular field. In order


@ -95,8 +95,7 @@ func (uc *unstructuredClient) Update(ctx context.Context, obj Object, opts ...Up
// Delete implements client.Client.
func (uc *unstructuredClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error {
_, ok := obj.(*unstructured.Unstructured)
if !ok {
if _, ok := obj.(*unstructured.Unstructured); !ok {
return fmt.Errorf("unstructured client did not understand object: %T", obj)
}
@ -118,8 +117,7 @@ func (uc *unstructuredClient) Delete(ctx context.Context, obj Object, opts ...De
// DeleteAllOf implements client.Client.
func (uc *unstructuredClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error {
_, ok := obj.(*unstructured.Unstructured)
if !ok {
if _, ok := obj.(*unstructured.Unstructured); !ok {
return fmt.Errorf("unstructured client did not understand object: %T", obj)
}
@ -141,8 +139,7 @@ func (uc *unstructuredClient) DeleteAllOf(ctx context.Context, obj Object, opts
// Patch implements client.Client.
func (uc *unstructuredClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error {
_, ok := obj.(*unstructured.Unstructured)
if !ok {
if _, ok := obj.(*unstructured.Unstructured); !ok {
return fmt.Errorf("unstructured client did not understand object: %T", obj)
}
@ -201,9 +198,7 @@ func (uc *unstructuredClient) List(ctx context.Context, obj ObjectList, opts ...
}
gvk := u.GroupVersionKind()
if strings.HasSuffix(gvk.Kind, "List") {
gvk.Kind = gvk.Kind[:len(gvk.Kind)-4]
}
gvk.Kind = strings.TrimSuffix(gvk.Kind, "List")
listOpts := ListOptions{}
listOpts.ApplyOptions(opts)
@ -222,8 +217,7 @@ func (uc *unstructuredClient) List(ctx context.Context, obj ObjectList, opts ...
}
func (uc *unstructuredClient) UpdateStatus(ctx context.Context, obj Object, opts ...UpdateOption) error {
_, ok := obj.(*unstructured.Unstructured)
if !ok {
if _, ok := obj.(*unstructured.Unstructured); !ok {
return fmt.Errorf("unstructured client did not understand object: %T", obj)
}


@ -69,9 +69,7 @@ func (w *watchingClient) listOpts(opts ...ListOption) ListOptions {
func (w *watchingClient) metadataWatch(ctx context.Context, obj *metav1.PartialObjectMetadataList, opts ...ListOption) (watch.Interface, error) {
gvk := obj.GroupVersionKind()
if strings.HasSuffix(gvk.Kind, "List") {
gvk.Kind = gvk.Kind[:len(gvk.Kind)-4]
}
gvk.Kind = strings.TrimSuffix(gvk.Kind, "List")
listOpts := w.listOpts(opts...)
@ -85,9 +83,7 @@ func (w *watchingClient) metadataWatch(ctx context.Context, obj *metav1.PartialO
func (w *watchingClient) unstructuredWatch(ctx context.Context, obj *unstructured.UnstructuredList, opts ...ListOption) (watch.Interface, error) {
gvk := obj.GroupVersionKind()
if strings.HasSuffix(gvk.Kind, "List") {
gvk.Kind = gvk.Kind[:len(gvk.Kind)-4]
}
gvk.Kind = strings.TrimSuffix(gvk.Kind, "List")
r, err := w.client.unstructuredClient.cache.getResource(obj)
if err != nil {


@ -18,7 +18,7 @@ package config
import (
"fmt"
ioutil "io/ioutil"
"os"
"sync"
"k8s.io/apimachinery/pkg/runtime"
@ -96,7 +96,7 @@ func (d *DeferredFileLoader) loadFile() {
return
}
content, err := ioutil.ReadFile(d.path)
content, err := os.ReadFile(d.path)
if err != nil {
d.err = fmt.Errorf("could not read file at %s", d.path)
return


@ -23,6 +23,8 @@ import (
"github.com/go-logr/logr"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/internal/controller"
"sigs.k8s.io/controller-runtime/pkg/manager"
@ -45,9 +47,9 @@ type Options struct {
// The overall is a token bucket and the per-item is exponential.
RateLimiter ratelimiter.RateLimiter
// Log is the logger used for this controller and passed to each reconciliation
// request via the context field.
Log logr.Logger
// LogConstructor is used to construct a logger used for this controller and passed
// to each reconciliation via the context field.
LogConstructor func(request *reconcile.Request) logr.Logger
// CacheSyncTimeout refers to the time limit set to wait for syncing caches.
// Defaults to 2 minutes if not set.
@ -104,8 +106,20 @@ func NewUnmanaged(name string, mgr manager.Manager, options Options) (Controller
return nil, fmt.Errorf("must specify Name for Controller")
}
if options.Log.GetSink() == nil {
options.Log = mgr.GetLogger()
if options.LogConstructor == nil {
log := mgr.GetLogger().WithValues(
"controller", name,
)
options.LogConstructor = func(req *reconcile.Request) logr.Logger {
log := log
if req != nil {
log = log.WithValues(
"object", klog.KRef(req.Namespace, req.Name),
"namespace", req.Namespace, "name", req.Name,
)
}
return log
}
}
if options.MaxConcurrentReconciles <= 0 {
@ -135,7 +149,7 @@ func NewUnmanaged(name string, mgr manager.Manager, options Options) (Controller
CacheSyncTimeout: options.CacheSyncTimeout,
SetFields: mgr.SetFields,
Name: name,
Log: options.Log.WithName("controller").WithName(name),
LogConstructor: options.LogConstructor,
RecoverPanic: options.RecoverPanic,
}, nil
}


@ -345,30 +345,35 @@ func mutate(f MutateFn, key client.ObjectKey, obj client.Object) error {
return nil
}
// MutateFn is a function which mutates the existing object into it's desired state.
// MutateFn is a function which mutates the existing object into its desired state.
type MutateFn func() error
// AddFinalizer accepts an Object and adds the provided finalizer if not present.
func AddFinalizer(o client.Object, finalizer string) {
// It returns an indication of whether it updated the object's list of finalizers.
func AddFinalizer(o client.Object, finalizer string) (finalizersUpdated bool) {
f := o.GetFinalizers()
for _, e := range f {
if e == finalizer {
return
return false
}
}
o.SetFinalizers(append(f, finalizer))
return true
}
// RemoveFinalizer accepts an Object and removes the provided finalizer if present.
func RemoveFinalizer(o client.Object, finalizer string) {
// It returns an indication of whether it updated the object's list of finalizers.
func RemoveFinalizer(o client.Object, finalizer string) (finalizersUpdated bool) {
f := o.GetFinalizers()
for i := 0; i < len(f); i++ {
if f[i] == finalizer {
f = append(f[:i], f[i+1:]...)
i--
finalizersUpdated = true
}
}
o.SetFinalizers(f)
return
}
// ContainsFinalizer checks an Object that the provided finalizer is present.
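The AddFinalizer/RemoveFinalizer signature change above lets callers skip a no-op Update when the finalizer list did not change; a hedged sketch of typical reconciler usage (the finalizer name and object type are placeholders):

```go
import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

const myFinalizer = "example.io/cleanup" // hypothetical finalizer name

func ensureFinalizer(ctx context.Context, c client.Client, obj *corev1.ConfigMap) error {
	// Only issue an Update if the finalizer list actually changed.
	if controllerutil.AddFinalizer(obj, myFinalizer) {
		return c.Update(ctx, obj)
	}
	return nil
}

func clearFinalizer(ctx context.Context, c client.Client, obj *corev1.ConfigMap) error {
	if controllerutil.RemoveFinalizer(obj, myFinalizer) {
		return c.Update(ctx, obj)
	}
	return nil
}
```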


@ -25,6 +25,7 @@ import (
"github.com/go-logr/logr"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/client-go/util/workqueue"
"sigs.k8s.io/controller-runtime/pkg/handler"
ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics"
@ -83,8 +84,11 @@ type Controller struct {
// startWatches maintains a list of sources, handlers, and predicates to start when the controller is started.
startWatches []watchDescription
// Log is used to log messages to users during reconciliation, or for example when a watch is started.
Log logr.Logger
// LogConstructor is used to construct a logger to then log messages to users during reconciliation,
// or for example when a watch is started.
// Note: LogConstructor has to be able to handle nil requests as we are also using it
// outside the context of a reconciliation.
LogConstructor func(request *reconcile.Request) logr.Logger
// RecoverPanic indicates whether the panic caused by reconcile should be recovered.
RecoverPanic bool
@ -99,18 +103,21 @@ type watchDescription struct {
// Reconcile implements reconcile.Reconciler.
func (c *Controller) Reconcile(ctx context.Context, req reconcile.Request) (_ reconcile.Result, err error) {
if c.RecoverPanic {
defer func() {
if r := recover(); r != nil {
defer func() {
if r := recover(); r != nil {
if c.RecoverPanic {
for _, fn := range utilruntime.PanicHandlers {
fn(r)
}
err = fmt.Errorf("panic: %v [recovered]", r)
return
}
}()
}
log := c.Log.WithValues("name", req.Name, "namespace", req.Namespace)
ctx = logf.IntoContext(ctx, log)
log := logf.FromContext(ctx)
log.Info(fmt.Sprintf("Observed a panic in reconciler: %v", r))
panic(r)
}
}()
return c.Do.Reconcile(ctx, req)
}
@ -140,7 +147,7 @@ func (c *Controller) Watch(src source.Source, evthdler handler.EventHandler, prc
return nil
}
c.Log.Info("Starting EventSource", "source", src)
c.LogConstructor(nil).Info("Starting EventSource", "source", src)
return src.Start(c.ctx, evthdler, c.Queue, prct...)
}
@ -175,7 +182,7 @@ func (c *Controller) Start(ctx context.Context) error {
// caches to sync so that they have a chance to register their intendeded
// caches.
for _, watch := range c.startWatches {
c.Log.Info("Starting EventSource", "source", fmt.Sprintf("%s", watch.src))
c.LogConstructor(nil).Info("Starting EventSource", "source", fmt.Sprintf("%s", watch.src))
if err := watch.src.Start(ctx, watch.handler, c.Queue, watch.predicates...); err != nil {
return err
@ -183,7 +190,7 @@ func (c *Controller) Start(ctx context.Context) error {
}
// Start the SharedIndexInformer factories to begin populating the SharedIndexInformer caches
c.Log.Info("Starting Controller")
c.LogConstructor(nil).Info("Starting Controller")
for _, watch := range c.startWatches {
syncingSource, ok := watch.src.(source.SyncingSource)
@ -200,7 +207,7 @@ func (c *Controller) Start(ctx context.Context) error {
// is an error or a timeout
if err := syncingSource.WaitForSync(sourceStartCtx); err != nil {
err := fmt.Errorf("failed to wait for %s caches to sync: %w", c.Name, err)
c.Log.Error(err, "Could not wait for Cache to sync")
c.LogConstructor(nil).Error(err, "Could not wait for Cache to sync")
return err
}
@ -217,7 +224,7 @@ func (c *Controller) Start(ctx context.Context) error {
c.startWatches = nil
// Launch workers to process resources
c.Log.Info("Starting workers", "worker count", c.MaxConcurrentReconciles)
c.LogConstructor(nil).Info("Starting workers", "worker count", c.MaxConcurrentReconciles)
wg.Add(c.MaxConcurrentReconciles)
for i := 0; i < c.MaxConcurrentReconciles; i++ {
go func() {
@ -237,9 +244,9 @@ func (c *Controller) Start(ctx context.Context) error {
}
<-ctx.Done()
c.Log.Info("Shutdown signal received, waiting for all workers to finish")
c.LogConstructor(nil).Info("Shutdown signal received, waiting for all workers to finish")
wg.Wait()
c.Log.Info("All workers finished")
c.LogConstructor(nil).Info("All workers finished")
return nil
}
@ -291,19 +298,21 @@ func (c *Controller) reconcileHandler(ctx context.Context, obj interface{}) {
c.updateMetrics(time.Since(reconcileStartTS))
}()
// Make sure that the the object is a valid request.
// Make sure that the object is a valid request.
req, ok := obj.(reconcile.Request)
if !ok {
// As the item in the workqueue is actually invalid, we call
// Forget here else we'd go into a loop of attempting to
// process a work item that is invalid.
c.Queue.Forget(obj)
c.Log.Error(nil, "Queue item was not a Request", "type", fmt.Sprintf("%T", obj), "value", obj)
c.LogConstructor(nil).Error(nil, "Queue item was not a Request", "type", fmt.Sprintf("%T", obj), "value", obj)
// Return true, don't take a break
return
}
log := c.Log.WithValues("name", req.Name, "namespace", req.Namespace)
log := c.LogConstructor(&req)
log = log.WithValues("reconcileID", uuid.NewUUID())
ctx = logf.IntoContext(ctx, log)
// RunInformersAndControllers the syncHandler, passing it the Namespace/Name string of the
@ -336,7 +345,7 @@ func (c *Controller) reconcileHandler(ctx context.Context, obj interface{}) {
// GetLogger returns this controller's logger.
func (c *Controller) GetLogger() logr.Logger {
return c.Log
return c.LogConstructor(nil)
}
// InjectFunc implement SetFields.Injector.


@ -19,7 +19,6 @@ package leaderelection
import (
"errors"
"fmt"
"io/ioutil"
"os"
"k8s.io/apimachinery/pkg/util/uuid"
@ -27,6 +26,7 @@ import (
corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/leaderelection/resourcelock"
"sigs.k8s.io/controller-runtime/pkg/recorder"
)
@ -39,7 +39,7 @@ type Options struct {
LeaderElection bool
// LeaderElectionResourceLock determines which resource lock to use for leader election,
// defaults to "configmapsleases".
// defaults to "leases".
LeaderElectionResourceLock string
// LeaderElectionNamespace determines the namespace in which the leader
@ -57,11 +57,12 @@ func NewResourceLock(config *rest.Config, recorderProvider recorder.Provider, op
return nil, nil
}
// Default resource lock to "configmapsleases". We must keep this default until we are sure all controller-runtime
// users have upgraded from the original default ConfigMap lock to a controller-runtime version that has this new
// default. Many users of controller-runtime skip versions, so we should be extremely conservative here.
// Default resource lock to "leases". The previous default (from v0.7.0 to v0.11.x) was configmapsleases, which was
// used to migrate from configmaps to leases. Since the default was "configmapsleases" for over a year, spanning
// five minor releases, any actively maintained operators are very likely to have a released version that uses
// "configmapsleases". Therefore defaulting to "leases" should be safe.
if options.LeaderElectionResourceLock == "" {
options.LeaderElectionResourceLock = resourcelock.ConfigMapsLeasesResourceLock
options.LeaderElectionResourceLock = resourcelock.LeasesResourceLock
}
// LeaderElectionID must be provided to prevent clashes
@ -118,7 +119,7 @@ func getInClusterNamespace() (string, error) {
}
// Load the namespace file and return its content
namespace, err := ioutil.ReadFile(inClusterNamespacePath)
namespace, err := os.ReadFile(inClusterNamespacePath)
if err != nil {
return "", fmt.Errorf("error reading namespace file: %w", err)
}
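The hunk above flips the default lock from "configmapsleases" to "leases". Deployments that still run replicas built against the old default may want to pin the lock type explicitly during a rolling upgrade; a minimal sketch using manager Options, with a hypothetical lock ID:

package example

import (
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
	ctrl "sigs.k8s.io/controller-runtime"
)

func newManager(cfg *rest.Config) (ctrl.Manager, error) {
	return ctrl.NewManager(cfg, ctrl.Options{
		LeaderElection:   true,
		LeaderElectionID: "example-operator-lock", // hypothetical lock name
		// Pin the lock type so the v0.12 default change cannot split
		// leadership while old replicas (configmapsleases) and new
		// replicas (defaulting to leases) run side by side.
		LeaderElectionResourceLock: resourcelock.ConfigMapsLeasesResourceLock,
	})
}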

View File

@ -177,7 +177,7 @@ func (l *DelegatingLogSink) Fulfill(actual logr.LogSink) {
}
// NewDelegatingLogSink constructs a new DelegatingLogSink which uses
// the given logger before it's promise is fulfilled.
// the given logger before its promise is fulfilled.
func NewDelegatingLogSink(initial logr.LogSink) *DelegatingLogSink {
l := &DelegatingLogSink{
logger: initial,

View File

@ -29,7 +29,7 @@ limitations under the License.
//
// All logging in controller-runtime is structured, using a set of interfaces
// defined by a package called logr
// (https://godoc.org/github.com/go-logr/logr). The sub-package zap provides
// (https://pkg.go.dev/github.com/go-logr/logr). The sub-package zap provides
// helpers for setting up logr backed by Zap (go.uber.org/zap).
package log

View File

@ -47,7 +47,7 @@ type KubeAPIWarningLogger struct {
}
// HandleWarningHeader handles logging for responses from API server that are
// warnings with code being 299 and uses a logr.Logger for it's logging purposes.
// warnings with code being 299 and uses a logr.Logger for its logging purposes.
func (l *KubeAPIWarningLogger) HandleWarningHeader(code int, agent string, message string) {
if code != 299 || len(message) == 0 {
return

View File

@ -457,21 +457,21 @@ func (cm *controllerManager) Start(ctx context.Context) (err error) {
// between conversion webhooks and the cache sync (usually initial list) which causes the webhooks
// to never start because no cache can be populated.
if err := cm.runnables.Webhooks.Start(cm.internalCtx); err != nil {
if err != wait.ErrWaitTimeout {
if !errors.Is(err, wait.ErrWaitTimeout) {
return err
}
}
// Start and wait for caches.
if err := cm.runnables.Caches.Start(cm.internalCtx); err != nil {
if err != wait.ErrWaitTimeout {
if !errors.Is(err, wait.ErrWaitTimeout) {
return err
}
}
// Start the non-leaderelection Runnables after the cache has synced.
if err := cm.runnables.Others.Start(cm.internalCtx); err != nil {
if err != wait.ErrWaitTimeout {
if !errors.Is(err, wait.ErrWaitTimeout) {
return err
}
}
@ -587,7 +587,7 @@ func (cm *controllerManager) engageStopProcedure(stopComplete <-chan struct{}) e
}()
<-cm.shutdownCtx.Done()
if err := cm.shutdownCtx.Err(); err != nil && err != context.Canceled {
if err := cm.shutdownCtx.Err(); err != nil && !errors.Is(err, context.Canceled) {
if errors.Is(err, context.DeadlineExceeded) {
if cm.gracefulShutdownTimeout > 0 {
return fmt.Errorf("failed waiting for all runnables to end within grace period of %s: %w", cm.gracefulShutdownTimeout, err)
@ -597,6 +597,7 @@ func (cm *controllerManager) engageStopProcedure(stopComplete <-chan struct{}) e
// For any other error, return the error.
return err
}
return nil
}
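The comparisons above switch from == to errors.Is so that the wait.ErrWaitTimeout sentinel is still recognized once it has been wrapped; a small self-contained illustration of the difference:

package main

import (
	"errors"
	"fmt"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	// Wrapping with %w preserves the sentinel for errors.Is but breaks ==.
	err := fmt.Errorf("failed to start caches: %w", wait.ErrWaitTimeout)
	fmt.Println(err == wait.ErrWaitTimeout)           // false
	fmt.Println(errors.Is(err, wait.ErrWaitTimeout))  // true
}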

View File

@ -98,9 +98,9 @@ type Manager interface {
// Options are the arguments for creating a new Manager.
type Options struct {
// Scheme is the scheme used to resolve runtime.Objects to GroupVersionKinds / Resources
// Scheme is the scheme used to resolve runtime.Objects to GroupVersionKinds / Resources.
// Defaults to the kubernetes/client-go scheme.Scheme, but it's almost always better
// idea to pass your own scheme in. See the documentation in pkg/scheme for more information.
// to pass your own scheme in. See the documentation in pkg/scheme for more information.
Scheme *runtime.Scheme
// MapperProvider provides the rest mapper used to map go types to Kubernetes APIs
@ -186,11 +186,11 @@ type Options struct {
// between tries of actions. Default is 2 seconds.
RetryPeriod *time.Duration
// Namespace if specified restricts the manager's cache to watch objects in
// the desired namespace Defaults to all namespaces
// Namespace, if specified, restricts the manager's cache to watch objects in
// the desired namespace. Defaults to all namespaces.
//
// Note: If a namespace is specified, controllers can still Watch for a
// cluster-scoped resource (e.g Node). For namespaced resources the cache
// cluster-scoped resource (e.g Node). For namespaced resources, the cache
// will only hold objects from the desired namespace.
Namespace string
@ -228,7 +228,7 @@ type Options struct {
// if this is set, the Manager will use this server instead.
WebhookServer *webhook.Server
// Functions to all for a user to customize the values that will be injected.
// Functions to allow for a user to customize values that will be injected.
// NewCache is the function that will create the cache to be used
// by the manager. If not set this will use the default new cache function.
@ -239,6 +239,11 @@ type Options struct {
// use the cache for reads and the client for writes.
NewClient cluster.NewClientFunc
// BaseContext is the function that provides Context values to Runnables
// managed by the Manager. If a BaseContext function isn't provided, Runnables
// will receive a new Background Context instead.
BaseContext BaseContextFunc
// ClientDisableCacheFor tells the client that, if any cache is used, to bypass it
// for the given objects.
ClientDisableCacheFor []client.Object
@ -278,6 +283,10 @@ type Options struct {
newHealthProbeListener func(addr string) (net.Listener, error)
}
// BaseContextFunc is a function used to provide a base Context to Runnables
// managed by a Manager.
type BaseContextFunc func() context.Context
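With BaseContext and the BaseContextFunc type added above, every runnable group derives its context from a caller-supplied function instead of context.Background(). A minimal sketch of wiring it up so values or cancellation owned by the caller reach all Runnables; the function name is illustrative:

package example

import (
	"context"

	"k8s.io/client-go/rest"
	ctrl "sigs.k8s.io/controller-runtime"
)

func runManager(cfg *rest.Config) error {
	// baseCtx is owned by the caller; values and cancellation attached here
	// are visible to every Runnable the manager starts.
	baseCtx, cancel := context.WithCancel(context.Background())
	defer cancel()

	mgr, err := ctrl.NewManager(cfg, ctrl.Options{
		BaseContext: func() context.Context { return baseCtx },
	})
	if err != nil {
		return err
	}
	return mgr.Start(ctrl.SetupSignalHandler())
}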
// Runnable allows a component to be started.
// It's very important that Start blocks until
// it's done running.
@ -335,11 +344,21 @@ func New(config *rest.Config, options Options) (Manager, error) {
}
// Create the resource lock to enable leader election)
leaderConfig := options.LeaderElectionConfig
if leaderConfig == nil {
var leaderConfig *rest.Config
var leaderRecorderProvider *intrec.Provider
if options.LeaderElectionConfig == nil {
leaderConfig = rest.CopyConfig(config)
leaderRecorderProvider = recorderProvider
} else {
leaderConfig = rest.CopyConfig(options.LeaderElectionConfig)
leaderRecorderProvider, err = options.newRecorderProvider(leaderConfig, cluster.GetScheme(), options.Logger.WithName("events"), options.makeBroadcaster)
if err != nil {
return nil, err
}
}
resourceLock, err := options.newResourceLock(leaderConfig, recorderProvider, leaderelection.Options{
resourceLock, err := options.newResourceLock(leaderConfig, leaderRecorderProvider, leaderelection.Options{
LeaderElection: options.LeaderElection,
LeaderElectionResourceLock: options.LeaderElectionResourceLock,
LeaderElectionID: options.LeaderElectionID,
@ -367,7 +386,7 @@ func New(config *rest.Config, options Options) (Manager, error) {
}
errChan := make(chan error)
runnables := newRunnables(errChan)
runnables := newRunnables(options.BaseContext, errChan)
return &controllerManager{
stopProcedureEngaged: pointer.Int64(0),
@ -475,6 +494,11 @@ func (o Options) AndFromOrDie(loader config.ControllerManagerConfiguration) Opti
}
func (o Options) setLeaderElectionConfig(obj v1alpha1.ControllerManagerConfigurationSpec) Options {
if obj.LeaderElection == nil {
// The source does not have any configuration; noop
return o
}
if !o.LeaderElection && obj.LeaderElection.LeaderElect != nil {
o.LeaderElection = *obj.LeaderElection.LeaderElect
}
@ -514,11 +538,17 @@ func defaultHealthProbeListener(addr string) (net.Listener, error) {
ln, err := net.Listen("tcp", addr)
if err != nil {
return nil, fmt.Errorf("error listening on %s: %v", addr, err)
return nil, fmt.Errorf("error listening on %s: %w", addr, err)
}
return ln, nil
}
// defaultBaseContext is used as the BaseContext value in Options if one
// has not already been set.
func defaultBaseContext() context.Context {
return context.Background()
}
// setOptionsDefaults set default values for Options fields.
func setOptionsDefaults(options Options) Options {
// Allow newResourceLock to be mocked
@ -582,5 +612,9 @@ func setOptionsDefaults(options Options) Options {
options.Logger = log.Log
}
if options.BaseContext == nil {
options.BaseContext = defaultBaseContext
}
return options
}
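The New() changes above copy a dedicated leaderConfig and, when Options.LeaderElectionConfig is set, build a separate recorder provider for it. A hedged sketch of pointing the leader-election lock at different credentials than the manager's own clients; the kubeconfig path and lock ID are hypothetical:

package example

import (
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
	ctrl "sigs.k8s.io/controller-runtime"
)

func newManagerWithSeparateLock(workloadCfg *rest.Config) (ctrl.Manager, error) {
	// Hypothetical kubeconfig used only for the leader-election lock; the
	// manager's caches and clients keep using workloadCfg.
	lockCfg, err := clientcmd.BuildConfigFromFlags("", "/etc/example/lock-kubeconfig")
	if err != nil {
		return nil, err
	}
	return ctrl.NewManager(workloadCfg, ctrl.Options{
		LeaderElection:       true,
		LeaderElectionID:     "example-operator-lock",
		LeaderElectionConfig: lockCfg,
	})
}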

View File

@ -35,12 +35,12 @@ type runnables struct {
}
// newRunnables creates a new runnables object.
func newRunnables(errChan chan error) *runnables {
func newRunnables(baseContext BaseContextFunc, errChan chan error) *runnables {
return &runnables{
Webhooks: newRunnableGroup(errChan),
Caches: newRunnableGroup(errChan),
LeaderElection: newRunnableGroup(errChan),
Others: newRunnableGroup(errChan),
Webhooks: newRunnableGroup(baseContext, errChan),
Caches: newRunnableGroup(baseContext, errChan),
LeaderElection: newRunnableGroup(baseContext, errChan),
Others: newRunnableGroup(baseContext, errChan),
}
}
@ -100,14 +100,15 @@ type runnableGroup struct {
wg *sync.WaitGroup
}
func newRunnableGroup(errChan chan error) *runnableGroup {
func newRunnableGroup(baseContext BaseContextFunc, errChan chan error) *runnableGroup {
r := &runnableGroup{
startReadyCh: make(chan *readyRunnable),
errChan: errChan,
ch: make(chan *readyRunnable),
wg: new(sync.WaitGroup),
}
r.ctx, r.cancel = context.WithCancel(context.Background())
r.ctx, r.cancel = context.WithCancel(baseContext())
return r
}

View File

@ -1,3 +1,4 @@
//go:build !windows
// +build !windows
/*

View File

@ -24,6 +24,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/event"
logf "sigs.k8s.io/controller-runtime/pkg/internal/log"
"sigs.k8s.io/controller-runtime/pkg/runtime/inject"
)
var log = logf.RuntimeLog.WithName("predicate").WithName("eventFilters")
@ -239,6 +240,15 @@ type and struct {
predicates []Predicate
}
func (a and) InjectFunc(f inject.Func) error {
for _, p := range a.predicates {
if err := f(p); err != nil {
return err
}
}
return nil
}
func (a and) Create(e event.CreateEvent) bool {
for _, p := range a.predicates {
if !p.Create(e) {
@ -284,6 +294,15 @@ type or struct {
predicates []Predicate
}
func (o or) InjectFunc(f inject.Func) error {
for _, p := range o.predicates {
if err := f(p); err != nil {
return err
}
}
return nil
}
func (o or) Create(e event.CreateEvent) bool {
for _, p := range o.predicates {
if p.Create(e) {

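The InjectFunc methods added above forward dependency injection to every predicate wrapped by and/or, so composite predicates no longer drop it. A purely illustrative sketch of composing predicates on a controller:

package example

import (
	corev1 "k8s.io/api/core/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/predicate"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

func setupWithPredicates(mgr ctrl.Manager, r reconcile.Reconciler) error {
	// Reconcile when the spec generation or the labels change; with the
	// InjectFunc added above, anything injected into this composite
	// predicate is now forwarded to both inner predicates.
	pred := predicate.Or(
		predicate.GenerationChangedPredicate{},
		predicate.LabelChangedPredicate{},
	)
	return ctrl.NewControllerManagedBy(mgr).
		For(&corev1.Pod{}).
		WithEventFilter(pred).
		Complete(r)
}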
View File

@ -24,6 +24,7 @@ import (
"time"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/util/workqueue"
"sigs.k8s.io/controller-runtime/pkg/client"
@ -133,9 +134,14 @@ func (ks *Kind) Start(ctx context.Context, handler handler.EventHandler, queue w
i, lastErr = ks.cache.GetInformer(ctx, ks.Type)
if lastErr != nil {
kindMatchErr := &meta.NoKindMatchError{}
if errors.As(lastErr, &kindMatchErr) {
switch {
case errors.As(lastErr, &kindMatchErr):
log.Error(lastErr, "if kind is a CRD, it should be installed before calling Start",
"kind", kindMatchErr.GroupKind)
case runtime.IsNotRegisteredError(lastErr):
log.Error(lastErr, "kind must be registered to the Scheme")
default:
log.Error(lastErr, "failed to get informer from cache")
}
return false, nil // Retry.
}
@ -175,6 +181,9 @@ func (ks *Kind) WaitForSync(ctx context.Context) error {
return err
case <-ctx.Done():
ks.startCancel()
if errors.Is(ctx.Err(), context.Canceled) {
return nil
}
return errors.New("timed out waiting for cache to be synced")
}
}
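The switch above now distinguishes a missing CRD from a Go type that was never added to the Scheme ("kind must be registered") and from other informer failures. A minimal sketch of the scheme registration that second branch points at; a CRD's generated AddToScheme call would sit alongside the client-go one:

package example

import (
	"k8s.io/apimachinery/pkg/runtime"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"
	ctrl "sigs.k8s.io/controller-runtime"
)

func newManagerWithScheme(cfg *rest.Config) (ctrl.Manager, error) {
	scheme := runtime.NewScheme()
	// Register types before any controller starts its watches, otherwise
	// Kind.Start keeps retrying with the errors logged above. A CRD's own
	// AddToScheme would be called here as well.
	utilruntime.Must(clientgoscheme.AddToScheme(scheme))
	return ctrl.NewManager(cfg, ctrl.Options{Scheme: scheme})
}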

View File

@ -21,7 +21,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
v1 "k8s.io/api/admission/v1"
@ -60,7 +59,7 @@ func (wh *Webhook) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}
defer r.Body.Close()
if body, err = ioutil.ReadAll(r.Body); err != nil {
if body, err = io.ReadAll(r.Body); err != nil {
wh.log.Error(err, "unable to read the body from the incoming request")
reviewResponse = Errored(http.StatusBadRequest, err)
wh.writeResponse(w, reviewResponse)
@ -125,8 +124,16 @@ func (wh *Webhook) writeResponseTyped(w io.Writer, response Response, admRevGVK
// writeAdmissionResponse writes ar to w.
func (wh *Webhook) writeAdmissionResponse(w io.Writer, ar v1.AdmissionReview) {
if err := json.NewEncoder(w).Encode(ar); err != nil {
wh.log.Error(err, "unable to encode the response")
wh.writeResponse(w, Errored(http.StatusInternalServerError, err))
wh.log.Error(err, "unable to encode and write the response")
// Since the `ar v1.AdmissionReview` is a clear and legal object,
// it should not have problem to be marshalled into bytes.
// The error here is probably caused by the abnormal HTTP connection,
// e.g., broken pipe, so we can only write the error response once,
// to avoid endless circular calling.
serverError := Errored(http.StatusInternalServerError, err)
if err = json.NewEncoder(w).Encode(v1.AdmissionReview{Response: &serverError.AdmissionResponse}); err != nil {
wh.log.Error(err, "still unable to encode and write the InternalServerError response")
}
} else {
res := ar.Response
if log := wh.log; log.V(1).Enabled() {

View File

@ -21,7 +21,6 @@ import (
"crypto/tls"
"crypto/x509"
"fmt"
"io/ioutil"
"net"
"net/http"
"os"
@ -77,6 +76,9 @@ type Server struct {
// "", "1.0", "1.1", "1.2" and "1.3" only ("" is equivalent to "1.0" for backwards compatibility)
TLSMinVersion string
// TLSOpts is used to allow configuring the TLS config used for the server
TLSOpts []func(*tls.Config)
// WebhookMux is the multiplexer that handles different webhooks.
WebhookMux *http.ServeMux
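TLSOpts, added above, lets callers mutate the server's fallback *tls.Config just before the listener is created (see the loop applied in Start further below). A minimal sketch; the minimum-version choice is illustrative only:

package example

import (
	"crypto/tls"

	"sigs.k8s.io/controller-runtime/pkg/webhook"
)

func newWebhookServer() *webhook.Server {
	return &webhook.Server{
		Port: 9443,
		// Each function receives the fallback *tls.Config built in Start
		// and may adjust it before the TLS listener is created.
		TLSOpts: []func(*tls.Config){
			func(cfg *tls.Config) {
				cfg.MinVersion = tls.VersionTLS12
			},
		},
	}
}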
@ -241,9 +243,9 @@ func (s *Server) Start(ctx context.Context) error {
// load CA to verify client certificate
if s.ClientCAName != "" {
certPool := x509.NewCertPool()
clientCABytes, err := ioutil.ReadFile(filepath.Join(s.CertDir, s.ClientCAName))
clientCABytes, err := os.ReadFile(filepath.Join(s.CertDir, s.ClientCAName))
if err != nil {
return fmt.Errorf("failed to read client CA cert: %v", err)
return fmt.Errorf("failed to read client CA cert: %w", err)
}
ok := certPool.AppendCertsFromPEM(clientCABytes)
@ -255,6 +257,11 @@ func (s *Server) Start(ctx context.Context) error {
cfg.ClientAuth = tls.RequireAndVerifyClientCert
}
// fallback TLS config ready, will now mutate if passer wants full control over it
for _, op := range s.TLSOpts {
op(cfg)
}
listener, err := tls.Listen("tcp", net.JoinHostPort(s.Host, strconv.Itoa(s.Port)), cfg)
if err != nil {
return err
@ -305,11 +312,11 @@ func (s *Server) StartedChecker() healthz.Checker {
d := &net.Dialer{Timeout: 10 * time.Second}
conn, err := tls.DialWithDialer(d, "tcp", net.JoinHostPort(s.Host, strconv.Itoa(s.Port)), config)
if err != nil {
return fmt.Errorf("webhook server is not reachable: %v", err)
return fmt.Errorf("webhook server is not reachable: %w", err)
}
if err := conn.Close(); err != nil {
return fmt.Errorf("webhook server is not reachable: closing connection: %v", err)
return fmt.Errorf("webhook server is not reachable: closing connection: %w", err)
}
return nil