Compare commits

157 Commits

| Author | SHA1 | Date |
| --- | --- | --- |
| | 093aef0dad | |
| | 8007df5f6c | |
| | f8f1b61ba3 | |
| | 8697b44eea | |
| | 69dfa708a6 | |
| | cebed7e704 | |
| | 89c110b595 | |
| | 90b69e9ae5 | |
| | 60c6378a12 | |
| | c7f25189f0 | |
| | 9169c08c91 | |
| | d65e9d9227 | |
| | 5f90e7b481 | |
| | 717b8bfd69 | |
| | c61756277b | |
| | 370078d070 | |
| | 7258614f50 | |
| | e5ef2e16d8 | |
| | 762f9b70f3 | |
| | 1265e8382e | |
| | acb47d5407 | |
| | e4cacd37c4 | |
| | a16fb84a8c | |
| | 4fd18478f5 | |
| | 65db274b8d | |
| | 11a5e25708 | |
| | 04266647b1 | |
| | cc13a7d417 | |
| | ad846ac0fd | |
| | c323d36706 | |
| | 782fb85b94 | |
| | f5aa9e4d10 | |
| | 367311bd6f | |
| | 70bee6a3a5 | |
| | b111e50082 | |
| | 3ef5ab187e | |
| | 30f4accb42 | |
| | 00472077d3 | |
| | bfdad63e27 | |
| | a093a7627f | |
| | ccee58366a | |
| | edb9faabbf | |
| | 7ac688a30f | |
| | f948991e78 | |
| | 382663864e | |
| | 7e21b91e9d | |
| | d78929e7f6 | |
| | 54992bf424 | |
| | 8d65e80ecb | |
| | 363a7155a5 | |
| | 73452f8a58 | |
| | d948e6b41c | |
| | 8849c3f30c | |
| | 0371401803 | |
| | 88c35a9acf | |
| | 847cfc9f8b | |
| | 9ab0b2ecae | |
| | 09e5225f84 | |
| | 72bcdda3f0 | |
| | df9b446fd7 | |
| | 3d9aab3cdc | |
| | bd7681ae3f | |
| | 95e00254f8 | |
| | 099cba69bd | |
| | 6b2984ebc4 | |
| | 7d150d0b6b | |
| | adb68bcaab | |
| | a0c23b4210 | |
| | a22b34675f | |
| | fa0e8d60a3 | |
| | f38075deb3 | |
| | 4386ff4b8d | |
| | 0be58f261a | |
| | 83ce6ca8ce | |
| | 1f371a01cf | |
| | a9fd001c11 | |
| | 8a3ce6d85c | |
| | 0aecd43903 | |
| | 86a368824c | |
| | fbecbb86e4 | |
| | 1ade3a1998 | |
| | 3de313666b | |
| | 5fd9f449e7 | |
| | 792124280f | |
| | c1e23597e7 | |
| | aba38192fb | |
| | c0c2dd1f6f | |
| | 4a5648ee41 | |
| | f15cf615b8 | |
| | 9a03edb8e7 | |
| | a00ce82f1c | |
| | b0fffe419a | |
| | 187312fe86 | |
| | ed7c77a929 | |
| | 425d65e076 | |
| | b58645a27c | |
| | c0ffe8428a | |
| | e56739ceba | |
| | ad9a694fe4 | |
| | b4dd8b8c39 | |
| | ed70eac8b7 | |
| | 917f5a0f16 | |
| | e284fd71cb | |
| | b371e3bfc5 | |
| | 9664cf8123 | |
| | 98ccd3d43f | |
| | 3951079de1 | |
| | 517c1fff4e | |
| | c036d3f6b0 | |
| | ce2fb703a6 | |
| | 9970faba81 | |
| | a56a803031 | |
| | 2bc3fef13e | |
| | e03364f7dd | |
| | 51a33e63f1 | |
| | e5e3a1cf5c | |
| | da6623b2e7 | |
| | 112657a1f9 | |
| | ab8fdc7dbd | |
| | 2d495813b7 | |
| | d8c17c206f | |
| | 6cde7989d5 | |
| | 1c4ef33687 | |
| | da6681916f | |
| | 67ddccd3cc | |
| | ed31317b27 | |
| | f9456de217 | |
| | 7493226dda | |
| | 4f069a220a | |
| | b855894da0 | |
| | 55bb49480a | |
| | d8b1a12ce6 | |
| | a586397dc3 | |
| | 73bcea9c8c | |
| | 723667dff7 | |
| | 531c0dbb68 | |
| | 61c0cc745e | |
| | 553ae80972 | |
| | c517b47f2f | |
| | e360551b19 | |
| | 8aefb18433 | |
| | b0c5e00ccf | |
| | 36e77462ae | |
| | 5bbbdfbc69 | |
| | b560016286 | |
| | f6495020a3 | |
| | ae94ad9510 | |
| | c7bab2eeca | |
| | c0b63afb74 | |
| | 2565df31d1 | |
| | c8139b3f94 | |
| | 27374da031 | |
| | 762cb1bc26 | |
| | bc9ce5764f | |
| | e8d9803a2b | |
| | 23f41cb849 | |
| | 33f542da00 | |
@@ -0,0 +1,10 @@
+# All
+** @argoproj/argocd-approvers
+
+# Docs
+/docs/** @argoproj/argocd-approvers @argoproj/argocd-approvers-docs
+/README.md @argoproj/argocd-approvers @argoproj/argocd-approvers-docs
+
+# CI
+/.codecov.yml @argoproj/argocd-approvers @argoproj/argocd-approvers-ci
+/.github/** @argoproj/argocd-approvers @argoproj/argocd-approvers-ci
@@ -4,7 +4,23 @@ updates:
     directory: "/"
     schedule:
       interval: "daily"
     commit-message:
       prefix: "chore(deps)"
+    groups:
+      dependencies:
+        applies-to: version-updates
+        update-types:
+          - "minor"
+          - "patch"
+  - package-ecosystem: "github-actions"
+    directory: "/"
+    schedule:
+      interval: "daily"
+    commit-message:
+      prefix: "chore(deps)"
+    groups:
+      dependencies:
+        applies-to: version-updates
+        update-types:
+          - "minor"
+          - "patch"
@@ -8,27 +8,23 @@ on:
   pull_request:
     branches:
       - 'master'
+      - 'release-*'
 jobs:
   test:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@master
-      - uses: actions/cache@v2.1.6
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+      - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
         with:
-          path: ~/go/pkg/mod
-          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
-          restore-keys: |
-            ${{ runner.os }}-go-
-      - uses: actions/setup-go@v2.1.3
-        with:
-          go-version: '1.15.6'
+          go-version-file: go.mod
       - run: go mod tidy
       - run: make test
       - name: Run golangci-lint
-        uses: golangci/golangci-lint-action@v2
+        uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0
         with:
-          version: v1.38.0
-          args: --timeout 5m
-      - uses: codecov/codecov-action@v1.5.0
+          version: v2.1.6
+          args: --verbose
+      - uses: codecov/codecov-action@ad3126e916f78f00edff4ed0317cf185271ccc2d # v5.4.2
         with:
           token: ${{ secrets.CODECOV_TOKEN }} #required
-          file: ./coverage.out
+          files: ./coverage.out
@@ -3,4 +3,5 @@
 .vscode
 .idea
 coverage.out
 vendor/
+.tool-versions
@@ -0,0 +1,129 @@
+version: "2"
+linters:
+  enable:
+    - errorlint
+    - gocritic
+    - gomodguard
+    - importas
+    - misspell
+    - perfsprint
+    - revive
+    - testifylint
+    - thelper
+    - unparam
+    - usestdlibvars
+    - whitespace
+    - wrapcheck
+  settings:
+    gocritic:
+      disabled-checks:
+        - appendAssign
+        - assignOp
+        - exitAfterDefer
+        - typeSwitchVar
+    importas:
+      alias:
+        - pkg: k8s.io/api/apps/v1
+          alias: appsv1
+        - pkg: k8s.io/api/core/v1
+          alias: corev1
+        - pkg: k8s.io/apimachinery/pkg/api/errors
+          alias: apierrors
+        - pkg: k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1
+          alias: apiextensionsv1
+        - pkg: k8s.io/apimachinery/pkg/apis/meta/v1
+          alias: metav1
+        - pkg: github.com/argoproj/gitops-engine/pkg/utils/testing
+          alias: testingutils
+    perfsprint:
+      int-conversion: true
+      err-error: true
+      errorf: true
+      sprintf1: true
+      strconcat: true
+    revive:
+      rules:
+        - name: bool-literal-in-expr
+        - name: blank-imports
+          disabled: true
+        - name: context-as-argument
+          arguments:
+            - allowTypesBefore: '*testing.T,testing.TB'
+        - name: context-keys-type
+          disabled: true
+        - name: dot-imports
+        - name: duplicated-imports
+        - name: early-return
+          arguments:
+            - preserveScope
+        - name: empty-block
+          disabled: true
+        - name: error-naming
+          disabled: true
+        - name: error-return
+        - name: error-strings
+          disabled: true
+        - name: errorf
+        - name: identical-branches
+        - name: if-return
+        - name: increment-decrement
+        - name: indent-error-flow
+          arguments:
+            - preserveScope
+        - name: modifies-parameter
+        - name: optimize-operands-order
+        - name: range
+        - name: receiver-naming
+        - name: redefines-builtin-id
+          disabled: true
+        - name: redundant-import-alias
+        - name: superfluous-else
+          arguments:
+            - preserveScope
+        - name: time-equal
+        - name: time-naming
+          disabled: true
+        - name: unexported-return
+          disabled: true
+        - name: unnecessary-stmt
+        - name: unreachable-code
+        - name: unused-parameter
+        - name: use-any
+        - name: useless-break
+        - name: var-declaration
+        - name: var-naming
+          disabled: true
+    testifylint:
+      enable-all: true
+      disable:
+        - go-require
+  exclusions:
+    presets:
+      - comments
+      - common-false-positives
+      - legacy
+      - std-error-handling
+    paths:
+      - pkg/diff/internal/fieldmanager/borrowed_.*\.go$
+      - internal/kubernetes_vendor
+      - third_party$
+      - builtin$
+      - examples$
+issues:
+  max-issues-per-linter: 0
+  max-same-issues: 0
+formatters:
+  enable:
+    - gofumpt
+    - goimports
+  settings:
+    goimports:
+      local-prefixes:
+        - github.com/argoproj/gitops-engine
+  exclusions:
+    paths:
+      - pkg/diff/internal/fieldmanager/borrowed_.*\.go$
+      - internal/kubernetes_vendor
+      - third_party$
+      - builtin$
+      - examples$
@@ -1,4 +1,4 @@
-FROM golang:1.14.3 as builder
+FROM golang:1.22 AS builder
 
 WORKDIR /src
 
@@ -12,5 +12,5 @@ COPY . .
 RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags="-w -s" -o /dist/gitops ./agent
 
 
-FROM alpine/git:v2.24.3
-COPY --from=builder /dist/gitops /usr/local/bin/gitops
+FROM alpine/git:v2.45.2
+COPY --from=builder /dist/gitops /usr/local/bin/gitops
@@ -31,10 +31,10 @@ The GitOps Engine follows the [CNCF Code of Conduct](https://github.com/cncf/fou
 
 If you are as excited about GitOps and one common engine for it as much as we are, please get in touch. If you want to write code that's great, if you want to share feedback, ideas and use-cases, that's great too.
 
-Find us on the [#gitops channel][gitops-slack] on Kubernetes Slack (get an [invite here][kube-slack]).
+Find us on the [#argo-cd-contributors][argo-cd-contributors-slack] on CNCF Slack (get an [invite here][cncf-slack]).
 
-[gitops-slack]: https://kubernetes.slack.com/archives/CBT6N1ASG
-[kube-slack]: https://slack.k8s.io/
+[argo-cd-contributors-slack]: https://cloud-native.slack.com/archives/C020XM04CUW
+[cncf-slack]: https://slack.cncf.io/
 
 ### Contributing to the effort
 
@@ -7,7 +7,7 @@ The main difference is that the agent is syncing one Git repository into the sam
 
 ## Quick Start
 
-By default the agent is configured to use manifests from [guestbook](https://github.com/argoproj/argocd-example-apps/tree/master/guestbook)
+By default, the agent is configured to use manifests from [guestbook](https://github.com/argoproj/argocd-example-apps/tree/master/guestbook)
 directory in https://github.com/argoproj/argocd-example-apps repository.
 
 The agent supports two modes:
@@ -24,7 +24,7 @@ kubectl apply -f https://raw.githubusercontent.com/argoproj/gitops-engine/master
 kubectl rollout status deploy/gitops-agent
 ```
 
-The the agent logs:
+The agent logs:
 
 ```bash
 kubectl logs -f deploy/gitops-agent gitops-agent
@@ -56,4 +56,21 @@ Update the container env [variables](https://github.com/kubernetes/git-sync#para
 
 ### Demo Recording
 
 [](https://asciinema.org/a/FWbvVAiSsiI87wQx2TJbRMlxN)
+
+### Profiling
+
+Profiling can be enabled via environment variables; start the agent with the following:
+
+```bash
+export GITOPS_ENGINE_PROFILE=web
+# optional, default pprof address is 127.0.0.1:6060
+export GITOPS_ENGINE_PROFILE_HOST=127.0.0.1
+export GITOPS_ENGINE_PROFILE_PORT=6060
+```
+
+You can then open the profile in the browser (or use the [pprof](https://github.com/google/pprof) cmd to generate diagrams):
+
+- http://127.0.0.1:6060/debug/pprof/goroutine?debug=2
+- http://127.0.0.1:6060/debug/pprof/mutex?debug=2
+
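As a usage sketch (editorial, not part of the README diff above; assumes the Go toolchain is installed and the default profile address shown): once the agent exposes the pprof endpoint, the standard pprof tooling can read it directly:

```bash
# inspect goroutines interactively
go tool pprof http://127.0.0.1:6060/debug/pprof/goroutine
# render a mutex-contention graph in the browser
go tool pprof -http=:8081 http://127.0.0.1:6060/debug/pprof/mutex
```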
@@ -5,33 +5,40 @@ import (
 	"crypto/sha256"
 	"encoding/base64"
 	"fmt"
-	"io/ioutil"
 	"net/http"
 	"os"
 	"os/exec"
 	"path/filepath"
+	"runtime"
 	"strings"
 	"text/tabwriter"
 	"time"
 
+	"github.com/argoproj/gitops-engine/pkg/utils/text"
+
 	"github.com/go-logr/logr"
 	"github.com/spf13/cobra"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/client-go/tools/clientcmd"
-	"k8s.io/klog/v2/klogr"
+	"k8s.io/klog/v2/textlogger"
 
 	"github.com/argoproj/gitops-engine/pkg/cache"
 	"github.com/argoproj/gitops-engine/pkg/engine"
 	"github.com/argoproj/gitops-engine/pkg/sync"
 	"github.com/argoproj/gitops-engine/pkg/utils/kube"
 
 	_ "net/http/pprof"
 )
 
 const (
 	annotationGCMark = "gitops-agent.argoproj.io/gc-mark"
+	envProfile       = "GITOPS_ENGINE_PROFILE"
+	envProfileHost   = "GITOPS_ENGINE_PROFILE_HOST"
+	envProfilePort   = "GITOPS_ENGINE_PROFILE_PORT"
 )
 
 func main() {
-	log := klogr.New() // Delegates to klog
+	log := textlogger.NewLogger(textlogger.NewConfig())
 	err := newCmd(log).Execute()
 	checkError(err, log)
 }
@@ -47,7 +54,7 @@ type settings struct {
 
 func (s *settings) getGCMark(key kube.ResourceKey) string {
 	h := sha256.New()
-	_, _ = h.Write([]byte(fmt.Sprintf("%s/%s", s.repoPath, strings.Join(s.paths, ","))))
+	_, _ = fmt.Fprintf(h, "%s/%s", s.repoPath, strings.Join(s.paths, ","))
 	_, _ = h.Write([]byte(strings.Join([]string{key.Group, key.Kind, key.Name}, "/")))
 	return "sha256." + base64.RawURLEncoding.EncodeToString(h.Sum(nil))
 }
@@ -57,7 +64,7 @@ func (s *settings) parseManifests() ([]*unstructured.Unstructured, string, error
 	cmd.Dir = s.repoPath
 	revision, err := cmd.CombinedOutput()
 	if err != nil {
-		return nil, "", err
+		return nil, "", fmt.Errorf("failed to determine git revision: %w", err)
 	}
 	var res []*unstructured.Unstructured
 	for i := range s.paths {
@@ -71,18 +78,18 @@ func (s *settings) parseManifests() ([]*unstructured.Unstructured, string, error
 			if ext := strings.ToLower(filepath.Ext(info.Name())); ext != ".json" && ext != ".yml" && ext != ".yaml" {
 				return nil
 			}
-			data, err := ioutil.ReadFile(path)
+			data, err := os.ReadFile(path)
 			if err != nil {
-				return err
+				return fmt.Errorf("failed to read file %s: %w", path, err)
 			}
 			items, err := kube.SplitYAML(data)
 			if err != nil {
-				return fmt.Errorf("failed to parse %s: %v", path, err)
+				return fmt.Errorf("failed to parse %s: %w", path, err)
 			}
 			res = append(res, items...)
 			return nil
 		}); err != nil {
-			return nil, "", err
+			return nil, "", fmt.Errorf("failed to parse %s: %w", s.paths[i], err)
 		}
 	}
 	for i := range res {
@@ -96,6 +103,19 @@ func (s *settings) parseManifests() ([]*unstructured.Unstructured, string, error
 	return res, string(revision), nil
 }
 
+func StartProfiler(log logr.Logger) {
+	if os.Getenv(envProfile) == "web" {
+		go func() {
+			runtime.SetBlockProfileRate(1)
+			runtime.SetMutexProfileFraction(1)
+			profilePort := text.WithDefault(os.Getenv(envProfilePort), "6060")
+			profileHost := text.WithDefault(os.Getenv(envProfileHost), "127.0.0.1")
+
+			log.Info("pprof", "err", http.ListenAndServe(fmt.Sprintf("%s:%s", profileHost, profilePort), nil))
+		}()
+	}
+}
+
 func newCmd(log logr.Logger) *cobra.Command {
 	var (
 		clientConfig clientcmd.ClientConfig
@@ -125,10 +145,12 @@ func newCmd(log logr.Logger) *cobra.Command {
 			if namespaced {
 				namespaces = []string{namespace}
 			}
+
+			StartProfiler(log)
 			clusterCache := cache.NewClusterCache(config,
 				cache.SetNamespaces(namespaces),
 				cache.SetLogr(log),
-				cache.SetPopulateResourceInfoHandler(func(un *unstructured.Unstructured, isRoot bool) (info interface{}, cacheManifest bool) {
+				cache.SetPopulateResourceInfoHandler(func(un *unstructured.Unstructured, _ bool) (info any, cacheManifest bool) {
 					// store gc mark of every resource
-					gcMark := un.GetAnnotations()[annotationGCMark]
 					info = &resourceInfo{gcMark: un.GetAnnotations()[annotationGCMark]}
@@ -153,7 +175,7 @@ func newCmd(log logr.Logger) *cobra.Command {
 					resync <- true
 				}
 			}()
-			http.HandleFunc("/api/v1/sync", func(writer http.ResponseWriter, request *http.Request) {
+			http.HandleFunc("/api/v1/sync", func(_ http.ResponseWriter, _ *http.Request) {
 				log.Info("Synchronization triggered by API call")
 				resync <- true
 			})
go.mod
@@ -1,51 +1,135 @@
 module github.com/argoproj/gitops-engine
 
-go 1.15
+go 1.24.0
 
 require (
-	github.com/davecgh/go-spew v1.1.1
-	github.com/evanphx/json-patch v4.9.0+incompatible
-	github.com/go-logr/logr v0.4.0
-	github.com/golang/mock v1.5.0
-	github.com/spf13/cobra v1.1.3
-	github.com/stretchr/testify v1.7.0
-	golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
-	k8s.io/api v0.21.0
-	k8s.io/apiextensions-apiserver v0.21.0
-	k8s.io/apimachinery v0.21.0
-	k8s.io/cli-runtime v0.21.0
-	k8s.io/client-go v0.21.0
-	k8s.io/klog/v2 v2.8.0
-	k8s.io/kube-aggregator v0.21.0
-	k8s.io/kubectl v0.21.0
-	k8s.io/kubernetes v1.21.0
-	sigs.k8s.io/yaml v1.2.0
+	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
+	github.com/evanphx/json-patch/v5 v5.9.11
+	github.com/go-logr/logr v1.4.3
+	github.com/google/gnostic-models v0.6.9
+	github.com/google/uuid v1.6.0
+	github.com/spf13/cobra v1.9.1
+	github.com/stretchr/testify v1.10.0
+	go.uber.org/mock v0.5.2
+	golang.org/x/sync v0.15.0
+	google.golang.org/protobuf v1.36.6
+	k8s.io/api v0.33.1
+	k8s.io/apiextensions-apiserver v0.33.1
+	k8s.io/apimachinery v0.33.1
+	k8s.io/cli-runtime v0.33.1
+	k8s.io/client-go v0.33.1
+	k8s.io/klog/v2 v2.130.1
+	k8s.io/kube-aggregator v0.33.1
+	k8s.io/kube-openapi v0.0.0-20250610211856-8b98d1ed966a
+	k8s.io/kubectl v0.33.1
+	k8s.io/kubernetes v1.33.1
+	sigs.k8s.io/structured-merge-diff/v4 v4.7.0
+	sigs.k8s.io/yaml v1.4.0
 )
 
+require (
+	github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect
+	github.com/MakeNowJust/heredoc v1.0.0 // indirect
+	github.com/beorn7/perks v1.0.1 // indirect
+	github.com/blang/semver/v4 v4.0.0 // indirect
+	github.com/cespare/xxhash/v2 v2.3.0 // indirect
+	github.com/chai2010/gettext-go v1.0.3 // indirect
+	github.com/distribution/reference v0.6.0 // indirect
+	github.com/emicklei/go-restful/v3 v3.12.2 // indirect
+	github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect
+	github.com/fatih/camelcase v1.0.0 // indirect
+	github.com/fxamacker/cbor/v2 v2.8.0 // indirect
+	github.com/go-errors/errors v1.5.1 // indirect
+	github.com/go-openapi/jsonpointer v0.21.1 // indirect
+	github.com/go-openapi/jsonreference v0.21.0 // indirect
+	github.com/go-openapi/swag v0.23.1 // indirect
+	github.com/gogo/protobuf v1.3.2 // indirect
+	github.com/google/btree v1.1.3 // indirect
+	github.com/google/go-cmp v0.7.0 // indirect
+	github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
+	github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
+	github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
+	github.com/inconshreveable/mousetrap v1.1.0 // indirect
+	github.com/jonboulle/clockwork v0.5.0 // indirect
+	github.com/josharian/intern v1.0.0 // indirect
+	github.com/json-iterator/go v1.1.12 // indirect
+	github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
+	github.com/mailru/easyjson v0.9.0 // indirect
+	github.com/mitchellh/go-wordwrap v1.0.1 // indirect
+	github.com/moby/spdystream v0.5.0 // indirect
+	github.com/moby/term v0.5.2 // indirect
+	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
+	github.com/modern-go/reflect2 v1.0.2 // indirect
+	github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
+	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
+	github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
+	github.com/opencontainers/go-digest v1.0.0 // indirect
+	github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
+	github.com/pkg/errors v0.9.1 // indirect
+	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
+	github.com/prometheus/client_golang v1.22.0 // indirect
+	github.com/prometheus/client_model v0.6.2 // indirect
+	github.com/prometheus/common v0.64.0 // indirect
+	github.com/prometheus/procfs v0.16.1 // indirect
+	github.com/russross/blackfriday/v2 v2.1.0 // indirect
+	github.com/spf13/pflag v1.0.6 // indirect
+	github.com/stretchr/objx v0.5.2 // indirect
+	github.com/x448/float16 v0.8.4 // indirect
+	github.com/xlab/treeprint v1.2.0 // indirect
+	go.opentelemetry.io/otel v1.36.0 // indirect
+	go.opentelemetry.io/otel/trace v1.36.0 // indirect
+	golang.org/x/net v0.41.0 // indirect
+	golang.org/x/oauth2 v0.30.0 // indirect
+	golang.org/x/sys v0.33.0 // indirect
+	golang.org/x/term v0.32.0 // indirect
+	golang.org/x/text v0.26.0 // indirect
+	golang.org/x/time v0.12.0 // indirect
+	gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
+	gopkg.in/inf.v0 v0.9.1 // indirect
+	gopkg.in/yaml.v3 v3.0.1 // indirect
+	k8s.io/apiserver v0.33.1 // indirect
+	k8s.io/component-base v0.33.1 // indirect
+	k8s.io/component-helpers v0.33.1 // indirect
+	k8s.io/controller-manager v0.33.1 // indirect
+	k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 // indirect
+	sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
+	sigs.k8s.io/kustomize/api v0.19.0 // indirect
+	sigs.k8s.io/kustomize/kyaml v0.19.0 // indirect
+	sigs.k8s.io/randfill v1.0.0 // indirect
+)
+
 replace (
-	// https://github.com/kubernetes/kubernetes/issues/79384#issuecomment-505627280
-	k8s.io/api => k8s.io/api v0.21.0
-	k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.21.0 // indirect
-	k8s.io/apimachinery => k8s.io/apimachinery v0.21.0 // indirect
-	k8s.io/apiserver => k8s.io/apiserver v0.21.0
-	k8s.io/cli-runtime => k8s.io/cli-runtime v0.21.0
-	k8s.io/client-go => k8s.io/client-go v0.21.0
-	k8s.io/cloud-provider => k8s.io/cloud-provider v0.21.0
-	k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.21.0
-	k8s.io/code-generator => k8s.io/code-generator v0.21.0
-	k8s.io/component-base => k8s.io/component-base v0.21.0
-	k8s.io/component-helpers => k8s.io/component-helpers v0.21.0
-	k8s.io/controller-manager => k8s.io/controller-manager v0.21.0
-	k8s.io/cri-api => k8s.io/cri-api v0.21.0
-	k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.21.0
-	k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.21.0
-	k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.21.0
-	k8s.io/kube-proxy => k8s.io/kube-proxy v0.21.0
-	k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.21.0
-	k8s.io/kubectl => k8s.io/kubectl v0.21.0
-	k8s.io/kubelet => k8s.io/kubelet v0.21.0
-	k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.21.0
-	k8s.io/metrics => k8s.io/metrics v0.21.0
-	k8s.io/mount-utils => k8s.io/mount-utils v0.21.0
-	k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.21.0
+	// After bumping these versions, run hack/update_static_schema.sh in case the schema has changed.
+	k8s.io/api => k8s.io/api v0.33.1
+	k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.33.1
+	k8s.io/apimachinery => k8s.io/apimachinery v0.33.1
+	k8s.io/apiserver => k8s.io/apiserver v0.33.1
+	k8s.io/cli-runtime => k8s.io/cli-runtime v0.33.1
+	k8s.io/client-go => k8s.io/client-go v0.33.1
+	k8s.io/cloud-provider => k8s.io/cloud-provider v0.33.1
+	k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.33.1
+	k8s.io/code-generator => k8s.io/code-generator v0.33.1
+	k8s.io/component-base => k8s.io/component-base v0.33.1
+	k8s.io/component-helpers => k8s.io/component-helpers v0.33.1
+	k8s.io/controller-manager => k8s.io/controller-manager v0.33.1
+	k8s.io/cri-api => k8s.io/cri-api v0.33.1
+	k8s.io/cri-client => k8s.io/cri-client v0.33.1
+	k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.33.1
+	k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.33.1
+	k8s.io/endpointslice => k8s.io/endpointslice v0.33.1
+	k8s.io/externaljwt => k8s.io/externaljwt v0.33.1
+	k8s.io/kms => k8s.io/kms v0.33.1
+	k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.33.1
+	k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.33.1
+	k8s.io/kube-proxy => k8s.io/kube-proxy v0.33.1
+	k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.33.1
+	k8s.io/kubectl => k8s.io/kubectl v0.33.1
+	k8s.io/kubelet => k8s.io/kubelet v0.33.1
+	k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.33.1
+	k8s.io/metrics => k8s.io/metrics v0.33.1
+	k8s.io/mount-utils => k8s.io/mount-utils v0.33.1
+	k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.33.1
+	k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.33.1
+	k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.33.1
+	k8s.io/sample-controller => k8s.io/sample-controller v0.33.1
 )
@@ -0,0 +1,18 @@
+#!/usr/bin/env bash
+
+set -euox pipefail
+
+# Get the k8s library version from go.mod, stripping the trailing newline
+k8s_lib_version=$(grep "k8s.io/client-go" go.mod | awk '{print $2}' | head -n 1 | tr -d '\n')
+
+# Download the parser file from the k8s library
+curl -sL "https://raw.githubusercontent.com/kubernetes/client-go/$k8s_lib_version/applyconfigurations/internal/internal.go" -o pkg/utils/kube/scheme/parser.go
+
+# Add a line to the beginning of the file saying that this is the script that generated it.
+sed -i '' '1s/^/\/\/ Code generated by hack\/update_static_schema.sh; DO NOT EDIT.\n\/\/ Everything below is downloaded from applyconfigurations\/internal\/internal.go in kubernetes\/client-go.\n\n/' pkg/utils/kube/scheme/parser.go
+
+# Replace "package internal" with "package scheme" in the parser file
+sed -i '' 's/package internal/package scheme/' pkg/utils/kube/scheme/parser.go
+
+# Replace "func Parser" with "func StaticParser"
+sed -i '' 's/func Parser/func StaticParser/' pkg/utils/kube/scheme/parser.go
(Two file diffs suppressed because they are too large.)
@@ -1,4 +1,4 @@
-// Code generated by mockery v0.0.0-dev. DO NOT EDIT.
+// Code generated by mockery v2.43.2. DO NOT EDIT.
 
 package mocks
 
@@ -6,6 +6,8 @@ import (
 	cache "github.com/argoproj/gitops-engine/pkg/cache"
 	kube "github.com/argoproj/gitops-engine/pkg/utils/kube"
 
+	managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+
 	mock "github.com/stretchr/testify/mock"
 
 	openapi "k8s.io/kubectl/pkg/util/openapi"

@@ -13,8 +15,6 @@ import (
 	schema "k8s.io/apimachinery/pkg/runtime/schema"
 
 	unstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
-
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
 // ClusterCache is an autogenerated mock type for the ClusterCache type
@@ -26,6 +26,10 @@ type ClusterCache struct {
 func (_m *ClusterCache) EnsureSynced() error {
 	ret := _m.Called()
 
+	if len(ret) == 0 {
+		panic("no return value specified for EnsureSynced")
+	}
+
 	var r0 error
 	if rf, ok := ret.Get(0).(func() error); ok {
 		r0 = rf()
@@ -47,6 +51,10 @@ func (_m *ClusterCache) FindResources(namespace string, predicates ...func(*cach
 	_ca = append(_ca, _va...)
 	ret := _m.Called(_ca...)
 
+	if len(ret) == 0 {
+		panic("no return value specified for FindResources")
+	}
+
 	var r0 map[kube.ResourceKey]*cache.Resource
 	if rf, ok := ret.Get(0).(func(string, ...func(*cache.Resource) bool) map[kube.ResourceKey]*cache.Resource); ok {
 		r0 = rf(namespace, predicates...)
@@ -59,16 +67,20 @@ func (_m *ClusterCache) FindResources(namespace string, predicates ...func(*cach
 	return r0
 }
 
-// GetAPIGroups provides a mock function with given fields:
-func (_m *ClusterCache) GetAPIGroups() []v1.APIGroup {
+// GetAPIResources provides a mock function with given fields:
+func (_m *ClusterCache) GetAPIResources() []kube.APIResourceInfo {
 	ret := _m.Called()
 
-	var r0 []v1.APIGroup
-	if rf, ok := ret.Get(0).(func() []v1.APIGroup); ok {
+	if len(ret) == 0 {
+		panic("no return value specified for GetAPIResources")
+	}
+
+	var r0 []kube.APIResourceInfo
+	if rf, ok := ret.Get(0).(func() []kube.APIResourceInfo); ok {
 		r0 = rf()
 	} else {
 		if ret.Get(0) != nil {
-			r0 = ret.Get(0).([]v1.APIGroup)
+			r0 = ret.Get(0).([]kube.APIResourceInfo)
 		}
 	}
 
@@ -79,6 +91,10 @@ func (_m *ClusterCache) GetAPIGroups() []v1.APIGroup {
 func (_m *ClusterCache) GetClusterInfo() cache.ClusterInfo {
 	ret := _m.Called()
 
+	if len(ret) == 0 {
+		panic("no return value specified for GetClusterInfo")
+	}
+
 	var r0 cache.ClusterInfo
 	if rf, ok := ret.Get(0).(func() cache.ClusterInfo); ok {
 		r0 = rf()
@@ -89,11 +105,39 @@ func (_m *ClusterCache) GetClusterInfo() cache.ClusterInfo {
 	return r0
 }
 
+// GetGVKParser provides a mock function with given fields:
+func (_m *ClusterCache) GetGVKParser() *managedfields.GvkParser {
+	ret := _m.Called()
+
+	if len(ret) == 0 {
+		panic("no return value specified for GetGVKParser")
+	}
+
+	var r0 *managedfields.GvkParser
+	if rf, ok := ret.Get(0).(func() *managedfields.GvkParser); ok {
+		r0 = rf()
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(*managedfields.GvkParser)
+		}
+	}
+
+	return r0
+}
+
 // GetManagedLiveObjs provides a mock function with given fields: targetObjs, isManaged
 func (_m *ClusterCache) GetManagedLiveObjs(targetObjs []*unstructured.Unstructured, isManaged func(*cache.Resource) bool) (map[kube.ResourceKey]*unstructured.Unstructured, error) {
 	ret := _m.Called(targetObjs, isManaged)
 
+	if len(ret) == 0 {
+		panic("no return value specified for GetManagedLiveObjs")
+	}
+
 	var r0 map[kube.ResourceKey]*unstructured.Unstructured
+	var r1 error
+	if rf, ok := ret.Get(0).(func([]*unstructured.Unstructured, func(*cache.Resource) bool) (map[kube.ResourceKey]*unstructured.Unstructured, error)); ok {
+		return rf(targetObjs, isManaged)
+	}
 	if rf, ok := ret.Get(0).(func([]*unstructured.Unstructured, func(*cache.Resource) bool) map[kube.ResourceKey]*unstructured.Unstructured); ok {
 		r0 = rf(targetObjs, isManaged)
 	} else {
@@ -102,7 +146,6 @@ func (_m *ClusterCache) GetManagedLiveObjs(targetObjs []*unstructured.Unstructur
 		}
 	}
 
-	var r1 error
 	if rf, ok := ret.Get(1).(func([]*unstructured.Unstructured, func(*cache.Resource) bool) error); ok {
 		r1 = rf(targetObjs, isManaged)
 	} else {
@@ -116,6 +159,10 @@ func (_m *ClusterCache) GetManagedLiveObjs(targetObjs []*unstructured.Unstructur
 func (_m *ClusterCache) GetOpenAPISchema() openapi.Resources {
 	ret := _m.Called()
 
+	if len(ret) == 0 {
+		panic("no return value specified for GetOpenAPISchema")
+	}
+
 	var r0 openapi.Resources
 	if rf, ok := ret.Get(0).(func() openapi.Resources); ok {
 		r0 = rf()
@@ -132,6 +179,10 @@ func (_m *ClusterCache) GetOpenAPISchema() openapi.Resources {
 func (_m *ClusterCache) GetServerVersion() string {
 	ret := _m.Called()
 
+	if len(ret) == 0 {
+		panic("no return value specified for GetServerVersion")
+	}
+
 	var r0 string
 	if rf, ok := ret.Get(0).(func() string); ok {
 		r0 = rf()
@@ -157,14 +208,21 @@ func (_m *ClusterCache) Invalidate(opts ...cache.UpdateSettingsFunc) {
 func (_m *ClusterCache) IsNamespaced(gk schema.GroupKind) (bool, error) {
 	ret := _m.Called(gk)
 
+	if len(ret) == 0 {
+		panic("no return value specified for IsNamespaced")
+	}
+
 	var r0 bool
+	var r1 error
+	if rf, ok := ret.Get(0).(func(schema.GroupKind) (bool, error)); ok {
+		return rf(gk)
+	}
 	if rf, ok := ret.Get(0).(func(schema.GroupKind) bool); ok {
 		r0 = rf(gk)
 	} else {
 		r0 = ret.Get(0).(bool)
 	}
 
-	var r1 error
 	if rf, ok := ret.Get(1).(func(schema.GroupKind) error); ok {
 		r1 = rf(gk)
 	} else {
@@ -175,14 +233,23 @@ func (_m *ClusterCache) IsNamespaced(gk schema.GroupKind) (bool, error) {
 }
 
 // IterateHierarchy provides a mock function with given fields: key, action
-func (_m *ClusterCache) IterateHierarchy(key kube.ResourceKey, action func(*cache.Resource, map[kube.ResourceKey]*cache.Resource)) {
+func (_m *ClusterCache) IterateHierarchy(key kube.ResourceKey, action func(*cache.Resource, map[kube.ResourceKey]*cache.Resource) bool) {
 	_m.Called(key, action)
 }
 
+// IterateHierarchyV2 provides a mock function with given fields: keys, action
+func (_m *ClusterCache) IterateHierarchyV2(keys []kube.ResourceKey, action func(*cache.Resource, map[kube.ResourceKey]*cache.Resource) bool) {
+	_m.Called(keys, action)
+}
+
 // OnEvent provides a mock function with given fields: handler
 func (_m *ClusterCache) OnEvent(handler cache.OnEventHandler) cache.Unsubscribe {
 	ret := _m.Called(handler)
 
+	if len(ret) == 0 {
+		panic("no return value specified for OnEvent")
+	}
+
 	var r0 cache.Unsubscribe
 	if rf, ok := ret.Get(0).(func(cache.OnEventHandler) cache.Unsubscribe); ok {
 		r0 = rf(handler)
@@ -195,10 +262,34 @@ func (_m *ClusterCache) OnEvent(handler cache.OnEventHandler) cache.Unsubscribe
 	return r0
 }
 
+// OnProcessEventsHandler provides a mock function with given fields: handler
+func (_m *ClusterCache) OnProcessEventsHandler(handler cache.OnProcessEventsHandler) cache.Unsubscribe {
+	ret := _m.Called(handler)
+
+	if len(ret) == 0 {
+		panic("no return value specified for OnProcessEventsHandler")
+	}
+
+	var r0 cache.Unsubscribe
+	if rf, ok := ret.Get(0).(func(cache.OnProcessEventsHandler) cache.Unsubscribe); ok {
+		r0 = rf(handler)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(cache.Unsubscribe)
+		}
+	}
+
+	return r0
+}
+
 // OnResourceUpdated provides a mock function with given fields: handler
 func (_m *ClusterCache) OnResourceUpdated(handler cache.OnResourceUpdatedHandler) cache.Unsubscribe {
 	ret := _m.Called(handler)
 
+	if len(ret) == 0 {
+		panic("no return value specified for OnResourceUpdated")
+	}
+
 	var r0 cache.Unsubscribe
 	if rf, ok := ret.Get(0).(func(cache.OnResourceUpdatedHandler) cache.Unsubscribe); ok {
 		r0 = rf(handler)
@@ -210,3 +301,17 @@ func (_m *ClusterCache) OnResourceUpdated(handler cache.OnResourceUpdatedHandler
 
 	return r0
 }
+
+// NewClusterCache creates a new instance of ClusterCache. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewClusterCache(t interface {
+	mock.TestingT
+	Cleanup(func())
+}) *ClusterCache {
+	mock := &ClusterCache{}
+	mock.Mock.Test(t)
+
+	t.Cleanup(func() { mock.AssertExpectations(t) })
+
+	return mock
+}
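As a usage sketch (editorial, not part of this diff; the import path is an assumption based on where mockery usually places these files): the `NewClusterCache` constructor above registers the test and asserts expectations automatically on cleanup, so a test only needs to set expectations and call the mock:

```go
package mocks_test

import (
	"testing"

	"github.com/argoproj/gitops-engine/pkg/cache/mocks" // assumed import path
)

func TestClusterCacheMock(t *testing.T) {
	// Expectations are asserted automatically via t.Cleanup.
	clusterCache := mocks.NewClusterCache(t)
	clusterCache.On("GetServerVersion").Return("v1.33.1")

	if got := clusterCache.GetServerVersion(); got != "v1.33.1" {
		t.Fatalf("unexpected server version: %q", got)
	}
}
```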
@@ -9,6 +9,7 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	appsv1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/client-go/rest"
@@ -24,7 +25,7 @@ func TestResourceOfGroupKind(t *testing.T) {
 			Name: "deploy",
 		},
 	}
-	service := &appsv1.Deployment{
+	service := &corev1.Service{
 		TypeMeta: metav1.TypeMeta{
 			APIVersion: "",
 			Kind:       "Service",
@@ -81,12 +82,12 @@ func TestGetNamespaceResources(t *testing.T) {
 
 	resources := cluster.FindResources("default", TopLevelResource)
 	assert.Len(t, resources, 2)
-	assert.Equal(t, resources[getResourceKey(t, defaultNamespaceTopLevel1)].Ref.Name, "helm-guestbook1")
-	assert.Equal(t, resources[getResourceKey(t, defaultNamespaceTopLevel2)].Ref.Name, "helm-guestbook2")
+	assert.Equal(t, "helm-guestbook1", resources[getResourceKey(t, defaultNamespaceTopLevel1)].Ref.Name)
+	assert.Equal(t, "helm-guestbook2", resources[getResourceKey(t, defaultNamespaceTopLevel2)].Ref.Name)
 
 	resources = cluster.FindResources("kube-system", TopLevelResource)
 	assert.Len(t, resources, 1)
-	assert.Equal(t, resources[getResourceKey(t, kubesystemNamespaceTopLevel2)].Ref.Name, "helm-guestbook3")
+	assert.Equal(t, "helm-guestbook3", resources[getResourceKey(t, kubesystemNamespaceTopLevel2)].Ref.Name)
 }
 
 func ExampleNewClusterCache_inspectNamespaceResources() {
@@ -97,7 +98,7 @@ func ExampleNewClusterCache_inspectNamespaceResources() {
 		// cache default namespace only
 		SetNamespaces([]string{"default", "kube-system"}),
 		// configure custom logic to cache resources manifest and additional metadata
-		SetPopulateResourceInfoHandler(func(un *unstructured.Unstructured, isRoot bool) (info interface{}, cacheManifest bool) {
+		SetPopulateResourceInfoHandler(func(un *unstructured.Unstructured, _ bool) (info any, cacheManifest bool) {
 			// if resource belongs to 'extensions' group then mark if with 'deprecated' label
 			if un.GroupVersionKind().Group == "extensions" {
 				info = []string{"deprecated"}
@@ -114,8 +115,9 @@ func ExampleNewClusterCache_inspectNamespaceResources() {
 	}
 	// Iterate default namespace resources tree
 	for _, root := range clusterCache.FindResources("default", TopLevelResource) {
-		clusterCache.IterateHierarchy(root.ResourceKey(), func(resource *Resource, _ map[kube.ResourceKey]*Resource) {
+		clusterCache.IterateHierarchy(root.ResourceKey(), func(resource *Resource, _ map[kube.ResourceKey]*Resource) bool {
 			fmt.Printf("resource: %s, info: %v\n", resource.Ref.String(), resource.Info)
+			return true
 		})
 	}
 }
@@ -3,9 +3,9 @@ package cache
 import (
 	"encoding/json"
 	"fmt"
-	"strings"
+	"regexp"
 
-	v1 "k8s.io/api/apps/v1"
+	appsv1 "k8s.io/api/apps/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/types"
@@ -24,9 +24,8 @@ func (c *clusterCache) resolveResourceReferences(un *unstructured.Unstructured)
 	gvk := un.GroupVersionKind()
 
 	switch {
-
 	// Special case for endpoint. Remove after https://github.com/kubernetes/kubernetes/issues/28483 is fixed
-	case gvk.Group == "" && gvk.Kind == kube.EndpointsKind && len(un.GetOwnerReferences()) == 0:
+	case gvk.Group == "" && gvk.Kind == kube.EndpointsKind && len(ownerRefs) == 0:
 		ownerRefs = append(ownerRefs, metav1.OwnerReference{
 			Name: un.GetName(),
 			Kind: kube.ServiceKind,
@@ -34,7 +33,7 @@ func (c *clusterCache) resolveResourceReferences(un *unstructured.Unstructured)
 		})
 
 	// Special case for Operator Lifecycle Manager ClusterServiceVersion:
-	case un.GroupVersionKind().Group == "operators.coreos.com" && un.GetKind() == "ClusterServiceVersion":
+	case gvk.Group == "operators.coreos.com" && gvk.Kind == "ClusterServiceVersion":
 		if un.GetAnnotations()["olm.operatorGroup"] != "" {
 			ownerRefs = append(ownerRefs, metav1.OwnerReference{
 				Name: un.GetAnnotations()["olm.operatorGroup"],
@@ -44,12 +43,12 @@ func (c *clusterCache) resolveResourceReferences(un *unstructured.Unstructured)
 		}
 
 	// Edge case: consider auto-created service account tokens as a child of service account objects
-	case un.GetKind() == kube.SecretKind && un.GroupVersionKind().Group == "":
+	case gvk.Kind == kube.SecretKind && gvk.Group == "":
 		if yes, ref := isServiceAccountTokenSecret(un); yes {
 			ownerRefs = append(ownerRefs, ref)
 		}
 
-	case (un.GroupVersionKind().Group == "apps" || un.GroupVersionKind().Group == "extensions") && un.GetKind() == kube.StatefulSetKind:
+	case (gvk.Group == "apps" || gvk.Group == "extensions") && gvk.Kind == kube.StatefulSetKind:
 		if refs, err := isStatefulSetChild(un); err != nil {
 			c.log.Error(err, fmt.Sprintf("Failed to extract StatefulSet %s/%s PVC references", un.GetNamespace(), un.GetName()))
 		} else {
@@ -61,21 +60,21 @@ func (c *clusterCache) resolveResourceReferences(un *unstructured.Unstructured)
 }
 
 func isStatefulSetChild(un *unstructured.Unstructured) (func(kube.ResourceKey) bool, error) {
-	sts := v1.StatefulSet{}
+	sts := appsv1.StatefulSet{}
 	data, err := json.Marshal(un)
 	if err != nil {
-		return nil, err
+		return nil, fmt.Errorf("failed to marshal unstructured object: %w", err)
 	}
 	err = json.Unmarshal(data, &sts)
 	if err != nil {
-		return nil, err
+		return nil, fmt.Errorf("failed to unmarshal statefulset: %w", err)
 	}
 
 	templates := sts.Spec.VolumeClaimTemplates
 	return func(key kube.ResourceKey) bool {
 		if key.Kind == kube.PersistentVolumeClaimKind && key.GroupKind().Group == "" {
 			for _, templ := range templates {
-				if strings.HasPrefix(key.Name, fmt.Sprintf("%s-%s-", templ.Name, un.GetName())) {
+				if match, _ := regexp.MatchString(fmt.Sprintf(`%s-%s-\d+$`, templ.Name, un.GetName()), key.Name); match {
 					return true
 				}
 			}
@@ -0,0 +1,106 @@
+package cache
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	appsv1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+
+	"github.com/argoproj/gitops-engine/pkg/utils/kube"
+)
+
+func Test_isStatefulSetChild(t *testing.T) {
+	type args struct {
+		un *unstructured.Unstructured
+	}
+
+	statefulSet := &appsv1.StatefulSet{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: "apps/v1",
+			Kind:       "StatefulSet",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "sw-broker",
+		},
+		Spec: appsv1.StatefulSetSpec{
+			VolumeClaimTemplates: []corev1.PersistentVolumeClaim{
+				{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: "emqx-data",
+					},
+				},
+			},
+		},
+	}
+
+	// Create a new unstructured object from the typed StatefulSet
+	un, err := kube.ToUnstructured(statefulSet)
+	require.NoErrorf(t, err, "Failed to convert StatefulSet to unstructured: %v", err)
+
+	tests := []struct {
+		name      string
+		args      args
+		wantErr   bool
+		checkFunc func(func(kube.ResourceKey) bool) bool
+	}{
+		{
+			name:    "Valid PVC for sw-broker",
+			args:    args{un: un},
+			wantErr: false,
+			checkFunc: func(fn func(kube.ResourceKey) bool) bool {
+				// Check a valid PVC name for "sw-broker"
+				return fn(kube.ResourceKey{Kind: "PersistentVolumeClaim", Name: "emqx-data-sw-broker-0"})
+			},
+		},
+		{
+			name:    "Invalid PVC for sw-broker",
+			args:    args{un: un},
+			wantErr: false,
+			checkFunc: func(fn func(kube.ResourceKey) bool) bool {
+				// Check an invalid PVC name that should belong to "sw-broker-internal"
+				return !fn(kube.ResourceKey{Kind: "PersistentVolumeClaim", Name: "emqx-data-sw-broker-internal-0"})
+			},
+		},
+		{
+			name: "Mismatch PVC for sw-broker",
+			args: args{un: &unstructured.Unstructured{
+				Object: map[string]any{
+					"apiVersion": "apps/v1",
+					"kind":       "StatefulSet",
+					"metadata": map[string]any{
+						"name": "sw-broker",
+					},
+					"spec": map[string]any{
+						"volumeClaimTemplates": []any{
+							map[string]any{
+								"metadata": map[string]any{
+									"name": "volume-2",
+								},
+							},
+						},
+					},
+				},
+			}},
+			wantErr: false,
+			checkFunc: func(fn func(kube.ResourceKey) bool) bool {
+				// Check a PVC name that does not match the <template>-<set>-<ordinal> pattern
+				return !fn(kube.ResourceKey{Kind: "PersistentVolumeClaim", Name: "volume-2"})
+			},
+		},
+	}
+
+	// Execute test cases
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			got, err := isStatefulSetChild(tt.args.un)
+			assert.Equal(t, tt.wantErr, err != nil, "isStatefulSetChild() error = %v, wantErr %v", err, tt.wantErr)
+			if err == nil {
+				assert.True(t, tt.checkFunc(got), "Check function failed for %v", tt.name)
+			}
+		})
+	}
+}
@@ -3,7 +3,9 @@ package cache
 import (
 	"fmt"
 
-	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/types"
+
+	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 
@@ -15,13 +17,13 @@ type Resource struct {
 	// ResourceVersion holds most recent observed resource version
 	ResourceVersion string
 	// Resource reference
-	Ref v1.ObjectReference
+	Ref corev1.ObjectReference
 	// References to resource owners
 	OwnerRefs []metav1.OwnerReference
 	// Optional creation timestamp of the resource
 	CreationTimestamp *metav1.Time
 	// Optional additional information about the resource
-	Info interface{}
+	Info any
 	// Optional whole resource manifest
 	Resource *unstructured.Unstructured
 
@@ -35,7 +37,6 @@ func (r *Resource) ResourceKey() kube.ResourceKey {
 
 func (r *Resource) isParentOf(child *Resource) bool {
 	for i, ownerRef := range child.OwnerRefs {
-
 		// backfill UID of inferred owner child references
 		if ownerRef.UID == "" && r.Ref.Kind == ownerRef.Kind && r.Ref.APIVersion == ownerRef.APIVersion && r.Ref.Name == ownerRef.Name {
 			ownerRef.UID = r.Ref.UID
@@ -85,16 +86,46 @@ func newResourceKeySet(set map[kube.ResourceKey]bool, keys ...kube.ResourceKey)
 	return newSet
 }
 
-func (r *Resource) iterateChildren(ns map[kube.ResourceKey]*Resource, parents map[kube.ResourceKey]bool, action func(err error, child *Resource, namespaceResources map[kube.ResourceKey]*Resource)) {
+func (r *Resource) iterateChildren(ns map[kube.ResourceKey]*Resource, parents map[kube.ResourceKey]bool, action func(err error, child *Resource, namespaceResources map[kube.ResourceKey]*Resource) bool) {
 	for childKey, child := range ns {
 		if r.isParentOf(ns[childKey]) {
 			if parents[childKey] {
 				key := r.ResourceKey()
-				action(fmt.Errorf("circular dependency detected. %s is child and parent of %s", childKey.String(), key.String()), child, ns)
-			} else {
-				action(nil, child, ns)
+				_ = action(fmt.Errorf("circular dependency detected. %s is child and parent of %s", childKey.String(), key.String()), child, ns)
+			} else if action(nil, child, ns) {
 				child.iterateChildren(ns, newResourceKeySet(parents, r.ResourceKey()), action)
 			}
 		}
 	}
 }
+
+// iterateChildrenV2 is a depth-first traversal of the graph of resources starting from the current resource.
+func (r *Resource) iterateChildrenV2(graph map[kube.ResourceKey]map[types.UID]*Resource, ns map[kube.ResourceKey]*Resource, visited map[kube.ResourceKey]int, action func(err error, child *Resource, namespaceResources map[kube.ResourceKey]*Resource) bool) {
+	key := r.ResourceKey()
+	if visited[key] == 2 {
+		return
+	}
+	// this indicates that we've started processing this node's children
+	visited[key] = 1
+	defer func() {
+		// this indicates that we've finished processing this node's children
+		visited[key] = 2
+	}()
+	children, ok := graph[key]
+	if !ok || children == nil {
+		return
+	}
+	for _, c := range children {
+		childKey := c.ResourceKey()
+		child := ns[childKey]
+		switch visited[childKey] {
+		case 1:
+			// Since we encountered a node that we're currently processing, we know we have a circular dependency.
+			_ = action(fmt.Errorf("circular dependency detected. %s is child and parent of %s", childKey.String(), key.String()), child, ns)
+		case 0:
+			if action(nil, child, ns) {
+				child.iterateChildrenV2(graph, ns, visited, action)
+			}
+		}
+	}
+}
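The visited map in `iterateChildrenV2` above follows the classic three-state (white/grey/black) depth-first search for cycle detection: 0 = not visited, 1 = on the current traversal stack, 2 = fully processed; reaching a state-1 node means the edge closes a cycle. A minimal standalone sketch of the same idea (editorial; hypothetical string nodes, not repo code):

```go
package main

import "fmt"

// dfs visits nodes depth-first. visited: 0 = unvisited, 1 = in progress, 2 = done.
func dfs(node string, edges map[string][]string, visited map[string]int) {
	visited[node] = 1 // entering: node is on the current DFS stack
	for _, child := range edges[node] {
		switch visited[child] {
		case 1:
			fmt.Printf("circular dependency detected: %s is child and parent of %s\n", child, node)
		case 0:
			dfs(child, edges, visited)
		}
	}
	visited[node] = 2 // leaving: all children processed
}

func main() {
	edges := map[string][]string{"a": {"b"}, "b": {"c"}, "c": {"a"}}
	dfs("a", edges, map[string]int{})
}
```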
@@ -7,12 +7,12 @@ import (
 	"k8s.io/client-go/rest"
 )
 
-var c = NewClusterCache(&rest.Config{})
+var cacheTest = NewClusterCache(&rest.Config{})
 
 func TestIsParentOf(t *testing.T) {
-	child := c.newResource(mustToUnstructured(testPod()))
-	parent := c.newResource(mustToUnstructured(testRS()))
-	grandParent := c.newResource(mustToUnstructured(testDeploy()))
+	child := cacheTest.newResource(mustToUnstructured(testPod1()))
+	parent := cacheTest.newResource(mustToUnstructured(testRS()))
+	grandParent := cacheTest.newResource(mustToUnstructured(testDeploy()))
 
 	assert.True(t, parent.isParentOf(child))
 	assert.False(t, grandParent.isParentOf(child))
@@ -22,14 +22,14 @@ func TestIsParentOfSameKindDifferentGroupAndUID(t *testing.T) {
 	rs := testRS()
 	rs.APIVersion = "somecrd.io/v1"
 	rs.SetUID("123")
-	child := c.newResource(mustToUnstructured(testPod()))
-	invalidParent := c.newResource(mustToUnstructured(rs))
+	child := cacheTest.newResource(mustToUnstructured(testPod1()))
+	invalidParent := cacheTest.newResource(mustToUnstructured(rs))
 
 	assert.False(t, invalidParent.isParentOf(child))
 }
 
 func TestIsServiceParentOfEndPointWithTheSameName(t *testing.T) {
-	nonMatchingNameEndPoint := c.newResource(strToUnstructured(`
+	nonMatchingNameEndPoint := cacheTest.newResource(strToUnstructured(`
 apiVersion: v1
 kind: Endpoints
 metadata:
@@ -37,7 +37,7 @@
   namespace: default
 `))
 
-	matchingNameEndPoint := c.newResource(strToUnstructured(`
+	matchingNameEndPoint := cacheTest.newResource(strToUnstructured(`
 apiVersion: v1
 kind: Endpoints
 metadata:
@@ -45,15 +45,15 @@
   namespace: default
 `))
 
-	parent := c.newResource(testService)
+	parent := cacheTest.newResource(testService)
 
 	assert.True(t, parent.isParentOf(matchingNameEndPoint))
 	assert.Equal(t, parent.Ref.UID, matchingNameEndPoint.OwnerRefs[0].UID)
 	assert.False(t, parent.isParentOf(nonMatchingNameEndPoint))
 }
 
-func TestIsServiceAccoountParentOfSecret(t *testing.T) {
-	serviceAccount := c.newResource(strToUnstructured(`
+func TestIsServiceAccountParentOfSecret(t *testing.T) {
+	serviceAccount := cacheTest.newResource(strToUnstructured(`
 apiVersion: v1
 kind: ServiceAccount
 metadata:
@@ -63,7 +63,7 @@
 secrets:
 - name: default-token-123
 `))
-	tokenSecret := c.newResource(strToUnstructured(`
+	tokenSecret := cacheTest.newResource(strToUnstructured(`
 apiVersion: v1
 kind: Secret
 metadata:
@@ -12,9 +12,13 @@ import (
 	"github.com/argoproj/gitops-engine/pkg/utils/tracing"
 )
 
-type noopSettings struct {
+// NewNoopSettings returns cache settings that have no health customizations and don't filter any resources.
+func NewNoopSettings() *noopSettings {
+	return &noopSettings{}
 }
+
+type noopSettings struct{}
 
 func (f *noopSettings) GetResourceHealth(_ *unstructured.Unstructured) (*health.HealthStatus, error) {
 	return nil, nil
 }
@@ -108,6 +112,20 @@ func SetResyncTimeout(timeout time.Duration) UpdateSettingsFunc {
 	}
 }
 
+// SetWatchResyncTimeout updates cluster re-sync timeout
+func SetWatchResyncTimeout(timeout time.Duration) UpdateSettingsFunc {
+	return func(cache *clusterCache) {
+		cache.watchResyncTimeout = timeout
+	}
+}
+
+// SetClusterSyncRetryTimeout updates cluster sync retry timeout when sync error happens
+func SetClusterSyncRetryTimeout(timeout time.Duration) UpdateSettingsFunc {
+	return func(cache *clusterCache) {
+		cache.clusterSyncRetryTimeout = timeout
+	}
+}
+
 // SetLogr sets the logger to use.
 func SetLogr(log logr.Logger) UpdateSettingsFunc {
 	return func(cache *clusterCache) {
@@ -126,3 +144,42 @@ func SetTracer(tracer tracing.Tracer) UpdateSettingsFunc {
 		}
 	}
 }
+
+// SetRetryOptions sets cluster list retry options
+func SetRetryOptions(maxRetries int32, useBackoff bool, retryFunc ListRetryFunc) UpdateSettingsFunc {
+	return func(cache *clusterCache) {
+		// Max retries must be at least one
+		if maxRetries < 1 {
+			maxRetries = 1
+		}
+		cache.listRetryLimit = maxRetries
+		cache.listRetryUseBackoff = useBackoff
+		cache.listRetryFunc = retryFunc
+	}
+}
+
+// SetRespectRBAC allows to set whether to respect the controller rbac in list/watches
+func SetRespectRBAC(respectRBAC int) UpdateSettingsFunc {
+	return func(cache *clusterCache) {
+		// if invalid value is provided disable respect rbac
+		if respectRBAC < RespectRbacDisabled || respectRBAC > RespectRbacStrict {
+			cache.respectRBAC = RespectRbacDisabled
+		} else {
+			cache.respectRBAC = respectRBAC
+		}
+	}
+}
+
+// SetBatchEventsProcessing allows to set whether to process events in batch
+func SetBatchEventsProcessing(batchProcessing bool) UpdateSettingsFunc {
+	return func(cache *clusterCache) {
+		cache.batchEventsProcessing = batchProcessing
+	}
+}
+
+// SetEventProcessingInterval allows to set the interval for processing events
+func SetEventProcessingInterval(interval time.Duration) UpdateSettingsFunc {
+	return func(cache *clusterCache) {
+		cache.eventProcessingInterval = interval
+	}
+}
@@ -39,10 +39,36 @@ func TestSetNamespaces(t *testing.T) {
 
 func TestSetResyncTimeout(t *testing.T) {
 	cache := NewClusterCache(&rest.Config{})
-	assert.Equal(t, clusterResyncTimeout, cache.syncStatus.resyncTimeout)
+	assert.Equal(t, defaultClusterResyncTimeout, cache.syncStatus.resyncTimeout)
 
 	timeout := 1 * time.Hour
 	cache.Invalidate(SetResyncTimeout(timeout))
 
 	assert.Equal(t, timeout, cache.syncStatus.resyncTimeout)
 }
 
+func TestSetWatchResyncTimeout(t *testing.T) {
+	cache := NewClusterCache(&rest.Config{})
+	assert.Equal(t, defaultWatchResyncTimeout, cache.watchResyncTimeout)
+
+	timeout := 30 * time.Minute
+	cache = NewClusterCache(&rest.Config{}, SetWatchResyncTimeout(timeout))
+	assert.Equal(t, timeout, cache.watchResyncTimeout)
+}
+
+func TestSetBatchEventsProcessing(t *testing.T) {
+	cache := NewClusterCache(&rest.Config{})
+	assert.False(t, cache.batchEventsProcessing)
+
+	cache.Invalidate(SetBatchEventsProcessing(true))
+	assert.True(t, cache.batchEventsProcessing)
+}
+
+func TestSetEventsProcessingInterval(t *testing.T) {
+	cache := NewClusterCache(&rest.Config{})
+	assert.Equal(t, defaultEventProcessingInterval, cache.eventProcessingInterval)
+
+	interval := 1 * time.Second
+	cache.Invalidate(SetEventProcessingInterval(interval))
+	assert.Equal(t, interval, cache.eventProcessingInterval)
+}
743
pkg/diff/diff.go
@ -1,8 +1,13 @@
package diff

import (
	"context"

	"github.com/go-logr/logr"
	"k8s.io/klog/v2/klogr"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/util/managedfields"
	"k8s.io/klog/v2/textlogger"
	cmdutil "k8s.io/kubectl/pkg/cmd/util"
)

type Option func(*options)
@ -13,13 +18,20 @@ type options struct {
	ignoreAggregatedRoles bool
	normalizer            Normalizer
	log                   logr.Logger
	structuredMergeDiff   bool
	gvkParser             *managedfields.GvkParser
	manager               string
	serverSideDiff        bool
	serverSideDryRunner   ServerSideDryRunner
	ignoreMutationWebhook bool
}

func applyOptions(opts []Option) options {
	o := options{
		ignoreAggregatedRoles: false,
		ignoreMutationWebhook: true,
		normalizer:            GetNoopNormalizer(),
		log:                   klogr.New(),
		log:                   textlogger.NewLogger(textlogger.NewConfig()),
	}
	for _, opt := range opts {
		opt(&o)
@ -27,6 +39,37 @@ func applyOptions(opts []Option) options {
	return o
}

type KubeApplier interface {
	ApplyResource(ctx context.Context, obj *unstructured.Unstructured, dryRunStrategy cmdutil.DryRunStrategy, force, validate, serverSideApply bool, manager string) (string, error)
}

// ServerSideDryRunner defines the contract to run a server-side apply in
// dryrun mode.
type ServerSideDryRunner interface {
	Run(ctx context.Context, obj *unstructured.Unstructured, manager string) (string, error)
}

// K8sServerSideDryRunner is the Kubernetes implementation of ServerSideDryRunner.
type K8sServerSideDryRunner struct {
	dryrunApplier KubeApplier
}

// NewK8sServerSideDryRunner will instantiate a new K8sServerSideDryRunner with
// the given kubeApplier.
func NewK8sServerSideDryRunner(kubeApplier KubeApplier) *K8sServerSideDryRunner {
	return &K8sServerSideDryRunner{
		dryrunApplier: kubeApplier,
	}
}

// Run will invoke a kubernetes server-side apply with the given obj and the
// given manager in dryrun mode. It returns the predicted live state as a
// JSON string.
func (kdr *K8sServerSideDryRunner) Run(ctx context.Context, obj *unstructured.Unstructured, manager string) (string, error) {
	//nolint:wrapcheck // trivial function, don't bother wrapping
	return kdr.dryrunApplier.ApplyResource(ctx, obj, cmdutil.DryRunServer, false, false, true, manager)
}

func IgnoreAggregatedRoles(ignore bool) Option {
	return func(o *options) {
		o.ignoreAggregatedRoles = ignore

@ -44,3 +87,39 @@ func WithLogr(log logr.Logger) Option {
		o.log = log
	}
}

func WithStructuredMergeDiff(smd bool) Option {
	return func(o *options) {
		o.structuredMergeDiff = smd
	}
}

func WithGVKParser(parser *managedfields.GvkParser) Option {
	return func(o *options) {
		o.gvkParser = parser
	}
}

func WithManager(manager string) Option {
	return func(o *options) {
		o.manager = manager
	}
}

func WithServerSideDiff(ssd bool) Option {
	return func(o *options) {
		o.serverSideDiff = ssd
	}
}

func WithIgnoreMutationWebhook(mw bool) Option {
	return func(o *options) {
		o.ignoreMutationWebhook = mw
	}
}

func WithServerSideDryRunner(ssadr ServerSideDryRunner) Option {
	return func(o *options) {
		o.serverSideDryRunner = ssadr
	}
}
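Editor's note: taken together, these options let a caller opt in to server-side diffing. A sketch of the expected wiring, assuming an existing KubeApplier implementation named kubeApplier, a *managedfields.GvkParser named gvkParser built from the cluster's OpenAPI data, and two *unstructured.Unstructured objects (all assumptions, not part of this diff):

	dryRunner := diff.NewK8sServerSideDryRunner(kubeApplier)
	result, err := diff.Diff(configObj, liveObj,
		diff.WithServerSideDiff(true),
		diff.WithServerSideDryRunner(dryRunner),
		diff.WithManager("argocd-controller"),
		diff.WithGVKParser(gvkParser),
	)
	// result carries the predicted vs. live comparison; err is non-nil if
	// the dry-run apply or the diff itself failed.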
File diff suppressed because it is too large
@ -0,0 +1,2 @@
Please check the doc.go file for more details about
how to use and maintain the code in this package.
@ -0,0 +1,47 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package fieldmanager

import (
	"bytes"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
)

// EmptyFields represents a set with no paths
// It looks like metav1.Fields{Raw: []byte("{}")}
var EmptyFields = func() metav1.FieldsV1 {
	f, err := SetToFields(*fieldpath.NewSet())
	if err != nil {
		panic("should never happen")
	}
	return f
}()

// FieldsToSet creates a set paths from an input trie of fields
func FieldsToSet(f metav1.FieldsV1) (s fieldpath.Set, err error) {
	err = s.FromJSON(bytes.NewReader(f.Raw))
	return s, err
}

// SetToFields creates a trie of fields from an input set of paths
func SetToFields(s fieldpath.Set) (f metav1.FieldsV1, err error) {
	f.Raw, err = s.ToJSON()
	return f, err
}
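Editor's note: a short in-package sketch of how the two helpers round-trip between the FieldsV1 wire format and a fieldpath.Set; the payload is a hypothetical example, and fmt is assumed imported:

	raw := metav1.FieldsV1{Raw: []byte(`{"f:metadata":{"f:labels":{"f:app":{}}}}`)}

	set, err := FieldsToSet(raw) // JSON trie -> fieldpath.Set
	if err != nil {
		panic(err)
	}

	back, err := SetToFields(set) // fieldpath.Set -> JSON trie
	if err != nil {
		panic(err)
	}
	fmt.Println(string(back.Raw)) // prints the same trie back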
@ -0,0 +1,248 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package fieldmanager

import (
	"encoding/json"
	"fmt"
	"sort"

	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
)

// ManagedInterface groups a fieldpath.ManagedFields together with the timestamps associated with each operation.
type ManagedInterface interface {
	// Fields gets the fieldpath.ManagedFields.
	Fields() fieldpath.ManagedFields

	// Times gets the timestamps associated with each operation.
	Times() map[string]*metav1.Time
}

type managedStruct struct {
	fields fieldpath.ManagedFields
	times  map[string]*metav1.Time
}

var _ ManagedInterface = &managedStruct{}

// Fields implements ManagedInterface.
func (m *managedStruct) Fields() fieldpath.ManagedFields {
	return m.fields
}

// Times implements ManagedInterface.
func (m *managedStruct) Times() map[string]*metav1.Time {
	return m.times
}

// NewEmptyManaged creates an empty ManagedInterface.
func NewEmptyManaged() ManagedInterface {
	return NewManaged(fieldpath.ManagedFields{}, map[string]*metav1.Time{})
}

// NewManaged creates a ManagedInterface from a fieldpath.ManagedFields and the timestamps associated with each operation.
func NewManaged(f fieldpath.ManagedFields, t map[string]*metav1.Time) ManagedInterface {
	return &managedStruct{
		fields: f,
		times:  t,
	}
}

// RemoveObjectManagedFields removes the ManagedFields from the object
// before we merge so that it doesn't appear in the ManagedFields
// recursively.
func RemoveObjectManagedFields(obj runtime.Object) {
	accessor, err := meta.Accessor(obj)
	if err != nil {
		panic(fmt.Sprintf("couldn't get accessor: %v", err))
	}
	accessor.SetManagedFields(nil)
}

// EncodeObjectManagedFields converts and stores the fieldpathManagedFields into the objects ManagedFields
func EncodeObjectManagedFields(obj runtime.Object, managed ManagedInterface) error {
	accessor, err := meta.Accessor(obj)
	if err != nil {
		panic(fmt.Sprintf("couldn't get accessor: %v", err))
	}

	encodedManagedFields, err := encodeManagedFields(managed)
	if err != nil {
		return fmt.Errorf("failed to convert back managed fields to API: %v", err)
	}
	accessor.SetManagedFields(encodedManagedFields)

	return nil
}

// DecodeManagedFields converts ManagedFields from the wire format (api format)
// to the format used by sigs.k8s.io/structured-merge-diff
func DecodeManagedFields(encodedManagedFields []metav1.ManagedFieldsEntry) (ManagedInterface, error) {
	managed := managedStruct{}
	managed.fields = make(fieldpath.ManagedFields, len(encodedManagedFields))
	managed.times = make(map[string]*metav1.Time, len(encodedManagedFields))

	for i, encodedVersionedSet := range encodedManagedFields {
		switch encodedVersionedSet.Operation {
		case metav1.ManagedFieldsOperationApply, metav1.ManagedFieldsOperationUpdate:
		default:
			return nil, fmt.Errorf("operation must be `Apply` or `Update`")
		}
		if len(encodedVersionedSet.APIVersion) < 1 {
			return nil, fmt.Errorf("apiVersion must not be empty")
		}
		switch encodedVersionedSet.FieldsType {
		case "FieldsV1":
			// Valid case.
		case "":
			return nil, fmt.Errorf("missing fieldsType in managed fields entry %d", i)
		default:
			return nil, fmt.Errorf("invalid fieldsType %q in managed fields entry %d", encodedVersionedSet.FieldsType, i)
		}
		manager, err := BuildManagerIdentifier(&encodedVersionedSet)
		if err != nil {
			return nil, fmt.Errorf("error decoding manager from %v: %v", encodedVersionedSet, err)
		}
		managed.fields[manager], err = decodeVersionedSet(&encodedVersionedSet)
		if err != nil {
			return nil, fmt.Errorf("error decoding versioned set from %v: %v", encodedVersionedSet, err)
		}
		managed.times[manager] = encodedVersionedSet.Time
	}
	return &managed, nil
}

// BuildManagerIdentifier creates a manager identifier string from a ManagedFieldsEntry
func BuildManagerIdentifier(encodedManager *metav1.ManagedFieldsEntry) (manager string, err error) {
	encodedManagerCopy := *encodedManager

	// Never include fields type in the manager identifier
	encodedManagerCopy.FieldsType = ""

	// Never include the fields in the manager identifier
	encodedManagerCopy.FieldsV1 = nil

	// Never include the time in the manager identifier
	encodedManagerCopy.Time = nil

	// For appliers, don't include the APIVersion in the manager identifier,
	// so it will always have the same manager identifier each time it applied.
	if encodedManager.Operation == metav1.ManagedFieldsOperationApply {
		encodedManagerCopy.APIVersion = ""
	}

	// Use the remaining fields to build the manager identifier
	b, err := json.Marshal(&encodedManagerCopy)
	if err != nil {
		return "", fmt.Errorf("error marshalling manager identifier: %v", err)
	}

	return string(b), nil
}

func decodeVersionedSet(encodedVersionedSet *metav1.ManagedFieldsEntry) (versionedSet fieldpath.VersionedSet, err error) {
	fields := EmptyFields
	if encodedVersionedSet.FieldsV1 != nil {
		fields = *encodedVersionedSet.FieldsV1
	}
	set, err := FieldsToSet(fields)
	if err != nil {
		return nil, fmt.Errorf("error decoding set: %v", err)
	}
	return fieldpath.NewVersionedSet(&set, fieldpath.APIVersion(encodedVersionedSet.APIVersion), encodedVersionedSet.Operation == metav1.ManagedFieldsOperationApply), nil
}

// encodeManagedFields converts ManagedFields from the format used by
// sigs.k8s.io/structured-merge-diff to the wire format (api format)
func encodeManagedFields(managed ManagedInterface) (encodedManagedFields []metav1.ManagedFieldsEntry, err error) {
	if len(managed.Fields()) == 0 {
		return nil, nil
	}
	encodedManagedFields = []metav1.ManagedFieldsEntry{}
	for manager := range managed.Fields() {
		versionedSet := managed.Fields()[manager]
		v, err := encodeManagerVersionedSet(manager, versionedSet)
		if err != nil {
			return nil, fmt.Errorf("error encoding versioned set for %v: %v", manager, err)
		}
		if t, ok := managed.Times()[manager]; ok {
			v.Time = t
		}
		encodedManagedFields = append(encodedManagedFields, *v)
	}
	return sortEncodedManagedFields(encodedManagedFields)
}

func sortEncodedManagedFields(encodedManagedFields []metav1.ManagedFieldsEntry) (sortedManagedFields []metav1.ManagedFieldsEntry, err error) {
	sort.Slice(encodedManagedFields, func(i, j int) bool {
		p, q := encodedManagedFields[i], encodedManagedFields[j]

		if p.Operation != q.Operation {
			return p.Operation < q.Operation
		}

		pSeconds, qSeconds := int64(0), int64(0)
		if p.Time != nil {
			pSeconds = p.Time.Unix()
		}
		if q.Time != nil {
			qSeconds = q.Time.Unix()
		}
		if pSeconds != qSeconds {
			return pSeconds < qSeconds
		}

		if p.Manager != q.Manager {
			return p.Manager < q.Manager
		}

		if p.APIVersion != q.APIVersion {
			return p.APIVersion < q.APIVersion
		}
		return p.Subresource < q.Subresource
	})

	return encodedManagedFields, nil
}

func encodeManagerVersionedSet(manager string, versionedSet fieldpath.VersionedSet) (encodedVersionedSet *metav1.ManagedFieldsEntry, err error) {
	encodedVersionedSet = &metav1.ManagedFieldsEntry{}

	// Get as many fields as we can from the manager identifier
	err = json.Unmarshal([]byte(manager), encodedVersionedSet)
	if err != nil {
		return nil, fmt.Errorf("error unmarshalling manager identifier %v: %v", manager, err)
	}

	// Get the APIVersion, Operation, and Fields from the VersionedSet
	encodedVersionedSet.APIVersion = string(versionedSet.APIVersion())
	if versionedSet.Applied() {
		encodedVersionedSet.Operation = metav1.ManagedFieldsOperationApply
	}
	encodedVersionedSet.FieldsType = "FieldsV1"
	fields, err := SetToFields(*versionedSet.Set())
	if err != nil {
		return nil, fmt.Errorf("error encoding set: %v", err)
	}
	encodedVersionedSet.FieldsV1 = &fields

	return encodedVersionedSet, nil
}
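Editor's note: a sketch of DecodeManagedFields in use, decoding a single (hypothetical) managedFields entry into structured-merge-diff form and printing the owned field paths; fmt is assumed imported:

	entries := []metav1.ManagedFieldsEntry{{
		Manager:    "argocd-controller",
		Operation:  metav1.ManagedFieldsOperationApply,
		APIVersion: "apps/v1",
		FieldsType: "FieldsV1",
		FieldsV1:   &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)},
	}}

	managed, err := DecodeManagedFields(entries)
	if err != nil {
		panic(err)
	}
	for manager, versioned := range managed.Fields() {
		js, _ := versioned.Set().ToJSON() // serialize the owned paths back to JSON
		fmt.Printf("%s owns %s\n", manager, js)
	}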
@ -0,0 +1,130 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package fieldmanager

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/util/managedfields"
	"k8s.io/kube-openapi/pkg/util/proto"
	"sigs.k8s.io/structured-merge-diff/v4/typed"
	"sigs.k8s.io/structured-merge-diff/v4/value"
)

// TypeConverter allows you to convert from runtime.Object to
// typed.TypedValue and the other way around.
type TypeConverter interface {
	ObjectToTyped(runtime.Object) (*typed.TypedValue, error)
	TypedToObject(*typed.TypedValue) (runtime.Object, error)
}

// DeducedTypeConverter is a TypeConverter for CRDs that don't have a
// schema. It does implement the same interface though (and create the
// same types of objects), so that everything can still work the same.
// CRDs are merged with all their fields being "atomic" (lists
// included).
//
// Note that this is not going to be sufficient for converting to/from
// CRDs that have a schema defined (we don't support that schema yet).
// TODO(jennybuckley): Use the schema provided by a CRD if it exists.
type DeducedTypeConverter struct{}

var _ TypeConverter = DeducedTypeConverter{}

// ObjectToTyped converts an object into a TypedValue with a "deduced type".
func (DeducedTypeConverter) ObjectToTyped(obj runtime.Object) (*typed.TypedValue, error) {
	switch o := obj.(type) {
	case *unstructured.Unstructured:
		return typed.DeducedParseableType.FromUnstructured(o.UnstructuredContent())
	default:
		return typed.DeducedParseableType.FromStructured(obj)
	}
}

// TypedToObject transforms the typed value into a runtime.Object. That
// is not specific to deduced type.
func (DeducedTypeConverter) TypedToObject(value *typed.TypedValue) (runtime.Object, error) {
	return valueToObject(value.AsValue())
}

type typeConverter struct {
	parser *managedfields.GvkParser
}

var _ TypeConverter = &typeConverter{}

// NewTypeConverter builds a TypeConverter from a proto.Models. This
// will automatically find the proper version of the object, and the
// corresponding schema information.
func NewTypeConverter(models proto.Models, preserveUnknownFields bool) (TypeConverter, error) {
	parser, err := managedfields.NewGVKParser(models, preserveUnknownFields)
	if err != nil {
		return nil, err
	}
	return &typeConverter{parser: parser}, nil
}

func (c *typeConverter) ObjectToTyped(obj runtime.Object) (*typed.TypedValue, error) {
	gvk := obj.GetObjectKind().GroupVersionKind()
	t := c.parser.Type(gvk)
	if t == nil {
		return nil, newNoCorrespondingTypeError(gvk)
	}
	switch o := obj.(type) {
	case *unstructured.Unstructured:
		return t.FromUnstructured(o.UnstructuredContent())
	default:
		return t.FromStructured(obj)
	}
}

func (c *typeConverter) TypedToObject(value *typed.TypedValue) (runtime.Object, error) {
	return valueToObject(value.AsValue())
}

func valueToObject(val value.Value) (runtime.Object, error) {
	vu := val.Unstructured()
	switch o := vu.(type) {
	case map[string]any:
		return &unstructured.Unstructured{Object: o}, nil
	default:
		return nil, fmt.Errorf("failed to convert value to unstructured for type %T", vu)
	}
}

type noCorrespondingTypeErr struct {
	gvk schema.GroupVersionKind
}

func newNoCorrespondingTypeError(gvk schema.GroupVersionKind) error {
	return &noCorrespondingTypeErr{gvk: gvk}
}

func (k *noCorrespondingTypeErr) Error() string {
	return fmt.Sprintf("no corresponding type for %v", k.gvk)
}

func isNoCorrespondingTypeError(err error) bool {
	if err == nil {
		return false
	}
	_, ok := err.(*noCorrespondingTypeErr)
	return ok
}
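Editor's note: a round-trip sketch for the deduced converter, using a hypothetical unstructured Pod (in-package, so no extra imports beyond the file's own):

	tc := DeducedTypeConverter{}
	obj := &unstructured.Unstructured{Object: map[string]any{
		"apiVersion": "v1",
		"kind":       "Pod",
		"metadata":   map[string]any{"name": "demo"},
	}}
	typedValue, err := tc.ObjectToTyped(obj) // into structured-merge-diff form
	if err != nil {
		panic(err)
	}
	roundTripped, err := tc.TypedToObject(typedValue) // and back to a runtime.Object
	if err != nil {
		panic(err)
	}
	_ = roundTripped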
@ -0,0 +1,101 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package fieldmanager

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
	"sigs.k8s.io/structured-merge-diff/v4/merge"
	"sigs.k8s.io/structured-merge-diff/v4/typed"
)

// versionConverter is an implementation of
// sigs.k8s.io/structured-merge-diff/merge.Converter
type versionConverter struct {
	typeConverter   TypeConverter
	objectConvertor runtime.ObjectConvertor
	hubGetter       func(from schema.GroupVersion) schema.GroupVersion
}

var _ merge.Converter = &versionConverter{}

// NewVersionConverter builds a VersionConverter from a TypeConverter and an ObjectConvertor.
func newVersionConverter(t TypeConverter, o runtime.ObjectConvertor, h schema.GroupVersion) merge.Converter {
	return &versionConverter{
		typeConverter:   t,
		objectConvertor: o,
		hubGetter: func(from schema.GroupVersion) schema.GroupVersion {
			return schema.GroupVersion{
				Group:   from.Group,
				Version: h.Version,
			}
		},
	}
}

// NewCRDVersionConverter builds a VersionConverter for CRDs from a TypeConverter and an ObjectConvertor.
func newCRDVersionConverter(t TypeConverter, o runtime.ObjectConvertor, h schema.GroupVersion) merge.Converter {
	return &versionConverter{
		typeConverter:   t,
		objectConvertor: o,
		hubGetter: func(from schema.GroupVersion) schema.GroupVersion {
			return h
		},
	}
}

// Convert implements sigs.k8s.io/structured-merge-diff/merge.Converter
func (v *versionConverter) Convert(object *typed.TypedValue, version fieldpath.APIVersion) (*typed.TypedValue, error) {
	// Convert the smd typed value to a kubernetes object.
	objectToConvert, err := v.typeConverter.TypedToObject(object)
	if err != nil {
		return object, err
	}

	// Parse the target groupVersion.
	groupVersion, err := schema.ParseGroupVersion(string(version))
	if err != nil {
		return object, err
	}

	// If attempting to convert to the same version as we already have, just return it.
	fromVersion := objectToConvert.GetObjectKind().GroupVersionKind().GroupVersion()
	if fromVersion == groupVersion {
		return object, nil
	}

	// Convert to internal
	internalObject, err := v.objectConvertor.ConvertToVersion(objectToConvert, v.hubGetter(fromVersion))
	if err != nil {
		return object, err
	}

	// Convert the object into the target version
	convertedObject, err := v.objectConvertor.ConvertToVersion(internalObject, groupVersion)
	if err != nil {
		return object, err
	}

	// Convert the object back to a smd typed value and return it.
	return v.typeConverter.ObjectToTyped(convertedObject)
}

// IsMissingVersionError
func (v *versionConverter) IsMissingVersionError(err error) bool {
	return runtime.IsNotRegisteredError(err) || isNoCorrespondingTypeError(err)
}
@ -0,0 +1,25 @@
/*
Package fieldmanager is a special package as its main purpose
is to expose the dependencies required by the structured-merge-diff
library to calculate diffs when the server-side apply option is enabled.
The dependency tree necessary to have a `merge.Updater` instance
isn't trivial to implement, so the strategy used is to borrow a copy
from the Kubernetes apiserver codebase in order to expose the required
functionality.

Below is a list of the borrowed files and a reference to the
package/file in Kubernetes they were copied from:

- borrowed_fields.go: k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/fields.go
- borrowed_managedfields.go: k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/managedfields.go
- borrowed_typeconverter.go: k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/typeconverter.go
- borrowed_versionconverter.go: k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/versionconverter.go

In order to keep maintenance as minimal as possible, the borrowed
files are verbatim copies from Kubernetes. The private objects that
need to be exposed are wrapped in the wrapper.go file. Updating
the borrowed files should be trivial in most cases but must be done
manually, as we have no control over future refactorings Kubernetes
might do.
*/
package fieldmanager
@ -0,0 +1,22 @@
package fieldmanager

/*
In order to keep maintenance as minimal as possible, the borrowed
files in this package are verbatim copies from Kubernetes. The
private objects that need to be exposed are wrapped and exposed
in this file.
*/

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/util/managedfields"
	"sigs.k8s.io/structured-merge-diff/v4/merge"
)

// NewVersionConverter exposes the version converter built by the
// borrowed private function from the k8s apiserver handler.
func NewVersionConverter(gvkParser *managedfields.GvkParser, o runtime.ObjectConvertor, h schema.GroupVersion) merge.Converter {
	tc := &typeConverter{parser: gvkParser}
	return newVersionConverter(tc, o, h)
}
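Editor's note: a wiring sketch for the exposed constructor. Here gvkParser is assumed to come from managedfields.NewGVKParser over the cluster's OpenAPI models, scheme.Scheme (k8s.io/client-go/kubernetes/scheme) serves as the runtime.ObjectConvertor, and the hub version is illustrative only:

	converter := fieldmanager.NewVersionConverter(
		gvkParser,
		scheme.Scheme,
		schema.GroupVersion{Group: "apps", Version: "v1"},
	)
	_ = converter // typically handed to a structured-merge-diff merge.Updater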
@ -0,0 +1,58 @@
// Code generated by mockery v2.38.0. DO NOT EDIT.

package mocks

import (
	context "context"

	mock "github.com/stretchr/testify/mock"

	unstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

// ServerSideDryRunner is an autogenerated mock type for the ServerSideDryRunner type
type ServerSideDryRunner struct {
	mock.Mock
}

// Run provides a mock function with given fields: ctx, obj, manager
func (_m *ServerSideDryRunner) Run(ctx context.Context, obj *unstructured.Unstructured, manager string) (string, error) {
	ret := _m.Called(ctx, obj, manager)

	if len(ret) == 0 {
		panic("no return value specified for Run")
	}

	var r0 string
	var r1 error
	if rf, ok := ret.Get(0).(func(context.Context, *unstructured.Unstructured, string) (string, error)); ok {
		return rf(ctx, obj, manager)
	}
	if rf, ok := ret.Get(0).(func(context.Context, *unstructured.Unstructured, string) string); ok {
		r0 = rf(ctx, obj, manager)
	} else {
		r0 = ret.Get(0).(string)
	}

	if rf, ok := ret.Get(1).(func(context.Context, *unstructured.Unstructured, string) error); ok {
		r1 = rf(ctx, obj, manager)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// NewServerSideDryRunner creates a new instance of ServerSideDryRunner. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewServerSideDryRunner(t interface {
	mock.TestingT
	Cleanup(func())
}) *ServerSideDryRunner {
	mock := &ServerSideDryRunner{}
	mock.Mock.Test(t)

	t.Cleanup(func() { mock.AssertExpectations(t) })

	return mock
}
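Editor's note: a sketch of this mock in a test; the expectation values are hypothetical:

	dryRunner := mocks.NewServerSideDryRunner(t)
	dryRunner.On("Run", mock.Anything, mock.Anything, "argocd-controller").
		Return(`{"apiVersion":"v1","kind":"Service"}`, nil)
	// dryRunner now satisfies diff.ServerSideDryRunner, and its expectations
	// are asserted automatically when the test cleans up.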
@ -0,0 +1,80 @@
package testdata

import _ "embed"

var (
	//go:embed smd-service-config.yaml
	ServiceConfigYAML string

	//go:embed smd-service-live.yaml
	ServiceLiveYAML string

	//go:embed smd-service-config-2-ports.yaml
	ServiceConfigWith2Ports string

	//go:embed smd-service-live-with-type.yaml
	LiveServiceWithTypeYAML string

	//go:embed smd-service-config-ports.yaml
	ServiceConfigWithSamePortsYAML string

	//go:embed smd-deploy-live.yaml
	DeploymentLiveYAML string

	//go:embed smd-deploy-config.yaml
	DeploymentConfigYAML string

	//go:embed smd-deploy2-live.yaml
	Deployment2LiveYAML string

	//go:embed smd-deploy2-config.yaml
	Deployment2ConfigYAML string

	//go:embed smd-deploy2-predicted-live.json
	Deployment2PredictedLiveJSONSSD string

	// OpenAPIV2Doc is a binary representation of the openapi
	// document available in a given k8s instance. To update
	// this file the following commands can be executed:
	// kubectl proxy --port=7777 &
	// curl -s -H Accept:application/com.github.proto-openapi.spec.v2@v1.0+protobuf http://localhost:7777/openapi/v2 > openapiv2.bin
	//
	//go:embed openapiv2.bin
	OpenAPIV2Doc []byte

	//go:embed ssd-service-config.yaml
	ServiceConfigYAMLSSD string

	//go:embed ssd-service-live.yaml
	ServiceLiveYAMLSSD string

	//go:embed ssd-service-predicted-live.json
	ServicePredictedLiveJSONSSD string

	//go:embed ssd-deploy-nested-config.yaml
	DeploymentNestedConfigYAMLSSD string

	//go:embed ssd-deploy-nested-live.yaml
	DeploymentNestedLiveYAMLSSD string

	//go:embed ssd-deploy-nested-predicted-live.json
	DeploymentNestedPredictedLiveJSONSSD string

	//go:embed ssd-deploy-with-manual-apply-config.yaml
	DeploymentApplyConfigYAMLSSD string

	//go:embed ssd-deploy-with-manual-apply-live.yaml
	DeploymentApplyLiveYAMLSSD string

	//go:embed ssd-deploy-with-manual-apply-predicted-live.json
	DeploymentApplyPredictedLiveJSONSSD string

	//go:embed ssd-svc-label-live.yaml
	ServiceLiveLabelYAMLSSD string

	//go:embed ssd-svc-no-label-config.yaml
	ServiceConfigNoLabelYAMLSSD string

	//go:embed ssd-svc-no-label-predicted-live.json
	ServicePredictedLiveNoLabelJSONSSD string
)
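Editor's note: the embedded fixtures are consumed by the diff tests as plain strings; a sketch of loading one into an unstructured object (the sigs.k8s.io/yaml import and the surrounding *testing.T are assumptions about the test setup, not part of this diff):

	var obj unstructured.Unstructured
	if err := yaml.Unmarshal([]byte(testdata.ServiceConfigYAML), &obj.Object); err != nil {
		t.Fatal(err)
	}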
@ -35,6 +35,19 @@
      {
        "name": "solr-http",
        "port": 8080
      },
      {
        "name": "solr-https",
        "port": 8443
      },
      {
        "name": "solr-node",
        "port": 8983,
        "protocol": "UDP"
      },
      {
        "name": "solr-zookeeper",
        "port": 9983
      }
    ]
  }
@ -4,7 +4,7 @@
  "metadata": {
    "annotations": {
      "description": "A workaround to support a set of backend IPs for solr",
      "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"kind\":\"Endpoints\",\"metadata\":{\"annotations\":{\"description\":\"A workaround to support a set of backend IPs for solr\",\"linkerd.io/inject\":\"disabled\"},\"labels\":{\"app.kubernetes.io/instance\":\"guestbook\"},\"name\":\"solrcloud\",\"namespace\":\"default\"},\"subsets\":[{\"addresses\":[{\"ip\":\"172.20.10.97\"},{\"ip\":\"172.20.10.98\"},{\"ip\":\"172.20.10.99\"},{\"ip\":\"172.20.10.100\"},{\"ip\":\"172.20.10.101\"}],\"ports\":[{\"name\":\"solr-http\",\"port\":8080}]}]}\n",
      "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"kind\":\"Endpoints\",\"metadata\":{\"annotations\":{\"description\":\"A workaround to support a set of backend IPs for solr\",\"linkerd.io/inject\":\"disabled\"},\"labels\":{\"app.kubernetes.io/instance\":\"guestbook\"},\"name\":\"solrcloud\",\"namespace\":\"default\"},\"subsets\":[{\"addresses\":[{\"ip\":\"172.20.10.97\"},{\"ip\":\"172.20.10.98\"},{\"ip\":\"172.20.10.99\"},{\"ip\":\"172.20.10.100\"},{\"ip\":\"172.20.10.101\"}],\"ports\":[{\"name\":\"solr-http\",\"port\":8080},{\"name\":\"solr-https\",\"port\":8443},{\"name\":\"solr-node\",\"port\":8983,\"protocol\":\"UDP\"},{\"name\":\"solr-zookeeper\",\"port\":9983}]}]}\n",
      "linkerd.io/inject": "disabled"
    },
    "creationTimestamp": null,
@ -32,24 +32,17 @@
        },
        "manager": "main",
        "operation": "Update",
        "time": "2020-10-09T17:26:49Z"
        "time": null
      }
    ],
    "name": "solrcloud",
    "namespace": "default",
    "resourceVersion": "139834",
    "selfLink": "/api/v1/namespaces/default/endpoints/solrcloud",
    "uid": "f11285f4-987b-4194-bda8-6372b3f3f08f"
    "resourceVersion": "2336",
    "uid": "439a86ee-cbf9-4717-9ce3-d44079333a27"
  },
  "subsets": [
    {
      "addresses": [
        {
          "ip": "172.20.10.100"
        },
        {
          "ip": "172.20.10.101"
        },
        {
          "ip": "172.20.10.97"
        },

@ -58,6 +51,12 @@
        },
        {
          "ip": "172.20.10.99"
        },
        {
          "ip": "172.20.10.100"
        },
        {
          "ip": "172.20.10.101"
        }
      ],
      "ports": [
@ -65,6 +64,21 @@
          "name": "solr-http",
          "port": 8080,
          "protocol": "TCP"
        },
        {
          "name": "solr-https",
          "port": 8443,
          "protocol": "TCP"
        },
        {
          "name": "solr-node",
          "port": 8983,
          "protocol": "UDP"
        },
        {
          "name": "solr-zookeeper",
          "port": 9983,
          "protocol": "TCP"
        }
      ]
    }
File diff suppressed because it is too large
@ -0,0 +1,33 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: missing
    applications.argoproj.io/app-name: nginx
    something-else: bla
  name: nginx-deployment
  namespace: default
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
        applications.argoproj.io/app-name: nginx
    spec:
      containers:
        - image: 'nginx:1.23.1'
          imagePullPolicy: Never
          livenessProbe:
            exec:
              command:
                - cat
                - non-existent-file
            initialDelaySeconds: 5
            periodSeconds: 180
          name: nginx
          ports:
            - containerPort: 80
@ -0,0 +1,149 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  annotations:
    deployment.kubernetes.io/revision: '1'
  creationTimestamp: '2022-09-18T23:50:25Z'
  generation: 1
  labels:
    app: missing
    applications.argoproj.io/app-name: nginx
    something-else: bla
  managedFields:
    - apiVersion: apps/v1
      fieldsType: FieldsV1
      fieldsV1:
        'f:metadata':
          'f:labels':
            'f:app': {}
            'f:applications.argoproj.io/app-name': {}
            'f:something-else': {}
        'f:spec':
          'f:replicas': {}
          'f:selector': {}
          'f:template':
            'f:metadata':
              'f:labels':
                'f:app': {}
                'f:applications.argoproj.io/app-name': {}
            'f:spec':
              'f:containers':
                'k:{"name":"nginx"}':
                  .: {}
                  'f:image': {}
                  'f:imagePullPolicy': {}
                  'f:livenessProbe':
                    'f:exec':
                      'f:command': {}
                    'f:initialDelaySeconds': {}
                    'f:periodSeconds': {}
                  'f:name': {}
                  'f:ports':
                    'k:{"containerPort":80,"protocol":"TCP"}':
                      .: {}
                      'f:containerPort': {}
      manager: argocd-controller
      operation: Apply
      time: '2022-09-18T23:50:25Z'
    - apiVersion: apps/v1
      fieldsType: FieldsV1
      fieldsV1:
        'f:metadata':
          'f:annotations':
            .: {}
            'f:deployment.kubernetes.io/revision': {}
        'f:status':
          'f:availableReplicas': {}
          'f:conditions':
            .: {}
            'k:{"type":"Available"}':
              .: {}
              'f:lastTransitionTime': {}
              'f:lastUpdateTime': {}
              'f:message': {}
              'f:reason': {}
              'f:status': {}
              'f:type': {}
            'k:{"type":"Progressing"}':
              .: {}
              'f:lastTransitionTime': {}
              'f:lastUpdateTime': {}
              'f:message': {}
              'f:reason': {}
              'f:status': {}
              'f:type': {}
          'f:observedGeneration': {}
          'f:readyReplicas': {}
          'f:replicas': {}
          'f:updatedReplicas': {}
      manager: kube-controller-manager
      operation: Update
      subresource: status
      time: '2022-09-23T18:30:59Z'
  name: nginx-deployment
  namespace: default
  resourceVersion: '7492752'
  uid: 731f7434-d3d9-47fa-b179-d9368a84f7c9
spec:
  progressDeadlineSeconds: 600
  replicas: 2
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      app: nginx
  strategy:
    rollingUpdate:
      maxSurge: 25%
      maxUnavailable: 25%
    type: RollingUpdate
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: nginx
        applications.argoproj.io/app-name: nginx
    spec:
      containers:
        - image: 'nginx:1.23.1'
          imagePullPolicy: Never
          livenessProbe:
            exec:
              command:
                - cat
                - non-existent-file
            failureThreshold: 3
            initialDelaySeconds: 5
            periodSeconds: 180
            successThreshold: 1
            timeoutSeconds: 1
          name: nginx
          ports:
            - containerPort: 80
              protocol: TCP
          resources: {}
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30
status:
  availableReplicas: 2
  conditions:
    - lastTransitionTime: '2022-09-18T23:50:25Z'
      lastUpdateTime: '2022-09-18T23:50:26Z'
      message: ReplicaSet "nginx-deployment-6d68ff5f86" has successfully progressed.
      reason: NewReplicaSetAvailable
      status: 'True'
      type: Progressing
    - lastTransitionTime: '2022-09-23T18:30:59Z'
      lastUpdateTime: '2022-09-23T18:30:59Z'
      message: Deployment has minimum availability.
      reason: MinimumReplicasAvailable
      status: 'True'
      type: Available
  observedGeneration: 1
  readyReplicas: 2
  replicas: 2
  updatedReplicas: 2
@ -0,0 +1,36 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: missing
    applications.argoproj.io/app-name: nginx
    something-else: bla
  name: nginx-deployment
  namespace: default
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
        applications.argoproj.io/app-name: nginx
    spec:
      containers:
        - image: 'nginx:1.23.1'
          imagePullPolicy: Never
          livenessProbe:
            exec:
              command:
                - cat
                - non-existent-file
            initialDelaySeconds: 5
            periodSeconds: 180
          name: nginx
          ports:
            - containerPort: 8081
              protocol: UDP
            - containerPort: 80
              protocol: TCP
@ -0,0 +1,161 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  annotations:
    deployment.kubernetes.io/revision: '1'
  creationTimestamp: '2022-09-18T23:50:25Z'
  generation: 1
  labels:
    app: missing
    applications.argoproj.io/app-name: nginx
    something-else: bla
  managedFields:
    - apiVersion: apps/v1
      fieldsType: FieldsV1
      fieldsV1:
        'f:metadata':
          'f:labels':
            'f:app': {}
            'f:applications.argoproj.io/app-name': {}
            'f:something-else': {}
        'f:spec':
          'f:replicas': {}
          'f:selector': {}
          'f:template':
            'f:metadata':
              'f:labels':
                'f:app': {}
                'f:applications.argoproj.io/app-name': {}
            'f:spec':
              'f:containers':
                'k:{"name":"nginx"}':
                  .: {}
                  'f:image': {}
                  'f:imagePullPolicy': {}
                  'f:livenessProbe':
                    'f:exec':
                      'f:command': {}
                    'f:initialDelaySeconds': {}
                    'f:periodSeconds': {}
                  'f:name': {}
                  'f:ports':
                    'k:{"containerPort":80,"protocol":"TCP"}':
                      .: {}
                      'f:containerPort': {}
                      'f:protocol': {}
                  'f:resources':
                    'f:requests':
                      'f:cpu': {}
                      'f:memory': {}
      manager: argocd-controller
      operation: Apply
      time: '2022-09-18T23:50:25Z'
    - apiVersion: apps/v1
      fieldsType: FieldsV1
      fieldsV1:
        'f:metadata':
          'f:annotations':
            .: {}
            'f:deployment.kubernetes.io/revision': {}
        'f:status':
          'f:availableReplicas': {}
          'f:conditions':
            .: {}
            'k:{"type":"Available"}':
              .: {}
              'f:lastTransitionTime': {}
              'f:lastUpdateTime': {}
              'f:message': {}
              'f:reason': {}
              'f:status': {}
              'f:type': {}
            'k:{"type":"Progressing"}':
              .: {}
              'f:lastTransitionTime': {}
              'f:lastUpdateTime': {}
              'f:message': {}
              'f:reason': {}
              'f:status': {}
              'f:type': {}
          'f:observedGeneration': {}
          'f:readyReplicas': {}
          'f:replicas': {}
          'f:updatedReplicas': {}
      manager: kube-controller-manager
      operation: Update
      subresource: status
      time: '2022-09-23T18:30:59Z'
  name: nginx-deployment
  namespace: default
  resourceVersion: '7492752'
  uid: 731f7434-d3d9-47fa-b179-d9368a84f7c9
spec:
  progressDeadlineSeconds: 600
  replicas: 2
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      app: nginx
  strategy:
    rollingUpdate:
      maxSurge: 25%
      maxUnavailable: 25%
    type: RollingUpdate
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: nginx
        applications.argoproj.io/app-name: nginx
    spec:
      containers:
        - image: 'nginx:1.23.1'
          imagePullPolicy: Never
          livenessProbe:
            exec:
              command:
                - cat
                - non-existent-file
            failureThreshold: 3
            initialDelaySeconds: 5
            periodSeconds: 180
            successThreshold: 1
            timeoutSeconds: 1
          name: nginx
          ports:
            - containerPort: 80
              protocol: TCP
            - containerPort: 8080
              protocol: TCP
            - containerPort: 8081
              protocol: UDP
          resources:
            requests:
              memory: 512Mi
              cpu: 500m
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30
status:
  availableReplicas: 2
  conditions:
    - lastTransitionTime: '2022-09-18T23:50:25Z'
      lastUpdateTime: '2022-09-18T23:50:26Z'
      message: ReplicaSet "nginx-deployment-6d68ff5f86" has successfully progressed.
      reason: NewReplicaSetAvailable
      status: 'True'
      type: Progressing
    - lastTransitionTime: '2022-09-23T18:30:59Z'
      lastUpdateTime: '2022-09-23T18:30:59Z'
      message: Deployment has minimum availability.
      reason: MinimumReplicasAvailable
      status: 'True'
      type: Available
  observedGeneration: 1
  readyReplicas: 2
  replicas: 2
  updatedReplicas: 2
@ -0,0 +1,124 @@
{
  "apiVersion": "apps/v1",
  "kind": "Deployment",
  "metadata": {
    "labels": {
      "app": "missing",
      "applications.argoproj.io/app-name": "nginx",
      "something-else": "bla"
    },
    "name": "nginx-deployment",
    "namespace": "default",
    "managedFields": [
      {
        "apiVersion": "apps/v1",
        "fieldsType": "FieldsV1",
        "fieldsV1": {
          "f:metadata": {
            "f:labels": {
              "f:app": {},
              "f:applications.argoproj.io/app-name": {},
              "f:something-else": {}
            }
          },
          "f:spec": {
            "f:replicas": {},
            "f:selector": {},
            "f:template": {
              "f:metadata": {
                "f:labels": {
                  "f:app": {},
                  "f:applications.argoproj.io/app-name": {}
                }
              },
              "f:spec": {
                "f:containers": {
                  "k:{\"name\":\"nginx\"}": {
                    ".": {},
                    "f:image": {},
                    "f:imagePullPolicy": {},
                    "f:livenessProbe": {
                      "f:exec": {
                        "f:command": {}
                      },
                      "f:initialDelaySeconds": {},
                      "f:periodSeconds": {}
                    },
                    "f:name": {},
                    "f:ports": {
                      "k:{\"containerPort\":80,\"protocol\":\"TCP\"}": {
                        ".": {},
                        "f:containerPort": {},
                        "f:protocol": {}
                      }
                    },
                    "f:resources": {
                      "f:requests": {
                        "f:cpu": {},
                        "f:memory": {}
                      }
                    }
                  }
                }
              }
            }
          }
        },
        "manager": "argocd-controller",
        "operation": "Apply",
        "time": "2022-09-18T23:50:25Z"
      }
    ]
  },
  "spec": {
    "replicas": 2,
    "selector": {
      "matchLabels": {
        "app": "nginx"
      }
    },
    "template": {
      "metadata": {
        "labels": {
          "app": "nginx",
          "applications.argoproj.io/app-name": "nginx"
        }
      },
      "spec": {
        "containers": [
          {
            "image": "nginx:1.23.1",
            "imagePullPolicy": "Never",
            "livenessProbe": {
              "exec": {
                "command": [
                  "cat",
                  "non-existent-file"
                ]
              },
              "initialDelaySeconds": 5,
              "periodSeconds": 180
            },
            "name": "nginx",
            "ports": [
              {
                "containerPort": 8081,
                "protocol": "UDP"
              },
              {
                "containerPort": 80,
                "protocol": "TCP"
              }
            ],
            "resources": {
              "requests": {
                "memory": "512Mi",
                "cpu": "500m"
              }
            }
          }
        ]
      }
    }
  }
}
@ -0,0 +1,18 @@
apiVersion: v1
kind: Service
metadata:
  annotations:
    argocd.argoproj.io/sync-options: ServerSideApply=true
  labels:
    app.kubernetes.io/instance: big-crd
  name: multiple-protocol-port-svc
  namespace: default
spec:
  ports:
    - name: rtmpk
      port: 1986
      protocol: UDP
      targetPort: 1986
    - name: rtmp
      port: 1935
      targetPort: 1935
@ -0,0 +1,29 @@
apiVersion: v1
kind: Service
metadata:
  annotations:
    argocd.argoproj.io/sync-options: ServerSideApply=true
  labels:
    app.kubernetes.io/instance: big-crd
  name: multiple-protocol-port-svc
  namespace: default
spec:
  ports:
    - name: rtmpk
      port: 1986
      protocol: UDP
      targetPort: 1986
    - name: rtmp
      port: 1935
      targetPort: 1935
    - name: rtmpq
      port: 1935
      protocol: UDP
      targetPort: 1935
    - name: https
      port: 443
      targetPort: 443
    - name: http3
      port: 443
      protocol: UDP
      targetPort: 443
@ -0,0 +1,21 @@
apiVersion: v1
kind: Service
metadata:
  annotations:
    argocd.argoproj.io/sync-options: ServerSideApply=true
  labels:
    app.kubernetes.io/instance: big-crd
  name: multiple-protocol-port-svc
  namespace: default
spec:
  ports:
    - name: rtmpk
      port: 1986
      protocol: UDP
      targetPort: 1986
    - name: rtmp
      port: 1935
      targetPort: 1936
    - name: https
      port: 443
      targetPort: 443
@ -0,0 +1,110 @@
apiVersion: v1
kind: Service
metadata:
  annotations:
    argocd.argoproj.io/sync-options: ServerSideApply=true
    kubectl.kubernetes.io/last-applied-configuration: >
      {"apiVersion":"v1","kind":"Service","metadata":{"annotations":{"argocd.argoproj.io/sync-options":"ServerSideApply=true"},"name":"multiple-protocol-port-svc","namespace":"default"},"spec":{"ports":[{"name":"rtmpk","port":1986,"protocol":"UDP","targetPort":1986},{"name":"rtmp","port":1935,"protocol":"TCP","targetPort":1935},{"name":"rtmpq","port":1935,"protocol":"UDP","targetPort":1935}]}}
  creationTimestamp: '2022-06-24T19:37:02Z'
  labels:
    app.kubernetes.io/instance: big-crd
  managedFields:
    - apiVersion: v1
      fieldsType: FieldsV1
      fieldsV1:
        'f:metadata':
          'f:annotations':
            'f:argocd.argoproj.io/sync-options': {}
          'f:labels':
            'f:app.kubernetes.io/instance': {}
        'f:spec':
          'f:ports':
            'k:{"port":1935,"protocol":"TCP"}':
              .: {}
              'f:name': {}
              'f:port': {}
              'f:targetPort': {}
            'k:{"port":1986,"protocol":"UDP"}':
              .: {}
              'f:name': {}
              'f:port': {}
              'f:protocol': {}
              'f:targetPort': {}
            'k:{"port":443,"protocol":"TCP"}':
              .: {}
              'f:name': {}
              'f:port': {}
              'f:targetPort': {}
          'f:type': {}
      manager: argocd-controller
      operation: Apply
      time: '2022-06-30T16:28:09Z'
    - apiVersion: v1
      fieldsType: FieldsV1
      fieldsV1:
        'f:metadata':
          'f:annotations':
            .: {}
            'f:kubectl.kubernetes.io/last-applied-configuration': {}
        'f:spec':
          'f:internalTrafficPolicy': {}
          'f:ports':
            .: {}
            'k:{"port":1935,"protocol":"TCP"}':
              .: {}
              'f:name': {}
              'f:port': {}
              'f:protocol': {}
              'f:targetPort': {}
            'k:{"port":1986,"protocol":"UDP"}':
              .: {}
              'f:name': {}
              'f:port': {}
              'f:protocol': {}
              'f:targetPort': {}
          'f:sessionAffinity': {}
      manager: kubectl-client-side-apply
      operation: Update
      time: '2022-06-25T04:18:10Z'
    - apiVersion: v1
      fieldsType: FieldsV1
      fieldsV1:
        'f:status':
          'f:loadBalancer':
            'f:ingress': {}
      manager: kube-vpnkit-forwarder
      operation: Update
      subresource: status
      time: '2022-06-29T12:36:34Z'
  name: multiple-protocol-port-svc
  namespace: default
  resourceVersion: '2138591'
  uid: af42e800-bd33-4412-bc77-d204d298613d
spec:
  clusterIP: 10.111.193.74
  clusterIPs:
    - 10.111.193.74
  externalTrafficPolicy: Cluster
  ipFamilies:
    - IPv4
  ipFamilyPolicy: SingleStack
  ports:
    - name: rtmpk
      nodePort: 31648
      port: 1986
      protocol: UDP
      targetPort: 1986
    - name: rtmp
      nodePort: 30018
      port: 1935
      protocol: TCP
      targetPort: 1935
    - name: https
      nodePort: 31975
      port: 443
      protocol: TCP
      targetPort: 443
  sessionAffinity: None
  type: NodePort
status:
  loadBalancer: {}
@ -0,0 +1,83 @@
apiVersion: v1
kind: Service
metadata:
  annotations:
    argocd.argoproj.io/sync-options: ServerSideApply=true
    kubectl.kubernetes.io/last-applied-configuration: >
      {"apiVersion":"v1","kind":"Service","metadata":{"annotations":{"argocd.argoproj.io/sync-options":"ServerSideApply=true"},"name":"multiple-protocol-port-svc","namespace":"default"},"spec":{"ports":[{"name":"rtmpk","port":1986,"protocol":"UDP","targetPort":1986},{"name":"rtmp","port":1935,"targetPort":1935},{"name":"https","port":443,"targetPort":443}]}}
  creationTimestamp: '2022-06-24T19:37:02Z'
  labels:
    app.kubernetes.io/instance: big-crd
  managedFields:
    - apiVersion: v1
      fieldsType: FieldsV1
      fieldsV1:
        'f:metadata':
          'f:annotations':
            'f:argocd.argoproj.io/sync-options': {}
          'f:labels':
            'f:app.kubernetes.io/instance': {}
        'f:spec':
          'f:ports':
            'k:{"port":1935,"protocol":"TCP"}':
              .: {}
              'f:name': {}
              'f:port': {}
              'f:targetPort': {}
            'k:{"port":1986,"protocol":"UDP"}':
              .: {}
              'f:name': {}
              'f:port': {}
              'f:protocol': {}
              'f:targetPort': {}
            'k:{"port":443,"protocol":"TCP"}':
              .: {}
              'f:name': {}
              'f:port': {}
              'f:targetPort': {}
      manager: argocd-controller
      operation: Apply
      time: '2022-06-24T19:45:02Z'
    - apiVersion: v1
      fieldsType: FieldsV1
      fieldsV1:
        'f:metadata':
          'f:annotations':
            .: {}
            'f:argocd.argoproj.io/sync-options': {}
            'f:kubectl.kubernetes.io/last-applied-configuration': {}
        'f:spec':
          'f:internalTrafficPolicy': {}
          'f:sessionAffinity': {}
          'f:type': {}
      manager: kubectl-client-side-apply
      operation: Update
      time: '2022-06-24T19:37:02Z'
  name: multiple-protocol-port-svc
  namespace: default
  resourceVersion: '1825080'
  uid: af42e800-bd33-4412-bc77-d204d298613d
spec:
  clusterIP: 10.111.193.74
  clusterIPs:
    - 10.111.193.74
  ipFamilies:
    - IPv4
  ipFamilyPolicy: SingleStack
  ports:
    - name: rtmpk
      port: 1986
      protocol: UDP
      targetPort: 1986
    - name: rtmp
      port: 1935
      protocol: TCP
      targetPort: 1935
    - name: https
      port: 443
      protocol: TCP
      targetPort: 443
  sessionAffinity: None
  type: ClusterIP
status:
  loadBalancer: {}
@ -0,0 +1,36 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nested-test-deployment
  namespace: default
  labels:
    app: nested-test
    applications.argoproj.io/app-name: nested-app
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nested-test
  template:
    metadata:
      labels:
        app: nested-test
    spec:
      automountServiceAccountToken: false
      containers:
        - name: main-container
          image: 'nginx:latest'
          ports:
            - containerPort: 80
              name: http
              protocol: TCP
            - containerPort: 443
              name: https
          env:
            - name: ENV_VAR1
              value: "value1"
            - name: ENV_VAR2
              value: "value2"
          resources:
            limits:
              memory: 100Mi
@ -0,0 +1,70 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nested-test-deployment
  namespace: default
  labels:
    app: nested-test
    applications.argoproj.io/app-name: nested-app
  annotations:
    deployment.kubernetes.io/revision: '1'
  managedFields:
    - apiVersion: apps/v1
      fieldsType: FieldsV1
      fieldsV1:
        f:metadata:
          f:labels:
            f:app: {}
            f:applications.argoproj.io/app-name: {}
        f:spec:
          f:replicas: {}
          f:selector: {}
          f:template:
            f:metadata:
              f:labels:
                f:app: {}
            f:spec:
              f:containers:
                k:{"name":"main-container"}:
                  .: {}
                  f:image: {}
                  f:name: {}
                  f:ports:
                    .: {}
                    k:{"containerPort":80,"protocol":"TCP"}:
                      .: {}
                      f:containerPort: {}
                      f:name: {}
                      f:protocol: {}
                  f:env:
                    .: {}
                    k:{"name":"ENV_VAR1"}:
                      .: {}
                      f:name: {}
                      f:value: {}
      manager: argocd-controller
      operation: Apply
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nested-test
  template:
    metadata:
      labels:
        app: nested-test
    spec:
      automountServiceAccountToken: false
      containers:
        - name: main-container
          image: 'nginx:latest'
          ports:
            - containerPort: 80
              name: http
              protocol: TCP
          env:
            - name: ENV_VAR1
              value: "value1"
          resources:
            limits:
              memory: "100Mi"
@@ -0,0 +1,131 @@
{
  "apiVersion": "apps/v1",
  "kind": "Deployment",
  "metadata": {
    "name": "nested-test-deployment",
    "namespace": "default",
    "labels": {
      "app": "nested-test",
      "applications.argoproj.io/app-name": "nested-app"
    },
    "annotations": {
      "deployment.kubernetes.io/revision": "2"
    },
    "managedFields": [
      {
        "apiVersion": "apps/v1",
        "fieldsType": "FieldsV1",
        "fieldsV1": {
          "f:metadata": {
            "f:labels": {
              "f:app": {},
              "f:applications.argoproj.io/app-name": {}
            }
          },
          "f:spec": {
            "f:replicas": {},
            "f:selector": {},
            "f:template": {
              "f:metadata": {
                "f:labels": {
                  "f:app": {}
                }
              },
              "f:spec": {
                "f:containers": {
                  "k:{\"name\":\"main-container\"}": {
                    ".": {},
                    "f:image": {},
                    "f:name": {},
                    "f:ports": {
                      ".": {},
                      "k:{\"containerPort\":80,\"protocol\":\"TCP\"}": {
                        ".": {},
                        "f:containerPort": {},
                        "f:name": {},
                        "f:protocol": {}
                      },
                      "k:{\"containerPort\":443,\"protocol\":\"TCP\"}": {
                        ".": {},
                        "f:containerPort": {},
                        "f:name": {},
                        "f:protocol": {}
                      }
                    },
                    "f:env": {
                      ".": {},
                      "k:{\"name\":\"ENV_VAR1\"}": {
                        ".": {},
                        "f:name": {},
                        "f:value": {}
                      },
                      "k:{\"name\":\"ENV_VAR2\"}": {
                        ".": {},
                        "f:name": {},
                        "f:value": {}
                      }
                    }
                  }
                }
              }
            }
          }
        },
        "manager": "argocd-controller",
        "operation": "Apply",
        "time": "2023-12-19T00:00:00Z"
      }
    ]
  },
  "spec": {
    "replicas": 1,
    "selector": {
      "matchLabels": {
        "app": "nested-test"
      }
    },
    "template": {
      "metadata": {
        "labels": {
          "app": "nested-test"
        }
      },
      "spec": {
        "automountServiceAccountToken": false,
        "containers": [
          {
            "name": "main-container",
            "image": "nginx:latest",
            "ports": [
              {
                "containerPort": 80,
                "name": "http",
                "protocol": "TCP"
              },
              {
                "containerPort": 443,
                "name": "https",
                "protocol": "TCP"
              }
            ],
            "env": [
              {
                "name": "ENV_VAR1",
                "value": "value1"
              },
              {
                "name": "ENV_VAR2",
                "value": "value2"
              }
            ],
            "resources": {
              "limits": {
                "memory": "100Mi"
              }
            }
          }
        ]
      }
    }
  }
}
@@ -0,0 +1,30 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: manual-apply-test-deployment
  namespace: default
  labels:
    app: manual-apply-app
    applications.argoproj.io/app-name: manual-apply-app
spec:
  replicas: 1
  selector:
    matchLabels:
      app: manual-apply-test
  template:
    metadata:
      labels:
        app: manual-apply-test
    spec:
      automountServiceAccountToken: false
      containers:
      - name: main-container
        image: 'nginx:latest'
        ports:
        - containerPort: 80
          name: http
        - containerPort: 40
          name: https
        resources:
          limits:
            memory: "100Mi"
@@ -0,0 +1,181 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  annotations:
    deployment.kubernetes.io/revision: "4"
  creationTimestamp: "2025-02-25T00:20:45Z"
  generation: 4
  labels:
    app: manual-apply-app
    applications.argoproj.io/app-name: manual-apply-app
  managedFields:
  - apiVersion: apps/v1
    fieldsType: FieldsV1
    fieldsV1:
      f:metadata:
        f:annotations: {}
        f:labels:
          .: {}
          f:app: {}
          f:applications.argoproj.io/app-name: {}
      f:spec:
        f:progressDeadlineSeconds: {}
        f:replicas: {}
        f:revisionHistoryLimit: {}
        f:selector: {}
        f:strategy:
          f:rollingUpdate:
            .: {}
            f:maxSurge: {}
            f:maxUnavailable: {}
          f:type: {}
        f:template:
          f:metadata:
            f:labels:
              .: {}
              f:app: {}
          f:spec:
            f:automountServiceAccountToken: {}
            f:containers:
              k:{"name":"main-container"}:
                .: {}
                f:image: {}
                f:imagePullPolicy: {}
                f:name: {}
                f:ports:
                  .: {}
                  k:{"containerPort":80,"protocol":"TCP"}:
                    .: {}
                    f:containerPort: {}
                    f:name: {}
                    f:protocol: {}
                f:resources:
                  .: {}
                  f:limits:
                    .: {}
                    f:memory: {}
                f:terminationMessagePath: {}
                f:terminationMessagePolicy: {}
            f:dnsPolicy: {}
            f:restartPolicy: {}
            f:schedulerName: {}
            f:securityContext: {}
            f:terminationGracePeriodSeconds: {}
    manager: argocd-controller
    operation: Update
    time: "2025-02-25T01:19:32Z"
  - apiVersion: apps/v1
    fieldsType: FieldsV1
    fieldsV1:
      f:metadata:
        f:annotations:
          f:kubectl.kubernetes.io/last-applied-configuration: {}
      f:spec:
        f:template:
          f:spec:
            f:containers:
              k:{"name":"idle"}:
                .: {}
                f:image: {}
                f:imagePullPolicy: {}
                f:name: {}
                f:ports:
                  .: {}
                  k:{"containerPort":8080,"protocol":"TCP"}:
                    .: {}
                    f:containerPort: {}
                    f:name: {}
                    f:protocol: {}
                f:resources: {}
                f:terminationMessagePath: {}
                f:terminationMessagePolicy: {}
    manager: kubectl-client-side-apply
    operation: Update
    time: "2025-02-25T01:29:34Z"
  - apiVersion: apps/v1
    fieldsType: FieldsV1
    fieldsV1:
      f:metadata:
        f:annotations:
          f:deployment.kubernetes.io/revision: {}
      f:status:
        f:availableReplicas: {}
        f:conditions:
          .: {}
          k:{"type":"Available"}:
            .: {}
            f:lastTransitionTime: {}
            f:lastUpdateTime: {}
            f:message: {}
            f:reason: {}
            f:status: {}
            f:type: {}
          k:{"type":"Progressing"}:
            .: {}
            f:lastTransitionTime: {}
            f:lastUpdateTime: {}
            f:message: {}
            f:reason: {}
            f:status: {}
            f:type: {}
        f:observedGeneration: {}
        f:readyReplicas: {}
        f:replicas: {}
        f:updatedReplicas: {}
    manager: kube-controller-manager
    operation: Update
    subresource: status
    time: "2025-02-25T01:29:44Z"
  name: manual-apply-test-deployment
  namespace: default
  resourceVersion: "46835"
  uid: c2ff066f-cbbd-408d-a015-85f1b6332193
spec:
  progressDeadlineSeconds: 600
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      app: manual-apply-test
  strategy:
    rollingUpdate:
      maxSurge: 25%
      maxUnavailable: 25%
    type: RollingUpdate
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: manual-apply-test
    spec:
      automountServiceAccountToken: false
      containers:
      - image: nginx:latest
        imagePullPolicy: Always
        name: main-container
        ports:
        - containerPort: 80
          name: http
          protocol: TCP
        resources:
          limits:
            memory: 100Mi
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
      - image: spurin/idle:latest
        imagePullPolicy: Always
        name: idle
        ports:
        - containerPort: 8080
          name: web
          protocol: TCP
        resources:
          limits:
            memory: 100Mi
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30
@@ -0,0 +1,310 @@
{
  "apiVersion": "apps/v1",
  "kind": "Deployment",
  "metadata": {
    "annotations": {
      "deployment.kubernetes.io/revision": "4",
      "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"apps/v1\",\"kind\":\"Deployment\",\"metadata\":{\"annotations\":{},\"labels\":{\"app\":\"manual-apply-app\",\"applications.argoproj.io/app-name\":\"manual-apply-app\"},\"name\":\"manual-apply-test-deployment\",\"namespace\":\"default\"},\"spec\":{\"replicas\":1,\"selector\":{\"matchLabels\":{\"app\":\"manual-apply-test\"}},\"template\":{\"metadata\":{\"labels\":{\"app\":\"manual-apply-test\"}},\"spec\":{\"automountServiceAccountToken\":false,\"containers\":[{\"image\":\"nginx:latest\",\"name\":\"main-container\",\"ports\":[{\"containerPort\":80,\"name\":\"http\"}],\"resources\":{\"limits\":{\"memory\":\"100Mi\"}}},{\"image\":\"spurin/idle:latest\",\"name\":\"idle\",\"ports\":[{\"containerPort\":8080,\"name\":\"web\",\"protocol\":\"TCP\"}]}]}}}}\n"
    },
    "creationTimestamp": "2025-02-25T00:20:45Z",
    "generation": 5,
    "labels": {
      "app": "manual-apply-app",
      "applications.argoproj.io/app-name": "manual-apply-app",
      "mutation-test": "FROM-MUTATION-WEBHOOK"
    },
    "managedFields": [
      {
        "apiVersion": "apps/v1",
        "fieldsType": "FieldsV1",
        "fieldsV1": {
          "f:metadata": {
            "f:labels": {
              "f:app": {},
              "f:applications.argoproj.io/app-name": {}
            }
          },
          "f:spec": {
            "f:replicas": {},
            "f:selector": {},
            "f:template": {
              "f:metadata": {
                "f:labels": {
                  "f:app": {}
                }
              },
              "f:spec": {
                "f:automountServiceAccountToken": {},
                "f:containers": {
                  "k:{\"name\":\"main-container\"}": {
                    ".": {},
                    "f:image": {},
                    "f:name": {},
                    "f:ports": {
                      "k:{\"containerPort\":40,\"protocol\":\"TCP\"}": {
                        ".": {},
                        "f:containerPort": {},
                        "f:name": {}
                      },
                      "k:{\"containerPort\":80,\"protocol\":\"TCP\"}": {
                        ".": {},
                        "f:containerPort": {},
                        "f:name": {}
                      }
                    },
                    "f:resources": {
                      "f:limits": {
                        "f:memory": {}
                      }
                    }
                  }
                }
              }
            }
          }
        },
        "manager": "argocd-controller",
        "operation": "Apply",
        "time": "2025-02-25T01:31:03Z"
      },
      {
        "apiVersion": "apps/v1",
        "fieldsType": "FieldsV1",
        "fieldsV1": {
          "f:metadata": {
            "f:annotations": {},
            "f:labels": {
              ".": {},
              "f:app": {},
              "f:applications.argoproj.io/app-name": {}
            }
          },
          "f:spec": {
            "f:progressDeadlineSeconds": {},
            "f:replicas": {},
            "f:revisionHistoryLimit": {},
            "f:selector": {},
            "f:strategy": {
              "f:rollingUpdate": {
                ".": {},
                "f:maxSurge": {},
                "f:maxUnavailable": {}
              },
              "f:type": {}
            },
            "f:template": {
              "f:metadata": {
                "f:labels": {
                  ".": {},
                  "f:app": {}
                }
              },
              "f:spec": {
                "f:automountServiceAccountToken": {},
                "f:containers": {
                  "k:{\"name\":\"main-container\"}": {
                    ".": {},
                    "f:image": {},
                    "f:imagePullPolicy": {},
                    "f:name": {},
                    "f:ports": {
                      ".": {},
                      "k:{\"containerPort\":80,\"protocol\":\"TCP\"}": {
                        ".": {},
                        "f:containerPort": {},
                        "f:name": {},
                        "f:protocol": {}
                      }
                    },
                    "f:resources": {
                      ".": {},
                      "f:limits": {
                        ".": {},
                        "f:memory": {}
                      }
                    },
                    "f:terminationMessagePath": {},
                    "f:terminationMessagePolicy": {}
                  }
                },
                "f:dnsPolicy": {},
                "f:restartPolicy": {},
                "f:schedulerName": {},
                "f:securityContext": {},
                "f:terminationGracePeriodSeconds": {}
              }
            }
          }
        },
        "manager": "argocd-controller",
        "operation": "Update",
        "time": "2025-02-25T01:19:32Z"
      },
      {
        "apiVersion": "apps/v1",
        "fieldsType": "FieldsV1",
        "fieldsV1": {
          "f:metadata": {
            "f:annotations": {
              "f:kubectl.kubernetes.io/last-applied-configuration": {}
            }
          },
          "f:spec": {
            "f:template": {
              "f:spec": {
                "f:containers": {
                  "k:{\"name\":\"idle\"}": {
                    ".": {},
                    "f:image": {},
                    "f:imagePullPolicy": {},
                    "f:name": {},
                    "f:ports": {
                      ".": {},
                      "k:{\"containerPort\":8080,\"protocol\":\"TCP\"}": {
                        ".": {},
                        "f:containerPort": {},
                        "f:name": {},
                        "f:protocol": {}
                      }
                    },
                    "f:resources": {},
                    "f:terminationMessagePath": {},
                    "f:terminationMessagePolicy": {}
                  }
                }
              }
            }
          }
        },
        "manager": "kubectl-client-side-apply",
        "operation": "Update",
        "time": "2025-02-25T01:29:34Z"
      },
      {
        "apiVersion": "apps/v1",
        "fieldsType": "FieldsV1",
        "fieldsV1": {
          "f:metadata": {
            "f:annotations": {
              "f:deployment.kubernetes.io/revision": {}
            }
          },
          "f:status": {
            "f:availableReplicas": {},
            "f:conditions": {
              ".": {},
              "k:{\"type\":\"Available\"}": {
                ".": {},
                "f:lastTransitionTime": {},
                "f:lastUpdateTime": {},
                "f:message": {},
                "f:reason": {},
                "f:status": {},
                "f:type": {}
              },
              "k:{\"type\":\"Progressing\"}": {
                ".": {},
                "f:lastTransitionTime": {},
                "f:lastUpdateTime": {},
                "f:message": {},
                "f:reason": {},
                "f:status": {},
                "f:type": {}
              }
            },
            "f:observedGeneration": {},
            "f:readyReplicas": {},
            "f:replicas": {},
            "f:updatedReplicas": {}
          }
        },
        "manager": "kube-controller-manager",
        "operation": "Update",
        "subresource": "status",
        "time": "2025-02-25T01:29:44Z"
      }
    ],
    "name": "manual-apply-test-deployment",
    "namespace": "default",
    "resourceVersion": "46835",
    "uid": "c2ff066f-cbbd-408d-a015-85f1b6332193"
  },
  "spec": {
    "progressDeadlineSeconds": 600,
    "replicas": 1,
    "revisionHistoryLimit": 10,
    "selector": {
      "matchLabels": {
        "app": "manual-apply-test"
      }
    },
    "strategy": {
      "rollingUpdate": {
        "maxSurge": "25%",
        "maxUnavailable": "25%"
      },
      "type": "RollingUpdate"
    },
    "template": {
      "metadata": {
        "creationTimestamp": null,
        "labels": {
          "app": "manual-apply-test"
        }
      },
      "spec": {
        "automountServiceAccountToken": false,
        "containers": [
          {
            "image": "nginx:latest",
            "imagePullPolicy": "Always",
            "name": "main-container",
            "ports": [
              {
                "containerPort": 80,
                "name": "http",
                "protocol": "TCP"
              },
              {
                "containerPort": 40,
                "name": "https",
                "protocol": "TCP"
              }
            ],
            "resources": {
              "limits": {
                "memory": "100Mi"
              }
            },
            "terminationMessagePath": "/dev/termination-log",
            "terminationMessagePolicy": "File"
          },
          {
            "image": "spurin/idle:latest",
            "imagePullPolicy": "Always",
            "name": "idle",
            "ports": [
              {
                "containerPort": 8080,
                "name": "web",
                "protocol": "TCP"
              }
            ],
            "resources": {
              "limits": {
                "memory": "100Mi"
              }
            },
            "terminationMessagePath": "/dev/termination-log",
            "terminationMessagePolicy": "File"
          }
        ],
        "dnsPolicy": "ClusterFirst",
        "restartPolicy": "Always",
        "schedulerName": "default-scheduler",
        "securityContext": {},
        "terminationGracePeriodSeconds": 30
      }
    }
  }
}
@@ -0,0 +1,16 @@
apiVersion: v1
kind: Service
metadata:
  labels:
    app.kubernetes.io/instance: httpbin
  name: httpbin-svc
  namespace: httpbin
spec:
  ports:
  - name: http-port
    port: 7777
    targetPort: 80
  - name: test
    port: 333
  selector:
    app: httpbin
@@ -0,0 +1,55 @@
apiVersion: v1
kind: Service
metadata:
  creationTimestamp: '2023-12-18T00:34:22Z'
  labels:
    app.kubernetes.io/instance: httpbin
  managedFields:
  - apiVersion: v1
    fieldsType: FieldsV1
    fieldsV1:
      'f:metadata':
        'f:labels':
          'f:app.kubernetes.io/instance': {}
      'f:spec':
        'f:ports':
          'k:{"port":333,"protocol":"TCP"}':
            .: {}
            'f:name': {}
            'f:port': {}
          'k:{"port":7777,"protocol":"TCP"}':
            .: {}
            'f:name': {}
            'f:port': {}
            'f:targetPort': {}
        'f:selector': {}
    manager: argocd-controller
    operation: Apply
    time: '2023-12-18T00:34:22Z'
  name: httpbin-svc
  namespace: httpbin
  resourceVersion: '2836'
  uid: 0e898e6f-c275-476d-9b4f-5e96072cc129
spec:
  clusterIP: 10.43.223.115
  clusterIPs:
  - 10.43.223.115
  internalTrafficPolicy: Cluster
  ipFamilies:
  - IPv4
  ipFamilyPolicy: SingleStack
  ports:
  - name: http-port
    port: 7777
    protocol: TCP
    targetPort: 80
  - name: test
    port: 333
    protocol: TCP
    targetPort: 333
  selector:
    app: httpbin
  sessionAffinity: None
  type: ClusterIP
status:
  loadBalancer: {}
@@ -0,0 +1,74 @@
{
  "apiVersion": "v1",
  "kind": "Service",
  "metadata": {
    "creationTimestamp": "2023-12-18T00:34:22Z",
    "labels": {
      "event": "FROM-MUTATION-WEBHOOK"
    },
    "managedFields": [
      {
        "apiVersion": "v1",
        "fieldsType": "FieldsV1",
        "fieldsV1": {
          "f:spec": {
            "f:ports": {
              "k:{\"port\":333,\"protocol\":\"TCP\"}": {
                ".": {},
                "f:name": {},
                "f:port": {}
              },
              "k:{\"port\":7777,\"protocol\":\"TCP\"}": {
                ".": {},
                "f:name": {},
                "f:port": {},
                "f:targetPort": {}
              }
            },
            "f:selector": {}
          }
        },
        "manager": "argocd-controller",
        "operation": "Apply",
        "time": "2023-12-18T00:38:28Z"
      }
    ],
    "name": "httpbin-svc",
    "namespace": "httpbin",
    "resourceVersion": "2836",
    "uid": "0e898e6f-c275-476d-9b4f-5e96072cc129"
  },
  "spec": {
    "clusterIP": "10.43.223.115",
    "clusterIPs": [
      "10.43.223.115"
    ],
    "internalTrafficPolicy": "Cluster",
    "ipFamilies": [
      "IPv4"
    ],
    "ipFamilyPolicy": "SingleStack",
    "ports": [
      {
        "name": "http-port",
        "port": 7777,
        "protocol": "TCP",
        "targetPort": 80
      },
      {
        "name": "test",
        "port": 333,
        "protocol": "TCP",
        "targetPort": 333
      }
    ],
    "selector": {
      "app": "httpbin"
    },
    "sessionAffinity": "None",
    "type": "ClusterIP"
  },
  "status": {
    "loadBalancer": {}
  }
}
@@ -0,0 +1,50 @@
apiVersion: v1
kind: Service
metadata:
  creationTimestamp: "2025-05-16T19:01:22Z"
  labels:
    app.kubernetes.io/instance: httpbin
    delete-me: delete-value
  managedFields:
  - apiVersion: v1
    fieldsType: FieldsV1
    fieldsV1:
      f:metadata:
        f:labels:
          f:app.kubernetes.io/instance: {}
          f:delete-me: {}
      f:spec:
        f:ports:
          k:{"port":7777,"protocol":"TCP"}:
            .: {}
            f:name: {}
            f:port: {}
            f:protocol: {}
            f:targetPort: {}
        f:selector: {}
    manager: argocd-controller
    operation: Apply
    time: "2025-05-16T19:01:22Z"
  name: httpbin-svc
  namespace: httpbin
  resourceVersion: "159005"
  uid: 61a7a0c2-d973-4333-bbd6-c06ba1c00190
spec:
  clusterIP: 10.96.59.144
  clusterIPs:
  - 10.96.59.144
  internalTrafficPolicy: Cluster
  ipFamilies:
  - IPv4
  ipFamilyPolicy: SingleStack
  ports:
  - name: http-port
    port: 7777
    protocol: TCP
    targetPort: 80
  selector:
    app: httpbin
  sessionAffinity: None
  type: ClusterIP
status:
  loadBalancer: {}
@@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
  labels:
    app.kubernetes.io/instance: httpbin
  name: httpbin-svc
  namespace: httpbin
spec:
  ports:
  - name: http-port
    port: 7777
    protocol: TCP
    targetPort: 80
  selector:
    app: httpbin
@@ -0,0 +1,69 @@
{
  "apiVersion": "v1",
  "kind": "Service",
  "metadata": {
    "creationTimestamp": "2025-05-16T19:01:22Z",
    "labels": {
      "app.kubernetes.io/instance": "httpbin"
    },
    "managedFields": [
      {
        "apiVersion": "v1",
        "fieldsType": "FieldsV1",
        "fieldsV1": {
          "f:metadata": {
            "f:labels": {
              "f:app.kubernetes.io/instance": {}
            }
          },
          "f:spec": {
            "f:ports": {
              "k:{\"port\":7777,\"protocol\":\"TCP\"}": {
                ".": {},
                "f:name": {},
                "f:port": {},
                "f:protocol": {},
                "f:targetPort": {}
              }
            },
            "f:selector": {}
          }
        },
        "manager": "argocd-controller",
        "operation": "Apply",
        "time": "2025-05-16T19:02:57Z"
      }
    ],
    "name": "httpbin-svc",
    "namespace": "httpbin",
    "resourceVersion": "159005",
    "uid": "61a7a0c2-d973-4333-bbd6-c06ba1c00190"
  },
  "spec": {
    "clusterIP": "10.96.59.144",
    "clusterIPs": [
      "10.96.59.144"
    ],
    "internalTrafficPolicy": "Cluster",
    "ipFamilies": [
      "IPv4"
    ],
    "ipFamilyPolicy": "SingleStack",
    "ports": [
      {
        "name": "http-port",
        "port": 7777,
        "protocol": "TCP",
        "targetPort": 80
      }
    ],
    "selector": {
      "app": "httpbin"
    },
    "sessionAffinity": "None",
    "type": "ClusterIP"
  },
  "status": {
    "loadBalancer": {}
  }
}
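The fixtures above come in config, live, and predicted-live variants that the diff and sync tests compare against each other. A rough sketch of how a test could load one of them into an unstructured object; the path and helper name are hypothetical, not taken from the repository, and sigs.k8s.io/yaml is assumed to be available:

    package main

    import (
        "fmt"
        "os"

        "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
        "sigs.k8s.io/yaml"
    )

    // loadFixture reads a YAML or JSON manifest into an unstructured object.
    func loadFixture(path string) (*unstructured.Unstructured, error) {
        data, err := os.ReadFile(path)
        if err != nil {
            return nil, fmt.Errorf("failed to read fixture %s: %w", path, err)
        }
        var obj map[string]any
        // sigs.k8s.io/yaml converts YAML to JSON before unmarshalling,
        // so the same loader handles both fixture formats.
        if err := yaml.Unmarshal(data, &obj); err != nil {
            return nil, fmt.Errorf("failed to unmarshal fixture %s: %w", path, err)
        }
        return &unstructured.Unstructured{Object: obj}, nil
    }

    func main() {
        obj, err := loadFixture("testdata/svc-config.yaml") // hypothetical path
        if err != nil {
            fmt.Println(err)
            return
        }
        fmt.Println(obj.GetKind(), obj.GetName())
    }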
@@ -59,7 +59,7 @@ func NewEngine(config *rest.Config, clusterCache cache.ClusterCache, opts ...Opt
 func (e *gitOpsEngine) Run() (StopFunc, error) {
 	err := e.cache.EnsureSynced()
 	if err != nil {
-		return nil, err
+		return nil, fmt.Errorf("failed to ensure the cache is synced: %w", err)
 	}
 
 	return func() {
@@ -76,23 +76,23 @@ func (e *gitOpsEngine) Sync(ctx context.Context,
 ) ([]common.ResourceSyncResult, error) {
 	managedResources, err := e.cache.GetManagedLiveObjs(resources, isManaged)
 	if err != nil {
-		return nil, err
+		return nil, fmt.Errorf("failed to get managed live objects: %w", err)
 	}
 	result := sync.Reconcile(resources, managedResources, namespace, e.cache)
 	diffRes, err := diff.DiffArray(result.Target, result.Live, diff.WithLogr(e.log))
 	if err != nil {
-		return nil, err
+		return nil, fmt.Errorf("failed to diff objects: %w", err)
 	}
 	opts = append(opts, sync.WithSkipHooks(!diffRes.Modified))
 	syncCtx, cleanup, err := sync.NewSyncContext(revision, result, e.config, e.config, e.kubectl, namespace, e.cache.GetOpenAPISchema(), opts...)
 	if err != nil {
-		return nil, err
+		return nil, fmt.Errorf("failed to create sync context: %w", err)
 	}
 	defer cleanup()
 
 	resUpdated := make(chan bool)
 	resIgnore := make(chan struct{})
-	unsubscribe := e.cache.OnResourceUpdated(func(newRes *cache.Resource, oldRes *cache.Resource, namespaceResources map[kube.ResourceKey]*cache.Resource) {
+	unsubscribe := e.cache.OnResourceUpdated(func(newRes *cache.Resource, oldRes *cache.Resource, _ map[kube.ResourceKey]*cache.Resource) {
 		var key kube.ResourceKey
 		if newRes != nil {
 			key = newRes.ResourceKey()
@@ -120,6 +120,7 @@ func (e *gitOpsEngine) Sync(ctx context.Context,
 		select {
 		case <-ctx.Done():
 			syncCtx.Terminate()
+			//nolint:wrapcheck // don't wrap context errors
 			return resources, ctx.Err()
 		case <-time.After(operationRefreshTimeout):
 		case <-resUpdated:
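These hunks replace bare `return nil, err` with errors wrapped via fmt.Errorf and the `%w` verb, which annotates the message while keeping the original error in the chain for callers to inspect. A minimal standalone sketch of the pattern; the sentinel and function names are made up for illustration:

    package main

    import (
        "errors"
        "fmt"
    )

    // errCacheSync is a hypothetical sentinel used to show unwrapping.
    var errCacheSync = errors.New("cache sync failed")

    func ensureSynced() error {
        return errCacheSync
    }

    func run() error {
        if err := ensureSynced(); err != nil {
            // %w keeps the original error in the chain.
            return fmt.Errorf("failed to ensure the cache is synced: %w", err)
        }
        return nil
    }

    func main() {
        err := run()
        fmt.Println(err)                          // failed to ensure the cache is synced: cache sync failed
        fmt.Println(errors.Is(err, errCacheSync)) // true: the chain is preserved
    }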
@@ -2,7 +2,7 @@ package engine
 
 import (
 	"github.com/go-logr/logr"
-	"k8s.io/klog/v2/klogr"
+	"k8s.io/klog/v2/textlogger"
 
 	"github.com/argoproj/gitops-engine/pkg/utils/kube"
 	"github.com/argoproj/gitops-engine/pkg/utils/tracing"
@@ -16,7 +16,7 @@ type options struct {
 }
 
 func applyOptions(opts []Option) options {
-	log := klogr.New()
+	log := textlogger.NewLogger(textlogger.NewConfig())
 	o := options{
 		log:     log,
 		kubectl: &kube.KubectlCmd{
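klogr is deprecated upstream in favor of textlogger, which also produces a logr.Logger. A small sketch of constructing one with explicit verbosity, assuming the textlogger API as documented in k8s.io/klog/v2:

    package main

    import (
        "k8s.io/klog/v2/textlogger"
    )

    func main() {
        // NewConfig accepts options such as textlogger.Verbosity(level).
        log := textlogger.NewLogger(textlogger.NewConfig(textlogger.Verbosity(2)))
        log.Info("engine options initialized", "component", "gitops-engine")
        log.V(2).Info("verbose detail, emitted because verbosity >= 2")
    }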
@@ -1,9 +1,13 @@
 package health
 
 import (
-	"github.com/argoproj/gitops-engine/pkg/utils/kube"
+	"fmt"
+
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime/schema"
+
+	"github.com/argoproj/gitops-engine/pkg/sync/hook"
+	"github.com/argoproj/gitops-engine/pkg/utils/kube"
 )
 
 // Represents resource health status
@@ -47,7 +51,7 @@ var healthOrder = []HealthStatusCode{
 	HealthStatusUnknown,
 }
 
-// IsWorse returns whether or not the new health status code is a worser condition than the current
+// IsWorse returns whether or not the new health status code is a worse condition than the current
 func IsWorse(current, new HealthStatusCode) bool {
 	currentIndex := 0
 	newIndex := 0
@@ -64,7 +68,7 @@ func IsWorse(current, new HealthStatusCode) bool {
 
 // GetResourceHealth returns the health of a k8s resource
 func GetResourceHealth(obj *unstructured.Unstructured, healthOverride HealthOverride) (health *HealthStatus, err error) {
-	if obj.GetDeletionTimestamp() != nil {
+	if obj.GetDeletionTimestamp() != nil && !hook.HasHookFinalizer(obj) {
 		return &HealthStatus{
 			Status:  HealthStatusProgressing,
 			Message: "Pending deletion",
@@ -78,7 +82,7 @@ func GetResourceHealth(obj *unstructured.Unstructured, healthOverride HealthOver
 				Status:  HealthStatusUnknown,
 				Message: err.Error(),
 			}
-			return health, err
+			return health, fmt.Errorf("failed to get resource health for %s/%s: %w", obj.GetNamespace(), obj.GetName(), err)
 		}
 		if health != nil {
 			return health, nil
@@ -94,7 +98,6 @@ func GetResourceHealth(obj *unstructured.Unstructured, healthOverride HealthOver
 		}
 	}
 	return health, err
-
 }
 
 // GetHealthCheckFunc returns built-in health check function or nil if health check is not supported
@@ -112,29 +115,19 @@ func GetHealthCheckFunc(gvk schema.GroupVersionKind) func(obj *unstructured.Unst
 			return getDaemonSetHealth
 		}
 	case "extensions":
-		switch gvk.Kind {
-		case kube.DeploymentKind:
-			return getDeploymentHealth
-		case kube.IngressKind:
+		if gvk.Kind == kube.IngressKind {
 			return getIngressHealth
-		case kube.ReplicaSetKind:
-			return getReplicaSetHealth
-		case kube.DaemonSetKind:
-			return getDaemonSetHealth
 		}
 	case "argoproj.io":
-		switch gvk.Kind {
-		case "Workflow":
+		if gvk.Kind == "Workflow" {
 			return getArgoWorkflowHealth
 		}
 	case "apiregistration.k8s.io":
-		switch gvk.Kind {
-		case kube.APIServiceKind:
+		if gvk.Kind == kube.APIServiceKind {
 			return getAPIServiceHealth
 		}
 	case "networking.k8s.io":
-		switch gvk.Kind {
-		case kube.IngressKind:
+		if gvk.Kind == kube.IngressKind {
 			return getIngressHealth
 		}
 	case "":
@@ -147,13 +140,11 @@ func GetHealthCheckFunc(gvk schema.GroupVersionKind) func(obj *unstructured.Unst
 			return getPodHealth
 		}
 	case "batch":
-		switch gvk.Kind {
-		case kube.JobKind:
+		if gvk.Kind == kube.JobKind {
 			return getJobHealth
 		}
 	case "autoscaling":
-		switch gvk.Kind {
-		case kube.HorizontalPodAutoscalerKind:
+		if gvk.Kind == kube.HorizontalPodAutoscalerKind {
 			return getHPAHealth
 		}
 	}
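For orientation, a sketch of how a caller might exercise GetResourceHealth with the built-in checks. The Deployment object is hand-built and minimal, and the import path is an assumption based on the package layout shown in this diff:

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

        "github.com/argoproj/gitops-engine/pkg/health"
    )

    func main() {
        obj := &unstructured.Unstructured{Object: map[string]any{
            "apiVersion": "apps/v1",
            "kind":       "Deployment",
            "metadata":   map[string]any{"name": "demo", "generation": int64(1)},
            "status":     map[string]any{"observedGeneration": int64(1)},
        }}
        // nil override: fall back to the built-in check dispatched by GVK.
        hs, err := health.GetResourceHealth(obj, nil)
        if err != nil {
            fmt.Println("health check failed:", err)
            return
        }
        fmt.Println(hs.Status, hs.Message)
    }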
@@ -3,11 +3,12 @@ package health
 import (
 	"fmt"
 
-	"github.com/argoproj/gitops-engine/pkg/utils/kube"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime"
 	apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
 	apiregistrationv1beta1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1"
+
+	"github.com/argoproj/gitops-engine/pkg/utils/kube"
 )
 
 func getAPIServiceHealth(obj *unstructured.Unstructured) (*HealthStatus, error) {
@@ -17,14 +18,14 @@ func getAPIServiceHealth(obj *unstructured.Unstructured) (*HealthStatus, error)
 		var apiService apiregistrationv1.APIService
 		err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &apiService)
 		if err != nil {
-			return nil, fmt.Errorf("failed to convert unstructured APIService to typed: %v", err)
+			return nil, fmt.Errorf("failed to convert unstructured APIService to typed: %w", err)
 		}
 		return getApiregistrationv1APIServiceHealth(&apiService)
 	case apiregistrationv1beta1.SchemeGroupVersion.WithKind(kube.APIServiceKind):
 		var apiService apiregistrationv1beta1.APIService
 		err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &apiService)
 		if err != nil {
-			return nil, fmt.Errorf("failed to convert unstructured APIService to typed: %v", err)
+			return nil, fmt.Errorf("failed to convert unstructured APIService to typed: %w", err)
 		}
 		return getApiregistrationv1beta1APIServiceHealth(&apiService)
 	default:
@@ -34,19 +35,17 @@ func getAPIServiceHealth(obj *unstructured.Unstructured) (*HealthStatus, error)
 
 func getApiregistrationv1APIServiceHealth(apiservice *apiregistrationv1.APIService) (*HealthStatus, error) {
 	for _, c := range apiservice.Status.Conditions {
-		switch c.Type {
-		case apiregistrationv1.Available:
+		if c.Type == apiregistrationv1.Available {
 			if c.Status == apiregistrationv1.ConditionTrue {
 				return &HealthStatus{
 					Status:  HealthStatusHealthy,
 					Message: fmt.Sprintf("%s: %s", c.Reason, c.Message),
 				}, nil
-			} else {
-				return &HealthStatus{
-					Status:  HealthStatusProgressing,
-					Message: fmt.Sprintf("%s: %s", c.Reason, c.Message),
-				}, nil
 			}
+			return &HealthStatus{
+				Status:  HealthStatusProgressing,
+				Message: fmt.Sprintf("%s: %s", c.Reason, c.Message),
+			}, nil
 		}
 	}
 	return &HealthStatus{
@@ -57,19 +56,17 @@ func getApiregistrationv1APIServiceHealth(apiservice *apiregistrationv1.APIServi
 
 func getApiregistrationv1beta1APIServiceHealth(apiservice *apiregistrationv1beta1.APIService) (*HealthStatus, error) {
 	for _, c := range apiservice.Status.Conditions {
-		switch c.Type {
-		case apiregistrationv1beta1.Available:
+		if c.Type == apiregistrationv1beta1.Available {
 			if c.Status == apiregistrationv1beta1.ConditionTrue {
 				return &HealthStatus{
 					Status:  HealthStatusHealthy,
 					Message: fmt.Sprintf("%s: %s", c.Reason, c.Message),
 				}, nil
-			} else {
-				return &HealthStatus{
-					Status:  HealthStatusProgressing,
-					Message: fmt.Sprintf("%s: %s", c.Reason, c.Message),
-				}, nil
 			}
+			return &HealthStatus{
+				Status:  HealthStatusProgressing,
+				Message: fmt.Sprintf("%s: %s", c.Reason, c.Message),
+			}, nil
 		}
 	}
 	return &HealthStatus{
@@ -1,6 +1,8 @@
 package health
 
 import (
+	"fmt"
+
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime"
 )
@@ -30,7 +32,7 @@ func getArgoWorkflowHealth(obj *unstructured.Unstructured) (*HealthStatus, error
 	var wf argoWorkflow
 	err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &wf)
 	if err != nil {
-		return nil, err
+		return nil, fmt.Errorf("failed to convert unstructured to argoworkflow: %w", err)
 	}
 	switch wf.Status.Phase {
 	case "", nodePending, nodeRunning:
@@ -3,12 +3,11 @@ package health
 import (
 	"fmt"
 
-	"github.com/argoproj/gitops-engine/pkg/utils/kube"
 	appsv1 "k8s.io/api/apps/v1"
-	appsv1beta2 "k8s.io/api/apps/v1beta2"
-	extv1beta1 "k8s.io/api/extensions/v1beta1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime"
+
+	"github.com/argoproj/gitops-engine/pkg/utils/kube"
 )
 
 func getDaemonSetHealth(obj *unstructured.Unstructured) (*HealthStatus, error) {
@@ -18,23 +17,9 @@ func getDaemonSetHealth(obj *unstructured.Unstructured) (*HealthStatus, error) {
 		var daemon appsv1.DaemonSet
 		err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &daemon)
 		if err != nil {
-			return nil, fmt.Errorf("failed to convert unstructured DaemonSet to typed: %v", err)
+			return nil, fmt.Errorf("failed to convert unstructured DaemonSet to typed: %w", err)
 		}
 		return getAppsv1DaemonSetHealth(&daemon)
-	case appsv1beta2.SchemeGroupVersion.WithKind(kube.DaemonSetKind):
-		var daemon appsv1beta2.DaemonSet
-		err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &daemon)
-		if err != nil {
-			return nil, fmt.Errorf("failed to convert unstructured DaemonSet to typed: %v", err)
-		}
-		return getAppsv1beta1DaemonSetHealth(&daemon)
-	case extv1beta1.SchemeGroupVersion.WithKind(kube.DaemonSetKind):
-		var daemon extv1beta1.DaemonSet
-		err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &daemon)
-		if err != nil {
-			return nil, fmt.Errorf("failed to convert unstructured DaemonSet to typed: %v", err)
-		}
-		return getExtv1beta1DaemonSetHealth(&daemon)
 	default:
 		return nil, fmt.Errorf("unsupported DaemonSet GVK: %s", gvk)
 	}
@@ -42,93 +27,28 @@ func getDaemonSetHealth(obj *unstructured.Unstructured) (*HealthStatus, error) {
 
 func getAppsv1DaemonSetHealth(daemon *appsv1.DaemonSet) (*HealthStatus, error) {
 	// Borrowed at kubernetes/kubectl/rollout_status.go https://github.com/kubernetes/kubernetes/blob/5232ad4a00ec93942d0b2c6359ee6cd1201b46bc/pkg/kubectl/rollout_status.go#L110
-	if daemon.Generation <= daemon.Status.ObservedGeneration {
-		if daemon.Spec.UpdateStrategy.Type == appsv1.OnDeleteDaemonSetStrategyType {
-			return &HealthStatus{
-				Status:  HealthStatusHealthy,
-				Message: fmt.Sprintf("daemon set %d out of %d new pods have been updated", daemon.Status.UpdatedNumberScheduled, daemon.Status.DesiredNumberScheduled),
-			}, nil
-		}
-		if daemon.Status.UpdatedNumberScheduled < daemon.Status.DesiredNumberScheduled {
-			return &HealthStatus{
-				Status:  HealthStatusProgressing,
-				Message: fmt.Sprintf("Waiting for daemon set %q rollout to finish: %d out of %d new pods have been updated...", daemon.Name, daemon.Status.UpdatedNumberScheduled, daemon.Status.DesiredNumberScheduled),
-			}, nil
-		}
-		if daemon.Status.NumberAvailable < daemon.Status.DesiredNumberScheduled {
-			return &HealthStatus{
-				Status:  HealthStatusProgressing,
-				Message: fmt.Sprintf("Waiting for daemon set %q rollout to finish: %d of %d updated pods are available...", daemon.Name, daemon.Status.NumberAvailable, daemon.Status.DesiredNumberScheduled),
-			}, nil
-		}
-	} else {
-		return &HealthStatus{
-			Status:  HealthStatusProgressing,
-			Message: "Waiting for rollout to finish: observed daemon set generation less then desired generation",
-		}, nil
-	}
-	return &HealthStatus{
-		Status: HealthStatusHealthy,
-	}, nil
-}
-
-func getAppsv1beta1DaemonSetHealth(daemon *appsv1beta2.DaemonSet) (*HealthStatus, error) {
-	// Borrowed at kubernetes/kubectl/rollout_status.go https://github.com/kubernetes/kubernetes/blob/5232ad4a00ec93942d0b2c6359ee6cd1201b46bc/pkg/kubectl/rollout_status.go#L110
-	if daemon.Generation <= daemon.Status.ObservedGeneration {
-		if daemon.Spec.UpdateStrategy.Type == appsv1beta2.OnDeleteDaemonSetStrategyType {
-			return &HealthStatus{
-				Status:  HealthStatusHealthy,
-				Message: fmt.Sprintf("daemon set %d out of %d new pods have been updated", daemon.Status.UpdatedNumberScheduled, daemon.Status.DesiredNumberScheduled),
-			}, nil
-		}
-		if daemon.Status.UpdatedNumberScheduled < daemon.Status.DesiredNumberScheduled {
-			return &HealthStatus{
-				Status:  HealthStatusProgressing,
-				Message: fmt.Sprintf("Waiting for daemon set %q rollout to finish: %d out of %d new pods have been updated...", daemon.Name, daemon.Status.UpdatedNumberScheduled, daemon.Status.DesiredNumberScheduled),
-			}, nil
-		}
-		if daemon.Status.NumberAvailable < daemon.Status.DesiredNumberScheduled {
-			return &HealthStatus{
-				Status:  HealthStatusProgressing,
-				Message: fmt.Sprintf("Waiting for daemon set %q rollout to finish: %d of %d updated pods are available...", daemon.Name, daemon.Status.NumberAvailable, daemon.Status.DesiredNumberScheduled),
-			}, nil
-		}
-	} else {
-		return &HealthStatus{
-			Status:  HealthStatusProgressing,
-			Message: "Waiting for rollout to finish: observed daemon set generation less then desired generation",
-		}, nil
-	}
-	return &HealthStatus{
-		Status: HealthStatusHealthy,
-	}, nil
-}
-
-func getExtv1beta1DaemonSetHealth(daemon *extv1beta1.DaemonSet) (*HealthStatus, error) {
-	// Borrowed at kubernetes/kubectl/rollout_status.go https://github.com/kubernetes/kubernetes/blob/5232ad4a00ec93942d0b2c6359ee6cd1201b46bc/pkg/kubectl/rollout_status.go#L110
-	if daemon.Generation <= daemon.Status.ObservedGeneration {
-		if daemon.Spec.UpdateStrategy.Type == extv1beta1.OnDeleteDaemonSetStrategyType {
-			return &HealthStatus{
-				Status:  HealthStatusHealthy,
-				Message: fmt.Sprintf("daemon set %d out of %d new pods have been updated", daemon.Status.UpdatedNumberScheduled, daemon.Status.DesiredNumberScheduled),
-			}, nil
-		}
-		if daemon.Status.UpdatedNumberScheduled < daemon.Status.DesiredNumberScheduled {
-			return &HealthStatus{
-				Status:  HealthStatusProgressing,
-				Message: fmt.Sprintf("Waiting for daemon set %q rollout to finish: %d out of %d new pods have been updated...", daemon.Name, daemon.Status.UpdatedNumberScheduled, daemon.Status.DesiredNumberScheduled),
-			}, nil
-		}
-		if daemon.Status.NumberAvailable < daemon.Status.DesiredNumberScheduled {
-			return &HealthStatus{
-				Status:  HealthStatusProgressing,
-				Message: fmt.Sprintf("Waiting for daemon set %q rollout to finish: %d of %d updated pods are available...", daemon.Name, daemon.Status.NumberAvailable, daemon.Status.DesiredNumberScheduled),
-			}, nil
-		}
-	} else {
-		return &HealthStatus{
-			Status:  HealthStatusProgressing,
-			Message: "Waiting for rollout to finish: observed daemon set generation less then desired generation",
-		}, nil
-	}
+	if daemon.Generation > daemon.Status.ObservedGeneration {
+		return &HealthStatus{
+			Status:  HealthStatusProgressing,
+			Message: "Waiting for rollout to finish: observed daemon set generation less than desired generation",
+		}, nil
+	}
+	if daemon.Spec.UpdateStrategy.Type == appsv1.OnDeleteDaemonSetStrategyType {
+		return &HealthStatus{
+			Status:  HealthStatusHealthy,
+			Message: fmt.Sprintf("daemon set %d out of %d new pods have been updated", daemon.Status.UpdatedNumberScheduled, daemon.Status.DesiredNumberScheduled),
+		}, nil
+	}
+	if daemon.Status.UpdatedNumberScheduled < daemon.Status.DesiredNumberScheduled {
+		return &HealthStatus{
+			Status:  HealthStatusProgressing,
+			Message: fmt.Sprintf("Waiting for daemon set %q rollout to finish: %d out of %d new pods have been updated...", daemon.Name, daemon.Status.UpdatedNumberScheduled, daemon.Status.DesiredNumberScheduled),
+		}, nil
+	}
+	if daemon.Status.NumberAvailable < daemon.Status.DesiredNumberScheduled {
+		return &HealthStatus{
+			Status:  HealthStatusProgressing,
+			Message: fmt.Sprintf("Waiting for daemon set %q rollout to finish: %d of %d updated pods are available...", daemon.Name, daemon.Status.NumberAvailable, daemon.Status.DesiredNumberScheduled),
+		}, nil
+	}
 	return &HealthStatus{
@@ -3,12 +3,11 @@ package health
 import (
 	"fmt"
 
-	"github.com/argoproj/gitops-engine/pkg/utils/kube"
 	appsv1 "k8s.io/api/apps/v1"
-	appsv1beta1 "k8s.io/api/apps/v1beta1"
-	extv1beta1 "k8s.io/api/extensions/v1beta1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime"
+
+	"github.com/argoproj/gitops-engine/pkg/utils/kube"
 )
 
 func getDeploymentHealth(obj *unstructured.Unstructured) (*HealthStatus, error) {
@@ -18,23 +17,9 @@ func getDeploymentHealth(obj *unstructured.Unstructured) (*HealthStatus, error)
 		var deployment appsv1.Deployment
 		err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &deployment)
 		if err != nil {
-			return nil, fmt.Errorf("failed to convert unstructured Deployment to typed: %v", err)
+			return nil, fmt.Errorf("failed to convert unstructured Deployment to typed: %w", err)
 		}
 		return getAppsv1DeploymentHealth(&deployment)
-	case appsv1beta1.SchemeGroupVersion.WithKind(kube.DeploymentKind):
-		var deployment appsv1beta1.Deployment
-		err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &deployment)
-		if err != nil {
-			return nil, fmt.Errorf("failed to convert unstructured Deployment to typed: %v", err)
-		}
-		return getAppsv1beta1DeploymentHealth(&deployment)
-	case extv1beta1.SchemeGroupVersion.WithKind(kube.DeploymentKind):
-		var deployment extv1beta1.Deployment
-		err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &deployment)
-		if err != nil {
-			return nil, fmt.Errorf("failed to convert unstructured Deployment to typed: %v", err)
-		}
-		return getExtv1beta1DeploymentHealth(&deployment)
 	default:
 		return nil, fmt.Errorf("unsupported Deployment GVK: %s", gvk)
 	}
@@ -50,22 +35,23 @@ func getAppsv1DeploymentHealth(deployment *appsv1.Deployment) (*HealthStatus, er
 	// Borrowed at kubernetes/kubectl/rollout_status.go https://github.com/kubernetes/kubernetes/blob/5232ad4a00ec93942d0b2c6359ee6cd1201b46bc/pkg/kubectl/rollout_status.go#L80
 	if deployment.Generation <= deployment.Status.ObservedGeneration {
 		cond := getAppsv1DeploymentCondition(deployment.Status, appsv1.DeploymentProgressing)
-		if cond != nil && cond.Reason == "ProgressDeadlineExceeded" {
+		switch {
+		case cond != nil && cond.Reason == "ProgressDeadlineExceeded":
 			return &HealthStatus{
 				Status:  HealthStatusDegraded,
 				Message: fmt.Sprintf("Deployment %q exceeded its progress deadline", deployment.Name),
 			}, nil
-		} else if deployment.Spec.Replicas != nil && deployment.Status.UpdatedReplicas < *deployment.Spec.Replicas {
+		case deployment.Spec.Replicas != nil && deployment.Status.UpdatedReplicas < *deployment.Spec.Replicas:
 			return &HealthStatus{
 				Status:  HealthStatusProgressing,
 				Message: fmt.Sprintf("Waiting for rollout to finish: %d out of %d new replicas have been updated...", deployment.Status.UpdatedReplicas, *deployment.Spec.Replicas),
 			}, nil
-		} else if deployment.Status.Replicas > deployment.Status.UpdatedReplicas {
+		case deployment.Status.Replicas > deployment.Status.UpdatedReplicas:
 			return &HealthStatus{
 				Status:  HealthStatusProgressing,
 				Message: fmt.Sprintf("Waiting for rollout to finish: %d old replicas are pending termination...", deployment.Status.Replicas-deployment.Status.UpdatedReplicas),
 			}, nil
-		} else if deployment.Status.AvailableReplicas < deployment.Status.UpdatedReplicas {
+		case deployment.Status.AvailableReplicas < deployment.Status.UpdatedReplicas:
 			return &HealthStatus{
 				Status:  HealthStatusProgressing,
 				Message: fmt.Sprintf("Waiting for rollout to finish: %d of %d updated replicas are available...", deployment.Status.AvailableReplicas, deployment.Status.UpdatedReplicas),
@@ -74,93 +60,7 @@ func getAppsv1DeploymentHealth(deployment *appsv1.Deployment) (*HealthStatus, er
 	} else {
 		return &HealthStatus{
 			Status:  HealthStatusProgressing,
-			Message: "Waiting for rollout to finish: observed deployment generation less then desired generation",
-		}, nil
-	}
-
-	return &HealthStatus{
-		Status: HealthStatusHealthy,
-	}, nil
-}
-
-func getAppsv1beta1DeploymentHealth(deployment *appsv1beta1.Deployment) (*HealthStatus, error) {
-	if deployment.Spec.Paused {
-		return &HealthStatus{
-			Status:  HealthStatusSuspended,
-			Message: "Deployment is paused",
-		}, nil
-	}
-	// Borrowed at kubernetes/kubectl/rollout_status.go https://github.com/kubernetes/kubernetes/blob/5232ad4a00ec93942d0b2c6359ee6cd1201b46bc/pkg/kubectl/rollout_status.go#L80
-	if deployment.Generation <= deployment.Status.ObservedGeneration {
-		cond := getAppsv1beta1DeploymentCondition(deployment.Status, appsv1beta1.DeploymentProgressing)
-		if cond != nil && cond.Reason == "ProgressDeadlineExceeded" {
-			return &HealthStatus{
-				Status:  HealthStatusDegraded,
-				Message: fmt.Sprintf("Deployment %q exceeded its progress deadline", deployment.Name),
-			}, nil
-		} else if deployment.Spec.Replicas != nil && deployment.Status.UpdatedReplicas < *deployment.Spec.Replicas {
-			return &HealthStatus{
-				Status:  HealthStatusProgressing,
-				Message: fmt.Sprintf("Waiting for rollout to finish: %d out of %d new replicas have been updated...", deployment.Status.UpdatedReplicas, *deployment.Spec.Replicas),
-			}, nil
-		} else if deployment.Status.Replicas > deployment.Status.UpdatedReplicas {
-			return &HealthStatus{
-				Status:  HealthStatusProgressing,
-				Message: fmt.Sprintf("Waiting for rollout to finish: %d old replicas are pending termination...", deployment.Status.Replicas-deployment.Status.UpdatedReplicas),
-			}, nil
-		} else if deployment.Status.AvailableReplicas < deployment.Status.UpdatedReplicas {
-			return &HealthStatus{
-				Status:  HealthStatusProgressing,
-				Message: fmt.Sprintf("Waiting for rollout to finish: %d of %d updated replicas are available...", deployment.Status.AvailableReplicas, deployment.Status.UpdatedReplicas),
-			}, nil
-		}
-	} else {
-		return &HealthStatus{
-			Status:  HealthStatusProgressing,
-			Message: "Waiting for rollout to finish: observed deployment generation less then desired generation",
-		}, nil
-	}
-
-	return &HealthStatus{
-		Status: HealthStatusHealthy,
-	}, nil
-}
-
-func getExtv1beta1DeploymentHealth(deployment *extv1beta1.Deployment) (*HealthStatus, error) {
-	if deployment.Spec.Paused {
-		return &HealthStatus{
-			Status:  HealthStatusSuspended,
-			Message: "Deployment is paused",
-		}, nil
-	}
-	// Borrowed at kubernetes/kubectl/rollout_status.go https://github.com/kubernetes/kubernetes/blob/5232ad4a00ec93942d0b2c6359ee6cd1201b46bc/pkg/kubectl/rollout_status.go#L80
-	if deployment.Generation <= deployment.Status.ObservedGeneration {
-		cond := getExtv1beta1DeploymentCondition(deployment.Status, extv1beta1.DeploymentProgressing)
-		if cond != nil && cond.Reason == "ProgressDeadlineExceeded" {
-			return &HealthStatus{
-				Status:  HealthStatusDegraded,
-				Message: fmt.Sprintf("Deployment %q exceeded its progress deadline", deployment.Name),
-			}, nil
-		} else if deployment.Spec.Replicas != nil && deployment.Status.UpdatedReplicas < *deployment.Spec.Replicas {
-			return &HealthStatus{
-				Status:  HealthStatusProgressing,
-				Message: fmt.Sprintf("Waiting for rollout to finish: %d out of %d new replicas have been updated...", deployment.Status.UpdatedReplicas, *deployment.Spec.Replicas),
-			}, nil
-		} else if deployment.Status.Replicas > deployment.Status.UpdatedReplicas {
-			return &HealthStatus{
-				Status:  HealthStatusProgressing,
-				Message: fmt.Sprintf("Waiting for rollout to finish: %d old replicas are pending termination...", deployment.Status.Replicas-deployment.Status.UpdatedReplicas),
-			}, nil
-		} else if deployment.Status.AvailableReplicas < deployment.Status.UpdatedReplicas {
-			return &HealthStatus{
-				Status:  HealthStatusProgressing,
-				Message: fmt.Sprintf("Waiting for rollout to finish: %d of %d updated replicas are available...", deployment.Status.AvailableReplicas, deployment.Status.UpdatedReplicas),
-			}, nil
-		}
-	} else {
-		return &HealthStatus{
-			Status:  HealthStatusProgressing,
-			Message: "Waiting for rollout to finish: observed deployment generation less then desired generation",
+			Message: "Waiting for rollout to finish: observed deployment generation less than desired generation",
 		}, nil
 	}
 
@@ -178,22 +78,3 @@ func getAppsv1DeploymentCondition(status appsv1.DeploymentStatus, condType appsv
 	}
 	return nil
 }
-func getAppsv1beta1DeploymentCondition(status appsv1beta1.DeploymentStatus, condType appsv1beta1.DeploymentConditionType) *appsv1beta1.DeploymentCondition {
-	for i := range status.Conditions {
-		c := status.Conditions[i]
-		if c.Type == condType {
-			return &c
-		}
-	}
-	return nil
-}
-
-func getExtv1beta1DeploymentCondition(status extv1beta1.DeploymentStatus, condType extv1beta1.DeploymentConditionType) *extv1beta1.DeploymentCondition {
-	for i := range status.Conditions {
-		c := status.Conditions[i]
-		if c.Type == condType {
-			return &c
-		}
-	}
-	return nil
-}
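The Deployment refactor swaps an if/else-if chain for a tagless switch, which reads as a flat decision table and satisfies common Go linters. A tiny standalone illustration of the idiom, with invented thresholds rather than the real rollout logic:

    package main

    import "fmt"

    // classify mirrors the shape of the refactored health check:
    // the first true case wins, default means healthy.
    func classify(updated, desired, available int32) string {
        switch {
        case updated < desired:
            return fmt.Sprintf("Progressing: %d of %d updated", updated, desired)
        case available < updated:
            return fmt.Sprintf("Progressing: %d of %d available", available, updated)
        default:
            return "Healthy"
        }
    }

    func main() {
        fmt.Println(classify(1, 3, 1)) // Progressing: 1 of 3 updated
        fmt.Println(classify(3, 3, 3)) // Healthy
    }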
@@ -5,6 +5,7 @@ import (
 	"fmt"
 
 	autoscalingv1 "k8s.io/api/autoscaling/v1"
+	autoscalingv2 "k8s.io/api/autoscaling/v2"
 	autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1"
 	autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@@ -13,17 +14,16 @@ import (
 	"github.com/argoproj/gitops-engine/pkg/utils/kube"
 )
 
-var (
-	progressingStatus = &HealthStatus{
-		Status:  HealthStatusProgressing,
-		Message: "Waiting to Autoscale",
-	}
-)
+var progressingStatus = &HealthStatus{
+	Status:  HealthStatusProgressing,
+	Message: "Waiting to Autoscale",
+}
 
 type hpaCondition struct {
 	Type    string
 	Reason  string
 	Message string
 	Status  string
 }
 
 func getHPAHealth(obj *unstructured.Unstructured) (*HealthStatus, error) {
@@ -52,11 +52,33 @@ func getHPAHealth(obj *unstructured.Unstructured) (*HealthStatus, error)
 			return nil, fmt.Errorf(failedConversionMsg, err)
 		}
 		return getAutoScalingV2beta2HPAHealth(&hpa)
+	case autoscalingv2.SchemeGroupVersion.WithKind(kube.HorizontalPodAutoscalerKind):
+		var hpa autoscalingv2.HorizontalPodAutoscaler
+		err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &hpa)
+		if err != nil {
+			return nil, fmt.Errorf(failedConversionMsg, err)
+		}
+		return getAutoScalingV2HPAHealth(&hpa)
 	default:
 		return nil, fmt.Errorf("unsupported HPA GVK: %s", gvk)
 	}
 }
 
+func getAutoScalingV2HPAHealth(hpa *autoscalingv2.HorizontalPodAutoscaler) (*HealthStatus, error) {
+	statusConditions := hpa.Status.Conditions
+	conditions := make([]hpaCondition, 0, len(statusConditions))
+	for _, statusCondition := range statusConditions {
+		conditions = append(conditions, hpaCondition{
+			Type:    string(statusCondition.Type),
+			Reason:  statusCondition.Reason,
+			Message: statusCondition.Message,
+			Status:  string(statusCondition.Status),
+		})
+	}
+
+	return checkConditions(conditions, progressingStatus)
+}
+
 func getAutoScalingV2beta2HPAHealth(hpa *autoscalingv2beta2.HorizontalPodAutoscaler) (*HealthStatus, error) {
 	statusConditions := hpa.Status.Conditions
 	conditions := make([]hpaCondition, 0, len(statusConditions))
@@ -65,6 +87,7 @@ func getAutoScalingV2beta2HPAHealth(hpa *autoscalingv2beta2.HorizontalPodAutosca
 			Type:    string(statusCondition.Type),
 			Reason:  statusCondition.Reason,
 			Message: statusCondition.Message,
+			Status:  string(statusCondition.Status),
 		})
 	}
 
@@ -79,6 +102,7 @@ func getAutoScalingV2beta1HPAHealth(hpa *autoscalingv2beta1.HorizontalPodAutosca
 			Type:    string(statusCondition.Type),
 			Reason:  statusCondition.Reason,
 			Message: statusCondition.Message,
+			Status:  string(statusCondition.Status),
 		})
 	}
 
@@ -141,14 +165,9 @@ func isDegraded(condition *hpaCondition) bool {
 }
 
 func isHealthy(condition *hpaCondition) bool {
-	healthy_states := []hpaCondition{
-		{Type: "AbleToScale", Reason: "SucceededRescale"},
-		{Type: "ScalingLimited", Reason: "DesiredWithinRange"},
-		{Type: "ScalingLimited", Reason: "TooFewReplicas"},
-		{Type: "ScalingLimited", Reason: "TooManyReplicas"},
-	}
-	for _, healthy_state := range healthy_states {
-		if condition.Type == healthy_state.Type && condition.Reason == healthy_state.Reason {
+	healthyConditionTypes := []string{"AbleToScale", "ScalingLimited"}
+	for _, conditionType := range healthyConditionTypes {
+		if condition.Type == conditionType && condition.Status == "True" {
 			return true
 		}
 	}
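The new isHealthy drops the old allowlist of (Type, Reason) pairs and instead accepts any AbleToScale or ScalingLimited condition whose Status is "True". A self-contained sketch mirroring that logic; hpaCondition is re-declared here purely for illustration:

    package main

    import "fmt"

    type hpaCondition struct {
        Type, Reason, Message, Status string
    }

    func isHealthy(condition *hpaCondition) bool {
        healthyConditionTypes := []string{"AbleToScale", "ScalingLimited"}
        for _, conditionType := range healthyConditionTypes {
            if condition.Type == conditionType && condition.Status == "True" {
                return true
            }
        }
        return false
    }

    func main() {
        fmt.Println(isHealthy(&hpaCondition{Type: "AbleToScale", Status: "True"}))   // true
        fmt.Println(isHealthy(&hpaCondition{Type: "AbleToScale", Status: "False"}))  // false
        fmt.Println(isHealthy(&hpaCondition{Type: "ScalingActive", Status: "True"})) // false: type not in the list
    }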
@ -1,68 +1,13 @@
package health

import (
-	"fmt"
-
-	"github.com/argoproj/gitops-engine/pkg/utils/kube"
-	extv1beta1 "k8s.io/api/extensions/v1beta1"
-	networkingv1 "k8s.io/api/networking/v1"
-	networkingv1beta1 "k8s.io/api/networking/v1beta1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
-	"k8s.io/apimachinery/pkg/runtime"
)

func getIngressHealth(obj *unstructured.Unstructured) (*HealthStatus, error) {
-	gvk := obj.GroupVersionKind()
-	switch gvk {
-	case networkingv1.SchemeGroupVersion.WithKind(kube.IngressKind):
-		var ingress networkingv1.Ingress
-		err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &ingress)
-		if err != nil {
-			return nil, fmt.Errorf("failed to convert unstructured Ingress to typed: %v", err)
-		}
-		return getNetworkingv1IngressHealth(&ingress)
-	case networkingv1beta1.SchemeGroupVersion.WithKind(kube.IngressKind):
-		var ingress networkingv1beta1.Ingress
-		err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &ingress)
-		if err != nil {
-			return nil, fmt.Errorf("failed to convert unstructured Ingress to typed: %v", err)
-		}
-		return getNetworkingv1beta1IngressHealth(&ingress)
-	case extv1beta1.SchemeGroupVersion.WithKind(kube.IngressKind):
-		var ingress extv1beta1.Ingress
-		err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &ingress)
-		if err != nil {
-			return nil, fmt.Errorf("failed to convert unstructured Ingress to typed: %v", err)
-		}
-		return getExtv1beta1IngressHealth(&ingress)
-	default:
-		return nil, fmt.Errorf("unsupported Ingress GVK: %s", gvk)
-	}
-}
-
-func getNetworkingv1IngressHealth(ingress *networkingv1.Ingress) (*HealthStatus, error) {
-	health := HealthStatus{}
-	if len(ingress.Status.LoadBalancer.Ingress) > 0 {
-		health.Status = HealthStatusHealthy
-	} else {
-		health.Status = HealthStatusProgressing
-	}
-	return &health, nil
-}
-
-func getNetworkingv1beta1IngressHealth(ingress *networkingv1beta1.Ingress) (*HealthStatus, error) {
-	health := HealthStatus{}
-	if len(ingress.Status.LoadBalancer.Ingress) > 0 {
-		health.Status = HealthStatusHealthy
-	} else {
-		health.Status = HealthStatusProgressing
-	}
-	return &health, nil
-}
-
-func getExtv1beta1IngressHealth(ingress *extv1beta1.Ingress) (*HealthStatus, error) {
+	ingresses, _, _ := unstructured.NestedSlice(obj.Object, "status", "loadBalancer", "ingress")
	health := HealthStatus{}
-	if len(ingress.Status.LoadBalancer.Ingress) > 0 {
+	if len(ingresses) > 0 {
		health.Status = HealthStatusHealthy
	} else {
		health.Status = HealthStatusProgressing

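The rewrite can drop the typed conversions because every supported Ingress API version (networking.k8s.io/v1, networking.k8s.io/v1beta1 and extensions/v1beta1) exposes the same status.loadBalancer.ingress path. A minimal sketch of the NestedSlice lookup it relies on, with an illustrative object rather than repo data:

	package main

	import (
		"fmt"

		"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	)

	func main() {
		obj := map[string]any{
			"status": map[string]any{
				"loadBalancer": map[string]any{
					"ingress": []any{
						map[string]any{"ip": "10.0.0.1"},
					},
				},
			},
		}
		// NestedSlice returns (value, found, error); a missing or empty path
		// means "no load balancer yet", which the health check maps to Progressing.
		ingresses, found, err := unstructured.NestedSlice(obj, "status", "loadBalancer", "ingress")
		fmt.Println(len(ingresses), found, err) // 1 true <nil>
	}
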
@ -3,10 +3,13 @@ package health
import (
	"fmt"

-	"github.com/argoproj/gitops-engine/pkg/utils/kube"
+	corev1 "k8s.io/api/core/v1"
+
	batchv1 "k8s.io/api/batch/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
+
+	"github.com/argoproj/gitops-engine/pkg/utils/kube"
)

func getJobHealth(obj *unstructured.Unstructured) (*HealthStatus, error) {

@ -16,7 +19,7 @@ func getJobHealth(obj *unstructured.Unstructured) (*HealthStatus, error) {
		var job batchv1.Job
		err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &job)
		if err != nil {
-			return nil, fmt.Errorf("failed to convert unstructured Job to typed: %v", err)
+			return nil, fmt.Errorf("failed to convert unstructured Job to typed: %w", err)
		}
		return getBatchv1JobHealth(&job)
	default:

@ -29,6 +32,7 @@ func getBatchv1JobHealth(job *batchv1.Job) (*HealthStatus, error) {
	var failMsg string
	complete := false
	var message string
+	isSuspended := false
	for _, condition := range job.Status.Conditions {
		switch condition.Type {
		case batchv1.JobFailed:

@ -38,19 +42,31 @@ func getBatchv1JobHealth(job *batchv1.Job) (*HealthStatus, error) {
		case batchv1.JobComplete:
			complete = true
			message = condition.Message
+		case batchv1.JobSuspended:
+			complete = true
+			message = condition.Message
+			if condition.Status == corev1.ConditionTrue {
+				isSuspended = true
+			}
		}
	}
-	if !complete {
+	switch {
+	case !complete:
		return &HealthStatus{
			Status:  HealthStatusProgressing,
			Message: message,
		}, nil
-	} else if failed {
+	case failed:
		return &HealthStatus{
			Status:  HealthStatusDegraded,
			Message: failMsg,
		}, nil
-	} else {
+	case isSuspended:
+		return &HealthStatus{
+			Status:  HealthStatusSuspended,
+			Message: failMsg,
+		}, nil
+	default:
		return &HealthStatus{
			Status:  HealthStatusHealthy,
			Message: message,

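A minimal sketch of what the new branch buys, written as a hypothetical test in the same package (it reuses getBatchv1JobHealth from above; the test name is illustrative):

	package health

	import (
		"testing"

		"github.com/stretchr/testify/assert"
		"github.com/stretchr/testify/require"
		batchv1 "k8s.io/api/batch/v1"
		corev1 "k8s.io/api/core/v1"
	)

	func TestSuspendedJobMapsToSuspendedHealth(t *testing.T) {
		job := batchv1.Job{
			Status: batchv1.JobStatus{
				Conditions: []batchv1.JobCondition{{
					Type:    batchv1.JobSuspended,
					Status:  corev1.ConditionTrue,
					Message: "Job suspended",
				}},
			},
		}
		// A true Suspended condition sets both complete and isSuspended, so
		// the switch lands on the HealthStatusSuspended branch, not Healthy.
		status, err := getBatchv1JobHealth(&job)
		require.NoError(t, err)
		assert.Equal(t, HealthStatusSuspended, status.Status)
	}
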
@ -4,11 +4,12 @@ import (
	"fmt"
	"strings"

-	"github.com/argoproj/gitops-engine/pkg/utils/kube"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/kubectl/pkg/util/podutils"
+
+	"github.com/argoproj/gitops-engine/pkg/utils/kube"
)

func getPodHealth(obj *unstructured.Unstructured) (*HealthStatus, error) {

@ -18,7 +19,7 @@ func getPodHealth(obj *unstructured.Unstructured) (*HealthStatus, error) {
		var pod corev1.Pod
		err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &pod)
		if err != nil {
-			return nil, fmt.Errorf("failed to convert unstructured Pod to typed: %v", err)
+			return nil, fmt.Errorf("failed to convert unstructured Pod to typed: %w", err)
		}
		return getCorev1PodHealth(&pod)
	default:

@ -3,10 +3,11 @@ package health
import (
	"fmt"

-	"github.com/argoproj/gitops-engine/pkg/utils/kube"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
+
+	"github.com/argoproj/gitops-engine/pkg/utils/kube"
)

func getPVCHealth(obj *unstructured.Unstructured) (*HealthStatus, error) {

@ -16,7 +17,7 @@ func getPVCHealth(obj *unstructured.Unstructured) (*HealthStatus, error) {
		var pvc corev1.PersistentVolumeClaim
		err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &pvc)
		if err != nil {
-			return nil, fmt.Errorf("failed to convert unstructured PersistentVolumeClaim to typed: %v", err)
+			return nil, fmt.Errorf("failed to convert unstructured PersistentVolumeClaim to typed: %w", err)
		}
		return getCorev1PVCHealth(&pvc)
	default:

@ -3,13 +3,12 @@ package health
import (
	"fmt"

-	"github.com/argoproj/gitops-engine/pkg/utils/kube"
	appsv1 "k8s.io/api/apps/v1"
-	appsv1beta2 "k8s.io/api/apps/v1beta2"
	corev1 "k8s.io/api/core/v1"
-	extv1beta1 "k8s.io/api/extensions/v1beta1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
+
+	"github.com/argoproj/gitops-engine/pkg/utils/kube"
)

func getReplicaSetHealth(obj *unstructured.Unstructured) (*HealthStatus, error) {

@ -19,23 +18,9 @@ func getReplicaSetHealth(obj *unstructured.Unstructured) (*HealthStatus, error)
		var replicaSet appsv1.ReplicaSet
		err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &replicaSet)
		if err != nil {
-			return nil, fmt.Errorf("failed to convert unstructured ReplicaSet to typed: %v", err)
+			return nil, fmt.Errorf("failed to convert unstructured ReplicaSet to typed: %w", err)
		}
		return getAppsv1ReplicaSetHealth(&replicaSet)
-	case appsv1beta2.SchemeGroupVersion.WithKind(kube.ReplicaSetKind):
-		var replicaSet appsv1beta2.ReplicaSet
-		err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &replicaSet)
-		if err != nil {
-			return nil, fmt.Errorf("failed to convert unstructured ReplicaSet to typed: %v", err)
-		}
-		return getAppsv1beta1ReplicaSetHealth(&replicaSet)
-	case extv1beta1.SchemeGroupVersion.WithKind(kube.ReplicaSetKind):
-		var replicaSet extv1beta1.ReplicaSet
-		err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &replicaSet)
-		if err != nil {
-			return nil, fmt.Errorf("failed to convert unstructured ReplicaSet to typed: %v", err)
-		}
-		return getExtv1beta1ReplicaSetHealth(&replicaSet)
	default:
		return nil, fmt.Errorf("unsupported ReplicaSet GVK: %s", gvk)
	}

@ -58,59 +43,7 @@ func getAppsv1ReplicaSetHealth(replicaSet *appsv1.ReplicaSet) (*HealthStatus, er
	} else {
		return &HealthStatus{
			Status:  HealthStatusProgressing,
-			Message: "Waiting for rollout to finish: observed replica set generation less then desired generation",
+			Message: "Waiting for rollout to finish: observed replica set generation less than desired generation",
		}, nil
	}

	return &HealthStatus{
		Status: HealthStatusHealthy,
	}, nil
}
-
-func getAppsv1beta1ReplicaSetHealth(replicaSet *appsv1beta2.ReplicaSet) (*HealthStatus, error) {
-	if replicaSet.Generation <= replicaSet.Status.ObservedGeneration {
-		cond := getAppsv1beta2ReplicaSetCondition(replicaSet.Status, appsv1beta2.ReplicaSetReplicaFailure)
-		if cond != nil && cond.Status == corev1.ConditionTrue {
-			return &HealthStatus{
-				Status:  HealthStatusDegraded,
-				Message: cond.Message,
-			}, nil
-		} else if replicaSet.Spec.Replicas != nil && replicaSet.Status.AvailableReplicas < *replicaSet.Spec.Replicas {
-			return &HealthStatus{
-				Status:  HealthStatusProgressing,
-				Message: fmt.Sprintf("Waiting for rollout to finish: %d out of %d new replicas are available...", replicaSet.Status.AvailableReplicas, *replicaSet.Spec.Replicas),
-			}, nil
-		}
-	} else {
-		return &HealthStatus{
-			Status:  HealthStatusProgressing,
-			Message: "Waiting for rollout to finish: observed replica set generation less then desired generation",
-		}, nil
-	}
-
-	return &HealthStatus{
-		Status: HealthStatusHealthy,
-	}, nil
-}
-
-func getExtv1beta1ReplicaSetHealth(replicaSet *extv1beta1.ReplicaSet) (*HealthStatus, error) {
-	if replicaSet.Generation <= replicaSet.Status.ObservedGeneration {
-		cond := getExtv1beta1ReplicaSetCondition(replicaSet.Status, extv1beta1.ReplicaSetReplicaFailure)
-		if cond != nil && cond.Status == corev1.ConditionTrue {
-			return &HealthStatus{
-				Status:  HealthStatusDegraded,
-				Message: cond.Message,
-			}, nil
-		} else if replicaSet.Spec.Replicas != nil && replicaSet.Status.AvailableReplicas < *replicaSet.Spec.Replicas {
-			return &HealthStatus{
-				Status:  HealthStatusProgressing,
-				Message: fmt.Sprintf("Waiting for rollout to finish: %d out of %d new replicas are available...", replicaSet.Status.AvailableReplicas, *replicaSet.Spec.Replicas),
-			}, nil
-		}
-	} else {
-		return &HealthStatus{
-			Status:  HealthStatusProgressing,
-			Message: "Waiting for rollout to finish: observed replica set generation less then desired generation",
-		}, nil
-	}

@ -128,23 +61,3 @@ func getAppsv1ReplicaSetCondition(status appsv1.ReplicaSetStatus, condType appsv
	}
	return nil
}
-
-func getAppsv1beta2ReplicaSetCondition(status appsv1beta2.ReplicaSetStatus, condType appsv1beta2.ReplicaSetConditionType) *appsv1beta2.ReplicaSetCondition {
-	for i := range status.Conditions {
-		c := status.Conditions[i]
-		if c.Type == condType {
-			return &c
-		}
-	}
-	return nil
-}
-
-func getExtv1beta1ReplicaSetCondition(status extv1beta1.ReplicaSetStatus, condType extv1beta1.ReplicaSetConditionType) *extv1beta1.ReplicaSetCondition {
-	for i := range status.Conditions {
-		c := status.Conditions[i]
-		if c.Type == condType {
-			return &c
-		}
-	}
-	return nil
-}

@ -3,10 +3,11 @@ package health
import (
	"fmt"

-	"github.com/argoproj/gitops-engine/pkg/utils/kube"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
+
+	"github.com/argoproj/gitops-engine/pkg/utils/kube"
)

func getServiceHealth(obj *unstructured.Unstructured) (*HealthStatus, error) {

@ -16,7 +17,7 @@ func getServiceHealth(obj *unstructured.Unstructured) (*HealthStatus, error) {
		var service corev1.Service
		err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &service)
		if err != nil {
-			return nil, fmt.Errorf("failed to convert unstructured Service to typed: %v", err)
+			return nil, fmt.Errorf("failed to convert unstructured Service to typed: %w", err)
		}
		return getCorev1ServiceHealth(&service)
	default:

@ -3,12 +3,11 @@ package health
import (
	"fmt"

-	"github.com/argoproj/gitops-engine/pkg/utils/kube"
	appsv1 "k8s.io/api/apps/v1"
-	appsv1beta1 "k8s.io/api/apps/v1beta1"
-	appsv1beta2 "k8s.io/api/apps/v1beta2"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
+
+	"github.com/argoproj/gitops-engine/pkg/utils/kube"
)

func getStatefulSetHealth(obj *unstructured.Unstructured) (*HealthStatus, error) {

@ -18,23 +17,9 @@ func getStatefulSetHealth(obj *unstructured.Unstructured) (*HealthStatus, error)
		var sts appsv1.StatefulSet
		err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &sts)
		if err != nil {
-			return nil, fmt.Errorf("failed to convert unstructured StatefulSet to typed: %v", err)
+			return nil, fmt.Errorf("failed to convert unstructured StatefulSet to typed: %w", err)
		}
		return getAppsv1StatefulSetHealth(&sts)
-	case appsv1beta1.SchemeGroupVersion.WithKind(kube.StatefulSetKind):
-		var sts appsv1beta1.StatefulSet
-		err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &sts)
-		if err != nil {
-			return nil, fmt.Errorf("failed to convert unstructured StatefulSet to typed: %v", err)
-		}
-		return getAppsv1beta1StatefulSetHealth(&sts)
-	case appsv1beta2.SchemeGroupVersion.WithKind(kube.StatefulSetKind):
-		var sts appsv1beta2.StatefulSet
-		err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &sts)
-		if err != nil {
-			return nil, fmt.Errorf("failed to convert unstructured StatefulSet to typed: %v", err)
-		}
-		return getAppsv1beta2StatefulSetHealth(&sts)
	default:
		return nil, fmt.Errorf("unsupported StatefulSet GVK: %s", gvk)
	}

@ -86,102 +71,3 @@ func getAppsv1StatefulSetHealth(sts *appsv1.StatefulSet) (*HealthStatus, error)
		Message: fmt.Sprintf("statefulset rolling update complete %d pods at revision %s...", sts.Status.CurrentReplicas, sts.Status.CurrentRevision),
	}, nil
}
-
-func getAppsv1beta1StatefulSetHealth(sts *appsv1beta1.StatefulSet) (*HealthStatus, error) {
-	// Borrowed at kubernetes/kubectl/rollout_status.go https://github.com/kubernetes/kubernetes/blob/5232ad4a00ec93942d0b2c6359ee6cd1201b46bc/pkg/kubectl/rollout_status.go#L131
-	observedGeneration := sts.Status.ObservedGeneration
-	if observedGeneration == nil {
-		var x int64
-		observedGeneration = &x
-	}
-	if *observedGeneration == 0 || sts.Generation > *observedGeneration {
-		return &HealthStatus{
-			Status:  HealthStatusProgressing,
-			Message: "Waiting for statefulset spec update to be observed...",
-		}, nil
-	}
-	if sts.Spec.Replicas != nil && sts.Status.ReadyReplicas < *sts.Spec.Replicas {
-		return &HealthStatus{
-			Status:  HealthStatusProgressing,
-			Message: fmt.Sprintf("Waiting for %d pods to be ready...", *sts.Spec.Replicas-sts.Status.ReadyReplicas),
-		}, nil
-	}
-	if sts.Spec.UpdateStrategy.Type == appsv1beta1.RollingUpdateStatefulSetStrategyType && sts.Spec.UpdateStrategy.RollingUpdate != nil {
-		if sts.Spec.Replicas != nil && sts.Spec.UpdateStrategy.RollingUpdate.Partition != nil {
-			if sts.Status.UpdatedReplicas < (*sts.Spec.Replicas - *sts.Spec.UpdateStrategy.RollingUpdate.Partition) {
-				return &HealthStatus{
-					Status: HealthStatusProgressing,
-					Message: fmt.Sprintf("Waiting for partitioned roll out to finish: %d out of %d new pods have been updated...",
-						sts.Status.UpdatedReplicas, (*sts.Spec.Replicas - *sts.Spec.UpdateStrategy.RollingUpdate.Partition)),
-				}, nil
-			}
-		}
-		return &HealthStatus{
-			Status:  HealthStatusHealthy,
-			Message: fmt.Sprintf("partitioned roll out complete: %d new pods have been updated...", sts.Status.UpdatedReplicas),
-		}, nil
-	}
-	if sts.Spec.UpdateStrategy.Type == appsv1beta1.OnDeleteStatefulSetStrategyType {
-		return &HealthStatus{
-			Status:  HealthStatusHealthy,
-			Message: fmt.Sprintf("statefulset has %d ready pods", sts.Status.ReadyReplicas),
-		}, nil
-	}
-	if sts.Status.UpdateRevision != sts.Status.CurrentRevision {
-		return &HealthStatus{
-			Status:  HealthStatusProgressing,
-			Message: fmt.Sprintf("waiting for statefulset rolling update to complete %d pods at revision %s...", sts.Status.UpdatedReplicas, sts.Status.UpdateRevision),
-		}, nil
-	}
-	return &HealthStatus{
-		Status:  HealthStatusHealthy,
-		Message: fmt.Sprintf("statefulset rolling update complete %d pods at revision %s...", sts.Status.CurrentReplicas, sts.Status.CurrentRevision),
-	}, nil
-}
-
-func getAppsv1beta2StatefulSetHealth(sts *appsv1beta2.StatefulSet) (*HealthStatus, error) {
-	// Borrowed at kubernetes/kubectl/rollout_status.go https://github.com/kubernetes/kubernetes/blob/5232ad4a00ec93942d0b2c6359ee6cd1201b46bc/pkg/kubectl/rollout_status.go#L131
-	if sts.Status.ObservedGeneration == 0 || sts.Generation > sts.Status.ObservedGeneration {
-		return &HealthStatus{
-			Status:  HealthStatusProgressing,
-			Message: "Waiting for statefulset spec update to be observed...",
-		}, nil
-	}
-	if sts.Spec.Replicas != nil && sts.Status.ReadyReplicas < *sts.Spec.Replicas {
-		return &HealthStatus{
-			Status:  HealthStatusProgressing,
-			Message: fmt.Sprintf("Waiting for %d pods to be ready...", *sts.Spec.Replicas-sts.Status.ReadyReplicas),
-		}, nil
-	}
-	if sts.Spec.UpdateStrategy.Type == appsv1beta2.RollingUpdateStatefulSetStrategyType && sts.Spec.UpdateStrategy.RollingUpdate != nil {
-		if sts.Spec.Replicas != nil && sts.Spec.UpdateStrategy.RollingUpdate.Partition != nil {
-			if sts.Status.UpdatedReplicas < (*sts.Spec.Replicas - *sts.Spec.UpdateStrategy.RollingUpdate.Partition) {
-				return &HealthStatus{
-					Status: HealthStatusProgressing,
-					Message: fmt.Sprintf("Waiting for partitioned roll out to finish: %d out of %d new pods have been updated...",
-						sts.Status.UpdatedReplicas, (*sts.Spec.Replicas - *sts.Spec.UpdateStrategy.RollingUpdate.Partition)),
-				}, nil
-			}
-		}
-		return &HealthStatus{
-			Status:  HealthStatusHealthy,
-			Message: fmt.Sprintf("partitioned roll out complete: %d new pods have been updated...", sts.Status.UpdatedReplicas),
-		}, nil
-	}
-	if sts.Spec.UpdateStrategy.Type == appsv1beta2.OnDeleteStatefulSetStrategyType {
-		return &HealthStatus{
-			Status:  HealthStatusHealthy,
-			Message: fmt.Sprintf("statefulset has %d ready pods", sts.Status.ReadyReplicas),
-		}, nil
-	}
-	if sts.Status.UpdateRevision != sts.Status.CurrentRevision {
-		return &HealthStatus{
-			Status:  HealthStatusProgressing,
-			Message: fmt.Sprintf("waiting for statefulset rolling update to complete %d pods at revision %s...", sts.Status.UpdatedReplicas, sts.Status.UpdateRevision),
-		}, nil
-	}
-	return &HealthStatus{
-		Status:  HealthStatusHealthy,
-		Message: fmt.Sprintf("statefulset rolling update complete %d pods at revision %s...", sts.Status.CurrentReplicas, sts.Status.CurrentRevision),
-	}, nil
-}

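A worked example of the partition gate shared by the retained apps/v1 check and the two helpers deleted above, with hypothetical numbers: given spec.replicas = 5 and rollingUpdate.partition = 2, only pods with ordinal >= 2 are updated, so the roll out is complete once status.updatedReplicas reaches 5 - 2 = 3. A one-line restatement in Go:

	// partitionedRolloutComplete restates the guard used by the health checks.
	func partitionedRolloutComplete(updated, replicas, partition int32) bool {
		return updated >= replicas-partition
	}
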
@ -5,7 +5,7 @@ Package provides functionality that allows assessing the health state of a Kuber
package health

import (
-	"io/ioutil"
+	"os"
	"testing"

	"github.com/stretchr/testify/assert"

@ -15,13 +15,15 @@ import (
)

func assertAppHealth(t *testing.T, yamlPath string, expectedStatus HealthStatusCode) {
-	health := getHealthStatus(yamlPath, t)
+	t.Helper()
+	health := getHealthStatus(t, yamlPath)
	assert.NotNil(t, health)
	assert.Equal(t, expectedStatus, health.Status)
}

-func getHealthStatus(yamlPath string, t *testing.T) *HealthStatus {
-	yamlBytes, err := ioutil.ReadFile(yamlPath)
+func getHealthStatus(t *testing.T, yamlPath string) *HealthStatus {
+	t.Helper()
+	yamlBytes, err := os.ReadFile(yamlPath)
	require.NoError(t, err)
	var obj unstructured.Unstructured
	err = yaml.Unmarshal(yamlBytes, &obj)

@ -49,6 +51,7 @@ func TestStatefulSetOnDeleteHealth(t *testing.T) {
func TestDaemonSetOnDeleteHealth(t *testing.T) {
	assertAppHealth(t, "./testdata/daemonset-ondelete.yaml", HealthStatusHealthy)
}

func TestPVCHealth(t *testing.T) {
	assertAppHealth(t, "./testdata/pvc-bound.yaml", HealthStatusHealthy)
	assertAppHealth(t, "./testdata/pvc-pending.yaml", HealthStatusProgressing)

@ -68,17 +71,22 @@ func TestIngressHealth(t *testing.T) {
}

func TestCRD(t *testing.T) {
-	assert.Nil(t, getHealthStatus("./testdata/knative-service.yaml", t))
+	assert.Nil(t, getHealthStatus(t, "./testdata/knative-service.yaml"))
}

func TestJob(t *testing.T) {
	assertAppHealth(t, "./testdata/job-running.yaml", HealthStatusProgressing)
	assertAppHealth(t, "./testdata/job-failed.yaml", HealthStatusDegraded)
	assertAppHealth(t, "./testdata/job-succeeded.yaml", HealthStatusHealthy)
+	assertAppHealth(t, "./testdata/job-suspended.yaml", HealthStatusSuspended)
}

func TestHPA(t *testing.T) {
+	assertAppHealth(t, "./testdata/hpa-v2-healthy.yaml", HealthStatusHealthy)
+	assertAppHealth(t, "./testdata/hpa-v2-degraded.yaml", HealthStatusDegraded)
+	assertAppHealth(t, "./testdata/hpa-v2-progressing.yaml", HealthStatusProgressing)
	assertAppHealth(t, "./testdata/hpa-v2beta2-healthy.yaml", HealthStatusHealthy)
	assertAppHealth(t, "./testdata/hpa-v2beta1-healthy-disabled.yaml", HealthStatusHealthy)
	assertAppHealth(t, "./testdata/hpa-v2beta1-healthy.yaml", HealthStatusHealthy)
	assertAppHealth(t, "./testdata/hpa-v1-degraded.yaml", HealthStatusDegraded)
	assertAppHealth(t, "./testdata/hpa-v1-healthy.yaml", HealthStatusHealthy)

@ -102,8 +110,8 @@ func TestPod(t *testing.T) {
}

func TestApplication(t *testing.T) {
-	assert.Nil(t, getHealthStatus("./testdata/application-healthy.yaml", t))
-	assert.Nil(t, getHealthStatus("./testdata/application-degraded.yaml", t))
+	assert.Nil(t, getHealthStatus(t, "./testdata/application-healthy.yaml"))
+	assert.Nil(t, getHealthStatus(t, "./testdata/application-degraded.yaml"))
}

func TestAPIService(t *testing.T) {

@ -114,16 +122,17 @@ func TestAPIService(t *testing.T) {
}

func TestGetArgoWorkflowHealth(t *testing.T) {
-	sampleWorkflow := unstructured.Unstructured{Object: map[string]interface{}{
-		"spec": map[string]interface{}{
-			"entrypoint":    "sampleEntryPoint",
-			"extraneousKey": "we are agnostic to extraneous keys",
-		},
-		"status": map[string]interface{}{
-			"phase":   "Running",
-			"message": "This node is running",
-		},
-	},
-	}
+	sampleWorkflow := unstructured.Unstructured{
+		Object: map[string]any{
+			"spec": map[string]any{
+				"entrypoint":    "sampleEntryPoint",
+				"extraneousKey": "we are agnostic to extraneous keys",
+			},
+			"status": map[string]any{
+				"phase":   "Running",
+				"message": "This node is running",
+			},
+		},
+	}

	health, err := getArgoWorkflowHealth(&sampleWorkflow)

@ -131,16 +140,17 @@ func TestGetArgoWorkflowHealth(t *testing.T) {
	assert.Equal(t, HealthStatusProgressing, health.Status)
	assert.Equal(t, "This node is running", health.Message)

-	sampleWorkflow = unstructured.Unstructured{Object: map[string]interface{}{
-		"spec": map[string]interface{}{
-			"entrypoint":    "sampleEntryPoint",
-			"extraneousKey": "we are agnostic to extraneous keys",
-		},
-		"status": map[string]interface{}{
-			"phase":   "Succeeded",
-			"message": "This node is has succeeded",
-		},
-	},
-	}
+	sampleWorkflow = unstructured.Unstructured{
+		Object: map[string]any{
+			"spec": map[string]any{
+				"entrypoint":    "sampleEntryPoint",
+				"extraneousKey": "we are agnostic to extraneous keys",
+			},
+			"status": map[string]any{
+				"phase":   "Succeeded",
+				"message": "This node is has succeeded",
+			},
+		},
+	}

	health, err = getArgoWorkflowHealth(&sampleWorkflow)

@ -148,17 +158,17 @@ func TestGetArgoWorkflowHealth(t *testing.T) {
	assert.Equal(t, HealthStatusHealthy, health.Status)
	assert.Equal(t, "This node is has succeeded", health.Message)

-	sampleWorkflow = unstructured.Unstructured{Object: map[string]interface{}{
-		"spec": map[string]interface{}{
-			"entrypoint":    "sampleEntryPoint",
-			"extraneousKey": "we are agnostic to extraneous keys",
-		},
-	},
-	}
+	sampleWorkflow = unstructured.Unstructured{
+		Object: map[string]any{
+			"spec": map[string]any{
+				"entrypoint":    "sampleEntryPoint",
+				"extraneousKey": "we are agnostic to extraneous keys",
+			},
+		},
+	}

	health, err = getArgoWorkflowHealth(&sampleWorkflow)
	require.NoError(t, err)
	assert.Equal(t, HealthStatusProgressing, health.Status)
-	assert.Equal(t, "", health.Message)
+	assert.Empty(t, health.Message)
}

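The t.Helper() calls added to both helpers matter for diagnostics: a failure inside a helper marked with t.Helper() is reported at the calling test's line rather than inside the helper. A hypothetical standalone illustration:

	// requirePositive is an illustrative helper, not part of the repo.
	func requirePositive(t *testing.T, n int) {
		t.Helper() // failures below are attributed to the caller's line
		if n <= 0 {
			t.Fatalf("expected positive value, got %d", n)
		}
	}
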
@ -1,10 +1,10 @@
-apiVersion: extensions/v1beta1
+apiVersion: apps/v1
kind: Deployment
metadata:
  annotations:
    deployment.kubernetes.io/revision: "4"
    kubectl.kubernetes.io/last-applied-configuration: |
-      {"apiVersion":"apps/v1beta2","kind":"Deployment","metadata":{"annotations":{},"labels":{"app.kubernetes.io/instance":"guestbook-default"},"name":"guestbook-ui","namespace":"default"},"spec":{"replicas":1,"selector":{"matchLabels":{"app":"guestbook-ui"}},"template":{"metadata":{"labels":{"app":"guestbook-ui","app.kubernetes.io/instance":"guestbook-default"}},"spec":{"containers":[{"image":"gcr.io/heptio-images/ks-guestbook-demo:0.3","name":"guestbook-ui","ports":[{"containerPort":80}]}]}}}}
+      {"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{},"labels":{"app.kubernetes.io/instance":"guestbook-default"},"name":"guestbook-ui","namespace":"default"},"spec":{"replicas":1,"selector":{"matchLabels":{"app":"guestbook-ui"}},"template":{"metadata":{"labels":{"app":"guestbook-ui","app.kubernetes.io/instance":"guestbook-default"}},"spec":{"containers":[{"image":"gcr.io/heptio-images/ks-guestbook-demo:0.3","name":"guestbook-ui","ports":[{"containerPort":80}]}]}}}}
  creationTimestamp: 2018-07-18T04:40:44Z
  generation: 4
  labels:

@ -12,7 +12,7 @@ metadata:
  name: guestbook-ui
  namespace: default
  resourceVersion: "13660"
-  selfLink: /apis/extensions/v1beta1/namespaces/default/deployments/guestbook-ui
+  selfLink: /apis/apps/v1/namespaces/default/deployments/guestbook-ui
  uid: bb9af0c7-8a44-11e8-9e23-42010aa80010
spec:
  progressDeadlineSeconds: 600

@ -1,10 +1,10 @@
-apiVersion: extensions/v1beta1
+apiVersion: apps/v1
kind: Deployment
metadata:
  annotations:
    deployment.kubernetes.io/revision: "4"
    kubectl.kubernetes.io/last-applied-configuration: |
-      {"apiVersion":"apps/v1beta2","kind":"Deployment","metadata":{"annotations":{},"labels":{"app.kubernetes.io/instance":"guestbook-default"},"name":"guestbook-ui","namespace":"default"},"spec":{"replicas":1,"selector":{"matchLabels":{"app":"guestbook-ui"}},"template":{"metadata":{"labels":{"app":"guestbook-ui","app.kubernetes.io/instance":"guestbook-default"}},"spec":{"containers":[{"image":"gcr.io/heptio-images/ks-guestbook-demo:0.3","name":"guestbook-ui","ports":[{"containerPort":80}]}]}}}}
+      {"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{},"labels":{"app.kubernetes.io/instance":"guestbook-default"},"name":"guestbook-ui","namespace":"default"},"spec":{"replicas":1,"selector":{"matchLabels":{"app":"guestbook-ui"}},"template":{"metadata":{"labels":{"app":"guestbook-ui","app.kubernetes.io/instance":"guestbook-default"}},"spec":{"containers":[{"image":"gcr.io/heptio-images/ks-guestbook-demo:0.3","name":"guestbook-ui","ports":[{"containerPort":80}]}]}}}}
  creationTimestamp: 2018-07-18T04:40:44Z
  generation: 4
  labels:

@ -12,7 +12,7 @@ metadata:
  name: guestbook-ui
  namespace: default
  resourceVersion: "12819"
-  selfLink: /apis/extensions/v1beta1/namespaces/default/deployments/guestbook-ui
+  selfLink: /apis/apps/v1/namespaces/default/deployments/guestbook-ui
  uid: bb9af0c7-8a44-11e8-9e23-42010aa80010
spec:
  progressDeadlineSeconds: 600

@ -1,10 +1,10 @@
-apiVersion: extensions/v1beta1
+apiVersion: apps/v1
kind: Deployment
metadata:
  annotations:
    deployment.kubernetes.io/revision: "4"
    kubectl.kubernetes.io/last-applied-configuration: |
-      {"apiVersion":"apps/v1beta2","kind":"Deployment","metadata":{"annotations":{},"labels":{"app.kubernetes.io/instance":"guestbook-default"},"name":"guestbook-ui","namespace":"default"},"spec":{"replicas":1,"selector":{"matchLabels":{"app":"guestbook-ui"}},"template":{"metadata":{"labels":{"app":"guestbook-ui","app.kubernetes.io/instance":"guestbook-default"}},"spec":{"containers":[{"image":"gcr.io/heptio-images/ks-guestbook-demo:0.3","name":"guestbook-ui","ports":[{"containerPort":80}]}]}}}}
+      {"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{},"labels":{"app.kubernetes.io/instance":"guestbook-default"},"name":"guestbook-ui","namespace":"default"},"spec":{"replicas":1,"selector":{"matchLabels":{"app":"guestbook-ui"}},"template":{"metadata":{"labels":{"app":"guestbook-ui","app.kubernetes.io/instance":"guestbook-default"}},"spec":{"containers":[{"image":"gcr.io/heptio-images/ks-guestbook-demo:0.3","name":"guestbook-ui","ports":[{"containerPort":80}]}]}}}}
  creationTimestamp: 2018-07-18T04:40:44Z
  generation: 4
  labels:

@ -12,7 +12,7 @@ metadata:
  name: guestbook-ui
  namespace: default
  resourceVersion: "12819"
-  selfLink: /apis/extensions/v1beta1/namespaces/default/deployments/guestbook-ui
+  selfLink: /apis/apps/v1/namespaces/default/deployments/guestbook-ui
  uid: bb9af0c7-8a44-11e8-9e23-42010aa80010
spec:
  progressDeadlineSeconds: 600

@ -2,7 +2,7 @@ apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
  annotations:
-    autoscaling.alpha.kubernetes.io/conditions: '[{"type":"AbleToScale","status":"True","lastTransitionTime":"2020-11-23T19:38:38Z","reason":"SucceededGetScale","message":"the HPA controller was able to get the target''s current scale"}]'
+    autoscaling.alpha.kubernetes.io/conditions: '[{"type":"AbleToScale","status":"False","lastTransitionTime":"2020-11-23T19:38:38Z","reason":"SucceededGetScale","message":"the HPA controller was not able to get the target''s current scale"}]'
  name: sample
  namespace: argocd
spec:

@ -0,0 +1,42 @@
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  creationTimestamp: "2022-01-17T14:22:27Z"
  name: sample
  uid: 0e6d855e-83ed-4ed5-b80a-461a750f14db
spec:
  maxReplicas: 2
  minReplicas: 1
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: argocd-server
  targetCPUUtilizationPercentage: 80
status:
  conditions:
  - lastTransitionTime: "2022-04-14T19:44:23Z"
    message: 'the HPA controller was unable to get the target''s current scale: deployments/scale.apps
      "sandbox-test-app-8" not found'
    reason: FailedGetScale
    status: "False"
    type: AbleToScale
  - lastTransitionTime: "2022-04-14T15:41:57Z"
    message: the HPA was able to successfully calculate a replica count from cpu resource
      utilization (percentage of request)
    reason: ValidMetricFound
    status: "True"
    type: ScalingActive
  - lastTransitionTime: "2022-01-17T14:24:13Z"
    message: the desired count is within the acceptable range
    reason: DesiredWithinRange
    status: "False"
    type: ScalingLimited
  currentMetrics:
  - resource:
      current:
        averageUtilization: 6
        averageValue: 12m
      name: cpu
    type: Resource
  currentReplicas: 1
  desiredReplicas: 1

@ -0,0 +1,42 @@
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  creationTimestamp: '2022-05-13T12:39:31Z'
  name: sample
  uid: 0e6d855e-83ed-4ed5-b80a-461a750f14db
spec:
  maxReplicas: 2
  minReplicas: 1
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: argocd-server
  targetCPUUtilizationPercentage: 80
status:
  conditions:
  - lastTransitionTime: '2022-05-13T12:40:34Z'
    message: recommended size matches current size
    reason: ReadyForNewScale
    status: 'True'
    type: AbleToScale
  - lastTransitionTime: '2022-05-13T12:40:33Z'
    message: >-
      the HPA was able to successfully calculate a replica count from cpu
      resource utilization (percentage of request)
    reason: ValidMetricFound
    status: 'True'
    type: ScalingActive
  - lastTransitionTime: '2022-05-13T12:40:31Z'
    message: the desired count is within the acceptable range
    reason: DesiredWithinRange
    status: 'False'
    type: ScalingLimited
  currentMetrics:
  - resource:
      current:
        averageUtilization: 6
        averageValue: 12m
      name: cpu
    type: Resource
  currentReplicas: 1
  desiredReplicas: 1

@ -0,0 +1,37 @@
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  creationTimestamp: '2022-05-13T12:39:31Z'
  name: sample
  uid: 0e6d855e-83ed-4ed5-b80a-461a750f14db
spec:
  maxReplicas: 2
  minReplicas: 1
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: argocd-server
  targetCPUUtilizationPercentage: 80
status:
  conditions:
  - lastTransitionTime: '2022-05-13T12:40:33Z'
    message: >-
      the HPA was able to successfully calculate a replica count from cpu
      resource utilization (percentage of request)
    reason: ValidMetricFound
    status: 'True'
    type: ScalingActive
  - lastTransitionTime: '2022-05-13T12:40:31Z'
    message: the desired count is within the acceptable range
    reason: DesiredWithinRange
    status: 'False'
    type: ScalingLimited
  currentMetrics:
  - resource:
      current:
        averageUtilization: 6
        averageValue: 12m
      name: cpu
    type: Resource
  currentReplicas: 1
  desiredReplicas: 1

@ -0,0 +1,37 @@
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
  creationTimestamp: '2021-09-15T09:31:50Z'
  name: sample
  namespace: argocd
  resourceVersion: '18886245'
  selfLink: >-
    /apis/autoscaling/v2beta1/namespaces/argocd/horizontalpodautoscalers/sample
  uid: c10a6092-1607-11ec-a314-020fc740624d
spec:
  maxReplicas: 3
  metrics:
  - resource:
      name: cpu
      targetAverageUtilization: 80
    type: Resource
  minReplicas: 1
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: test
status:
  conditions:
  - lastTransitionTime: '2021-09-15T09:32:05Z'
    message: the HPA controller was able to get the target's current scale
    reason: SucceededGetScale
    status: 'True'
    type: AbleToScale
  - lastTransitionTime: '2021-09-15T09:32:05Z'
    message: scaling is disabled since the replica count of the target is zero
    reason: ScalingDisabled
    status: 'False'
    type: ScalingActive
  currentMetrics: []
  currentReplicas: 0
  desiredReplicas: 0

@ -1,4 +1,4 @@
-apiVersion: extensions/v1beta1
+apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  generation: 1

@ -1,4 +1,4 @@
-apiVersion: extensions/v1beta1
+apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  annotations:

@ -10,7 +10,7 @@ metadata:
  name: argocd-server-ingress
  namespace: argocd
  resourceVersion: "23207680"
-  selfLink: /apis/extensions/v1beta1/namespaces/argocd/ingresses/argocd-server-ingress
+  selfLink: /apis/networking.k8s.io/v1/namespaces/argocd/ingresses/argocd-server-ingress
  uid: 09927cae-bca1-11e8-bbd2-42010a8a00bb
spec:
  rules:

@ -1,4 +1,4 @@
-apiVersion: extensions/v1beta1
+apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  annotations:

@ -10,7 +10,7 @@ metadata:
  name: argocd-server-ingress
  namespace: argocd
  resourceVersion: "23207680"
-  selfLink: /apis/extensions/v1beta1/namespaces/argocd/ingresses/argocd-server-ingress
+  selfLink: /apis/networking.k8s.io/v1/namespaces/argocd/ingresses/argocd-server-ingress
  uid: 09927cae-bca1-11e8-bbd2-42010a8a00bb
spec:
  rules:

@ -0,0 +1,51 @@
apiVersion: batch/v1
kind: Job
metadata:
  creationTimestamp: 2018-12-02T08:19:13Z
  labels:
    controller-uid: f3fe3a46-f60a-11e8-aa53-42010a80021b
    job-name: succeed
  name: succeed
  namespace: argoci-workflows
  resourceVersion: "46535949"
  selfLink: /apis/batch/v1/namespaces/argoci-workflows/jobs/succeed
  uid: f3fe3a46-f60a-11e8-aa53-42010a80021b
spec:
  backoffLimit: 0
  completions: 1
  parallelism: 1
  selector:
    matchLabels:
      controller-uid: f3fe3a46-f60a-11e8-aa53-42010a80021b
  suspend: true
  template:
    metadata:
      creationTimestamp: null
      labels:
        controller-uid: f3fe3a46-f60a-11e8-aa53-42010a80021b
        job-name: succeed
    spec:
      containers:
      - command:
        - sh
        - -c
        - sleep 10
        image: alpine:latest
        imagePullPolicy: Always
        name: succeed
        resources: {}
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
      dnsPolicy: ClusterFirst
      restartPolicy: Never
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30
status:
  conditions:
  - lastProbeTime: "2022-12-08T22:27:20Z"
    lastTransitionTime: "2022-12-08T22:27:20Z"
    message: Job suspended
    reason: JobSuspended
    status: "True"
    type: Suspended

@ -10,7 +10,7 @@ metadata:
  deletionTimestamp: 2018-12-03T10:16:04Z
spec:
  containers:
-  - image: doesnt-exist
+  - image: does-not-exist
    imagePullPolicy: Always
    name: main
    resources: {}

@ -59,7 +59,7 @@ status:
    status: "True"
    type: PodScheduled
  containerStatuses:
-  - image: doesnt-exist
+  - image: does-not-exist
    imageID: ""
    lastState: {}
    name: main

@ -9,7 +9,7 @@ metadata:
  uid: 46c1e8de-f61b-11e8-a057-fe5f49266390
spec:
  containers:
-  - image: doesnt-exist
+  - image: does-not-exist
    imagePullPolicy: Always
    name: main
    resources: {}

@ -58,7 +58,7 @@ status:
    status: "True"
    type: PodScheduled
  containerStatuses:
-  - image: doesnt-exist
+  - image: does-not-exist
    imageID: ""
    lastState: {}
    name: main

@ -16,6 +16,7 @@ const (
	AnnotationKeyHook = "argocd.argoproj.io/hook"
	// AnnotationKeyHookDeletePolicy is the policy of deleting a hook
	AnnotationKeyHookDeletePolicy = "argocd.argoproj.io/hook-delete-policy"
+	AnnotationDeletionApproved = "argocd.argoproj.io/deletion-approved"

	// Sync option that disables dry run if the resource is missing in the cluster
	SyncOptionSkipDryRunOnMissingResource = "SkipDryRunOnMissingResource=true"

@ -27,6 +28,27 @@ const (
	SyncOptionPruneLast = "PruneLast=true"
	// Sync option that enables use of replace or create command instead of apply
	SyncOptionReplace = "Replace=true"
+	// Sync option that enables use of --force flag, delete and re-create
+	SyncOptionForce = "Force=true"
+	// Sync option that enables use of --server-side flag instead of client-side
+	SyncOptionServerSideApply = "ServerSideApply=true"
+	// Sync option that disables use of --server-side flag instead of client-side
+	SyncOptionDisableServerSideApply = "ServerSideApply=false"
+	// Sync option that disables resource deletion
+	SyncOptionDisableDeletion = "Delete=false"
+	// Sync option that syncs only out-of-sync resources
+	SyncOptionApplyOutOfSyncOnly = "ApplyOutOfSyncOnly=true"
+	// Sync option that requires confirmation before deleting the resource
+	SyncOptionDeleteRequireConfirm = "Delete=confirm"
+	// Sync option that requires confirmation before pruning the resource
+	SyncOptionPruneRequireConfirm = "Prune=confirm"
+	// Sync option that enables client-side apply migration
+	SyncOptionClientSideApplyMigration = "ClientSideApplyMigration=true"
+	// Sync option that disables client-side apply migration
+	SyncOptionDisableClientSideApplyMigration = "ClientSideApplyMigration=false"
+
+	// Default field manager for client-side apply migration
+	DefaultClientSideApplyMigrationManager = "kubectl-client-side-apply"
)

type PermissionValidator func(un *unstructured.Unstructured, res *metav1.APIResource) error

@ -101,7 +123,6 @@ func NewHookType(t string) (HookType, bool) {
		t == string(HookTypePostSync) ||
		t == string(HookTypeSyncFail) ||
		t == string(HookTypeSkip)
-
}

type HookDeletePolicy string

@ -122,6 +143,10 @@ func NewHookDeletePolicy(p string) (HookDeletePolicy, bool) {
type ResourceSyncResult struct {
	// holds associated resource key
	ResourceKey kube.ResourceKey
+	// Images holds the images associated with the resource. These images are collected on a best-effort basis
+	// from fields used by known workload resources. This does not necessarily reflect the exact list of images
+	// used by workloads in the application.
+	Images []string
	// holds resource version
	Version string
	// holds the execution order

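These constants are the values users place in the argocd.argoproj.io/sync-options annotation described in the package doc below. An illustrative manifest (the resource and the comma-separated combination are assumptions, not repo content):

	apiVersion: v1
	kind: ConfigMap
	metadata:
	  name: example
	  annotations:
	    # Each comma-separated token matches one of the constants above.
	    argocd.argoproj.io/sync-options: ServerSideApply=true,Delete=confirm
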
@ -6,58 +6,58 @@ Package implements Kubernetes resources synchronization and provides the followi
  - sync waves
  - sync options

-Basic Syncing
+# Basic Syncing

Executes equivalent of `kubectl apply` for each specified resource. The apply operations are executed in the predefined
order depending of resource type: namespaces, custom resource definitions first and workload resources last.

-Resource Pruning
+# Resource Pruning

An ability to delete resources that no longer should exist in the cluster. By default obsolete resources are not deleted
and only reported in the sync operation result.

-Resource Hooks
+# Resource Hooks

Hooks provide an ability to create resources such as Pod, Job or any other resource, that are 'executed' before, after
or even during the synchronization process. Hooks enable use-cases such as database migration and post sync notifications.

Hooks are regular Kubernetes resources that have `argocd.argoproj.io/hook` annotation:

-    apiVersion: batch/v1
-    kind: Job
-    metadata:
-      generateName: schema-migrate-
-      annotations:
-        argocd.argoproj.io/hook: PreSync
+	apiVersion: batch/v1
+	kind: Job
+	metadata:
+	  generateName: schema-migrate-
+	  annotations:
+	    argocd.argoproj.io/hook: PreSync

The annotation value indicates the sync operation phase:

-- PreSync - executes prior to the apply of the manifests.
-- PostSync - executes after all Sync hooks completed and were successful, a successful apply, and all resources in a Healthy state.
-- SyncFail - executes when the sync operation fails.
-- Sync - executes after all PreSync hooks completed and were successful, at the same time as the apply of the manifests.
+  - PreSync - executes prior to the apply of the manifests.
+  - PostSync - executes after all Sync hooks completed and were successful, a successful apply, and all resources in a Healthy state.
+  - SyncFail - executes when the sync operation fails.
+  - Sync - executes after all PreSync hooks completed and were successful, at the same time as the apply of the manifests.

Named hooks (i.e. ones with /metadata/name) will only be created once. If you want a hook to be re-created each time
either use BeforeHookCreation policy (see below) or /metadata/generateName.

The same resource hook might be executed in several sync phases:

-    apiVersion: batch/v1
-    kind: Job
-    metadata:
-      generateName: schema-migrate-
-      annotations:
-        argocd.argoproj.io/hook: PreSync,PostSync
+	apiVersion: batch/v1
+	kind: Job
+	metadata:
+	  generateName: schema-migrate-
+	  annotations:
+	    argocd.argoproj.io/hook: PreSync,PostSync

Hooks can be deleted in an automatic fashion using the annotation: argocd.argoproj.io/hook-delete-policy.

-    apiVersion: batch/v1
-    kind: Job
-    metadata:
-      generateName: integration-test-
-      annotations:
-        argocd.argoproj.io/hook: PostSync
-        argocd.argoproj.io/hook-delete-policy: HookSucceeded
+	apiVersion: batch/v1
+	kind: Job
+	metadata:
+	  generateName: integration-test-
+	  annotations:
+	    argocd.argoproj.io/hook: PostSync
+	    argocd.argoproj.io/hook-delete-policy: HookSucceeded

The following policies define when the hook will be deleted.

@ -65,17 +65,17 @@ The following policies define when the hook will be deleted.
  - HookFailed - the hook resource is deleted after the hook failed.
  - BeforeHookCreation - any existing hook resource is deleted before the new one is created

-Sync Waves
+# Sync Waves

The waves allow to group sync execution of syncing process into batches when each batch is executed sequentially one after
another. Hooks and resources are assigned to wave zero by default. The wave can be negative, so you can create a wave
that runs before all other resources. The `argocd.argoproj.io/sync-wave` annotation assign resource to a wave:

-    metadata:
-      annotations:
-        argocd.argoproj.io/sync-wave: "5"
+	metadata:
+	  annotations:
+	    argocd.argoproj.io/sync-wave: "5"

-Sync Options
+# Sync Options

The sync options allows customizing the synchronization of selected resources. The options are specified using the
annotation 'argocd.argoproj.io/sync-options'. Following sync options are supported:

@ -97,9 +97,8 @@ It then determines which the number of the next wave to apply. This is the first
out-of-sync or unhealthy. It applies resources in that wave. It repeats this process until all phases and waves are in
in-sync and healthy.

-Example
+# Example

Find real-life example in https://github.com/argoproj/gitops-engine/blob/master/pkg/engine/engine.go
-
*/
package sync

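For context on the doc.go churn above: Go 1.19 introduced a structured doc comment syntax that gofmt now enforces, where a line beginning with "# " renders as a heading on pkg.go.dev, lists are indented with two spaces, and code blocks are indented with a single tab. A minimal fragment showing the convention:

	// # Basic Syncing
	//
	// Code blocks in doc comments are tab-indented:
	//
	//	kubectl apply -f manifest.yaml
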
@ -6,15 +6,15 @@ import (
	"github.com/stretchr/testify/assert"

	"github.com/argoproj/gitops-engine/pkg/sync/common"
-	. "github.com/argoproj/gitops-engine/pkg/utils/testing"
+	testingutils "github.com/argoproj/gitops-engine/pkg/utils/testing"
)

func TestDeletePolicies(t *testing.T) {
-	assert.Equal(t, []common.HookDeletePolicy{common.HookDeletePolicyBeforeHookCreation}, DeletePolicies(NewPod()))
-	assert.Equal(t, []common.HookDeletePolicy{common.HookDeletePolicyBeforeHookCreation}, DeletePolicies(Annotate(NewPod(), "argocd.argoproj.io/hook-delete-policy", "garbage")))
-	assert.Equal(t, []common.HookDeletePolicy{common.HookDeletePolicyBeforeHookCreation}, DeletePolicies(Annotate(NewPod(), "argocd.argoproj.io/hook-delete-policy", "BeforeHookCreation")))
-	assert.Equal(t, []common.HookDeletePolicy{common.HookDeletePolicyHookSucceeded}, DeletePolicies(Annotate(NewPod(), "argocd.argoproj.io/hook-delete-policy", "HookSucceeded")))
-	assert.Equal(t, []common.HookDeletePolicy{common.HookDeletePolicyHookFailed}, DeletePolicies(Annotate(NewPod(), "argocd.argoproj.io/hook-delete-policy", "HookFailed")))
+	assert.Equal(t, []common.HookDeletePolicy{common.HookDeletePolicyBeforeHookCreation}, DeletePolicies(testingutils.NewPod()))
+	assert.Equal(t, []common.HookDeletePolicy{common.HookDeletePolicyBeforeHookCreation}, DeletePolicies(testingutils.Annotate(testingutils.NewPod(), "argocd.argoproj.io/hook-delete-policy", "garbage")))
+	assert.Equal(t, []common.HookDeletePolicy{common.HookDeletePolicyBeforeHookCreation}, DeletePolicies(testingutils.Annotate(testingutils.NewPod(), "argocd.argoproj.io/hook-delete-policy", "BeforeHookCreation")))
+	assert.Equal(t, []common.HookDeletePolicy{common.HookDeletePolicyHookSucceeded}, DeletePolicies(testingutils.Annotate(testingutils.NewPod(), "argocd.argoproj.io/hook-delete-policy", "HookSucceeded")))
+	assert.Equal(t, []common.HookDeletePolicy{common.HookDeletePolicyHookFailed}, DeletePolicies(testingutils.Annotate(testingutils.NewPod(), "argocd.argoproj.io/hook-delete-policy", "HookFailed")))
	// Helm test
-	assert.Equal(t, []common.HookDeletePolicy{common.HookDeletePolicyHookSucceeded}, DeletePolicies(Annotate(NewPod(), "helm.sh/hook-delete-policy", "hook-succeeded")))
+	assert.Equal(t, []common.HookDeletePolicy{common.HookDeletePolicyHookSucceeded}, DeletePolicies(testingutils.Annotate(testingutils.NewPod(), "helm.sh/hook-delete-policy", "hook-succeeded")))
}

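The same dot-import-to-alias refactor recurs in the test files below. A fragment showing the two styles side by side; the lint motivation is an assumption, not stated in the commits:

	// Before: the dot import injects NewPod and Annotate into the file's scope.
	import . "github.com/argoproj/gitops-engine/pkg/utils/testing"

	// After: a named alias keeps each helper's origin visible at the call site
	// (and presumably satisfies linters that flag dot imports).
	import testingutils "github.com/argoproj/gitops-engine/pkg/utils/testing"
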
@ -6,14 +6,14 @@ import (
	"github.com/stretchr/testify/assert"

	"github.com/argoproj/gitops-engine/pkg/sync/common"
-	. "github.com/argoproj/gitops-engine/pkg/utils/testing"
+	testingutils "github.com/argoproj/gitops-engine/pkg/utils/testing"
)

func TestDeletePolicies(t *testing.T) {
-	assert.Nil(t, DeletePolicies(NewPod()))
-	assert.Equal(t, []DeletePolicy{BeforeHookCreation}, DeletePolicies(Annotate(NewPod(), "helm.sh/hook-delete-policy", "before-hook-creation")))
-	assert.Equal(t, []DeletePolicy{HookSucceeded}, DeletePolicies(Annotate(NewPod(), "helm.sh/hook-delete-policy", "hook-succeeded")))
-	assert.Equal(t, []DeletePolicy{HookFailed}, DeletePolicies(Annotate(NewPod(), "helm.sh/hook-delete-policy", "hook-failed")))
+	assert.Nil(t, DeletePolicies(testingutils.NewPod()))
+	assert.Equal(t, []DeletePolicy{BeforeHookCreation}, DeletePolicies(testingutils.Annotate(testingutils.NewPod(), "helm.sh/hook-delete-policy", "before-hook-creation")))
+	assert.Equal(t, []DeletePolicy{HookSucceeded}, DeletePolicies(testingutils.Annotate(testingutils.NewPod(), "helm.sh/hook-delete-policy", "hook-succeeded")))
+	assert.Equal(t, []DeletePolicy{HookFailed}, DeletePolicies(testingutils.Annotate(testingutils.NewPod(), "helm.sh/hook-delete-policy", "hook-failed")))
}

func TestDeletePolicy_DeletePolicy(t *testing.T) {

@ -5,12 +5,12 @@ import (

	"github.com/stretchr/testify/assert"

-	. "github.com/argoproj/gitops-engine/pkg/utils/testing"
+	testingutils "github.com/argoproj/gitops-engine/pkg/utils/testing"
)

func TestIsHook(t *testing.T) {
-	assert.False(t, IsHook(NewPod()))
-	assert.True(t, IsHook(Annotate(NewPod(), "helm.sh/hook", "anything")))
+	assert.False(t, IsHook(testingutils.NewPod()))
+	assert.True(t, IsHook(testingutils.Annotate(testingutils.NewPod(), "helm.sh/hook", "anything")))
	// helm calls "crd-install" a hook, but it really can't be treated as such
-	assert.False(t, IsHook(Annotate(NewCRD(), "helm.sh/hook", "crd-install")))
+	assert.False(t, IsHook(testingutils.Annotate(testingutils.NewCRD(), "helm.sh/hook", "crd-install")))
}

@ -6,22 +6,22 @@ import (
	"github.com/stretchr/testify/assert"

	"github.com/argoproj/gitops-engine/pkg/sync/common"
-	. "github.com/argoproj/gitops-engine/pkg/utils/testing"
+	testingutils "github.com/argoproj/gitops-engine/pkg/utils/testing"
)

func TestTypes(t *testing.T) {
-	assert.Nil(t, Types(NewPod()))
-	assert.Equal(t, []Type{PreInstall}, Types(Annotate(NewPod(), "helm.sh/hook", "pre-install")))
-	assert.Equal(t, []Type{PreUpgrade}, Types(Annotate(NewPod(), "helm.sh/hook", "pre-upgrade")))
-	assert.Equal(t, []Type{PostUpgrade}, Types(Annotate(NewPod(), "helm.sh/hook", "post-upgrade")))
-	assert.Equal(t, []Type{PostInstall}, Types(Annotate(NewPod(), "helm.sh/hook", "post-install")))
+	assert.Nil(t, Types(testingutils.NewPod()))
+	assert.Equal(t, []Type{PreInstall}, Types(testingutils.Annotate(testingutils.NewPod(), "helm.sh/hook", "pre-install")))
+	assert.Equal(t, []Type{PreUpgrade}, Types(testingutils.Annotate(testingutils.NewPod(), "helm.sh/hook", "pre-upgrade")))
+	assert.Equal(t, []Type{PostUpgrade}, Types(testingutils.Annotate(testingutils.NewPod(), "helm.sh/hook", "post-upgrade")))
+	assert.Equal(t, []Type{PostInstall}, Types(testingutils.Annotate(testingutils.NewPod(), "helm.sh/hook", "post-install")))
	// helm calls "crd-install" a hook, but it really can't be treated as such
-	assert.Empty(t, Types(Annotate(NewPod(), "helm.sh/hook", "crd-install")))
+	assert.Empty(t, Types(testingutils.Annotate(testingutils.NewPod(), "helm.sh/hook", "crd-install")))
	// we do not consider these supported hooks
-	assert.Nil(t, Types(Annotate(NewPod(), "helm.sh/hook", "pre-rollback")))
-	assert.Nil(t, Types(Annotate(NewPod(), "helm.sh/hook", "post-rollback")))
-	assert.Nil(t, Types(Annotate(NewPod(), "helm.sh/hook", "test-success")))
-	assert.Nil(t, Types(Annotate(NewPod(), "helm.sh/hook", "test-failure")))
+	assert.Nil(t, Types(testingutils.Annotate(testingutils.NewPod(), "helm.sh/hook", "pre-rollback")))
+	assert.Nil(t, Types(testingutils.Annotate(testingutils.NewPod(), "helm.sh/hook", "post-rollback")))
+	assert.Nil(t, Types(testingutils.Annotate(testingutils.NewPod(), "helm.sh/hook", "test-success")))
+	assert.Nil(t, Types(testingutils.Annotate(testingutils.NewPod(), "helm.sh/hook", "test-failure")))
}

func TestType_HookType(t *testing.T) {

@@ -3,12 +3,12 @@ package helm
 import (
 	"testing"
 
-	. "github.com/argoproj/gitops-engine/pkg/utils/testing"
+	testingutils "github.com/argoproj/gitops-engine/pkg/utils/testing"
 
 	"github.com/stretchr/testify/assert"
 )
 
 func TestWeight(t *testing.T) {
-	assert.Equal(t, Weight(NewPod()), 0)
-	assert.Equal(t, Weight(Annotate(NewPod(), "helm.sh/hook-weight", "1")), 1)
+	assert.Equal(t, 0, Weight(testingutils.NewPod()))
+	assert.Equal(t, 1, Weight(testingutils.Annotate(testingutils.NewPod(), "helm.sh/hook-weight", "1")))
 }
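Note that the Weight assertions do more than rename the helpers: the arguments are also flipped into testify's expected-first order, assert.Equal(t, expected, actual). Testify builds its failure message from the argument positions, so a swapped call reports the two values backwards. A hypothetical test illustrating the convention:

package helm

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// TestExpectedFirst is a hypothetical example of the convention: testify
// reports the first argument as "expected" and the second as "actual" in its
// failure message, so assert.Equal(t, got, 2) would print the values swapped.
func TestExpectedFirst(t *testing.T) {
	got := 1 + 1
	assert.Equal(t, 2, got) // expected first, actual second
}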
@@ -8,6 +8,21 @@ import (
 	resourceutil "github.com/argoproj/gitops-engine/pkg/sync/resource"
 )
 
+const (
+	// HookFinalizer is the finalizer added to hooks to ensure they are deleted only after the sync phase is completed.
+	HookFinalizer = "argocd.argoproj.io/hook-finalizer"
+)
+
+func HasHookFinalizer(obj *unstructured.Unstructured) bool {
+	finalizers := obj.GetFinalizers()
+	for _, finalizer := range finalizers {
+		if finalizer == HookFinalizer {
+			return true
+		}
+	}
+	return false
+}
+
 func IsHook(obj *unstructured.Unstructured) bool {
 	_, ok := obj.GetAnnotations()[common.AnnotationKeyHook]
 	if ok {
@@ -7,7 +7,7 @@ import (
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 
 	"github.com/argoproj/gitops-engine/pkg/sync/common"
-	. "github.com/argoproj/gitops-engine/pkg/utils/testing"
+	testingutils "github.com/argoproj/gitops-engine/pkg/utils/testing"
 )
 
 func TestNoHooks(t *testing.T) {
@@ -74,14 +74,14 @@ func TestGarbageAndHook(t *testing.T) {
 }
 
 func TestHelmHook(t *testing.T) {
-	obj := Annotate(NewPod(), "helm.sh/hook", "pre-install")
+	obj := testingutils.Annotate(testingutils.NewPod(), "helm.sh/hook", "pre-install")
 	assert.True(t, IsHook(obj))
 	assert.False(t, Skip(obj))
 	assert.Equal(t, []common.HookType{common.HookTypePreSync}, Types(obj))
 }
 
 func TestGarbageHelmHook(t *testing.T) {
-	obj := Annotate(NewPod(), "helm.sh/hook", "garbage")
+	obj := testingutils.Annotate(testingutils.NewPod(), "helm.sh/hook", "garbage")
 	assert.True(t, IsHook(obj))
 	assert.False(t, Skip(obj))
 	assert.Nil(t, Types(obj))
@@ -89,10 +89,10 @@ func TestGarbageHelmHook(t *testing.T) {
 
 // we should ignore Helm hooks if we have an Argo CD hook
 func TestBothHooks(t *testing.T) {
-	obj := Annotate(example("Sync"), "helm.sh/hook", "pre-install")
+	obj := testingutils.Annotate(example("Sync"), "helm.sh/hook", "pre-install")
 	assert.Equal(t, []common.HookType{common.HookTypeSync}, Types(obj))
 }
 
 func example(hook string) *unstructured.Unstructured {
-	return Annotate(NewPod(), "argocd.argoproj.io/hook", hook)
+	return testingutils.Annotate(testingutils.NewPod(), "argocd.argoproj.io/hook", hook)
 }
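TestBothHooks encodes a precedence rule rather than a merge: when an object carries both annotations, the Argo CD hook annotation wins outright and the Helm one is not consulted. A minimal sketch of that rule; hookTypes and the comma splitting are illustrative, not the engine's real parsing:

package main

import (
	"fmt"
	"strings"
)

// hookTypes restates the precedence rule TestBothHooks encodes: an Argo CD
// hook annotation, when present, completely shadows any helm.sh/hook value.
// The real implementation also translates Helm phases and validates names.
func hookTypes(annotations map[string]string) []string {
	if v, ok := annotations["argocd.argoproj.io/hook"]; ok {
		return strings.Split(v, ",") // Argo CD annotation wins
	}
	if v, ok := annotations["helm.sh/hook"]; ok {
		return strings.Split(v, ",")
	}
	return nil
}

func main() {
	both := map[string]string{
		"argocd.argoproj.io/hook": "Sync",
		"helm.sh/hook":            "pre-install",
	}
	fmt.Println(hookTypes(both)) // [Sync]: the Helm annotation is ignored
}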
@@ -9,17 +9,17 @@ import (
 
 	"github.com/stretchr/testify/assert"
 
-	. "github.com/argoproj/gitops-engine/pkg/utils/testing"
+	testingutils "github.com/argoproj/gitops-engine/pkg/utils/testing"
 )
 
 func newHook(obj *unstructured.Unstructured, hookType common.HookType) *unstructured.Unstructured {
-	return Annotate(obj, "argocd.argoproj.io/hook", string(hookType))
+	return testingutils.Annotate(obj, "argocd.argoproj.io/hook", string(hookType))
 }
 
 func TestIgnore(t *testing.T) {
-	assert.False(t, Ignore(NewPod()))
-	assert.False(t, Ignore(newHook(NewPod(), "Sync")))
-	assert.True(t, Ignore(newHook(NewPod(), "garbage")))
-	assert.False(t, Ignore(HelmHook(NewPod(), "pre-install")))
-	assert.True(t, Ignore(HelmHook(NewPod(), "garbage")))
+	assert.False(t, Ignore(testingutils.NewPod()))
+	assert.False(t, Ignore(newHook(testingutils.NewPod(), "Sync")))
+	assert.True(t, Ignore(newHook(testingutils.NewPod(), "garbage")))
+	assert.False(t, Ignore(testingutils.HelmHook(testingutils.NewPod(), "pre-install")))
+	assert.True(t, Ignore(testingutils.HelmHook(testingutils.NewPod(), "garbage")))
 }
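Taken together, the TestIgnore assertions define Ignore as "declared a hook, but with no recognized hook type": plain resources pass through, valid Argo CD or Helm hooks pass through, and only garbage-typed hooks are dropped. A toy restatement of the rule; ignoreRule is hypothetical:

package main

import "fmt"

// ignoreRule restates what the assertions pin down: an object is skipped only
// when it declares itself a hook yet none of its declared types is recognized.
// Hypothetical helper; Ignore itself works on unstructured objects.
func ignoreRule(isHook bool, recognizedTypes int) bool {
	return isHook && recognizedTypes == 0
}

func main() {
	fmt.Println(ignoreRule(false, 0)) // plain pod: false
	fmt.Println(ignoreRule(true, 1))  // hook "Sync": false
	fmt.Println(ignoreRule(true, 0))  // hook "garbage": true
}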
@@ -6,7 +6,6 @@ import (
 
 	hookutil "github.com/argoproj/gitops-engine/pkg/sync/hook"
 	"github.com/argoproj/gitops-engine/pkg/sync/ignore"
-	"github.com/argoproj/gitops-engine/pkg/utils/kube"
 	kubeutil "github.com/argoproj/gitops-engine/pkg/utils/kube"
 	"github.com/argoproj/gitops-engine/pkg/utils/text"
 )
@@ -69,25 +68,43 @@ type ReconciliationResult struct {
 	Hooks      []*unstructured.Unstructured
 }
 
-func Reconcile(targetObjs []*unstructured.Unstructured, liveObjByKey map[kube.ResourceKey]*unstructured.Unstructured, namespace string, resInfo kubeutil.ResourceInfoProvider) ReconciliationResult {
+func Reconcile(targetObjs []*unstructured.Unstructured, liveObjByKey map[kubeutil.ResourceKey]*unstructured.Unstructured, namespace string, resInfo kubeutil.ResourceInfoProvider) ReconciliationResult {
 	targetObjs, hooks := splitHooks(targetObjs)
 	dedupLiveResources(targetObjs, liveObjByKey)
 
 	managedLiveObj := make([]*unstructured.Unstructured, len(targetObjs))
 	for i, obj := range targetObjs {
 		gvk := obj.GroupVersionKind()
+
 		ns := text.FirstNonEmpty(obj.GetNamespace(), namespace)
-		if namespaced := kubeutil.IsNamespacedOrUnknown(resInfo, obj.GroupVersionKind().GroupKind()); !namespaced {
-			ns = ""
-		}
-		key := kubeutil.NewResourceKey(gvk.Group, gvk.Kind, ns, obj.GetName())
-		if liveObj, ok := liveObjByKey[key]; ok {
-			managedLiveObj[i] = liveObj
-			delete(liveObjByKey, key)
-		} else {
-			managedLiveObj[i] = nil
+
+		namespaced, err := resInfo.IsNamespaced(gvk.GroupKind())
+		unknownScope := err != nil
+
+		var keysToCheck []kubeutil.ResourceKey
+		// If we get an error, we don't know whether the resource is namespaced. So we need to check for both in the
+		// live objects. If we don't check for both, then we risk missing the object and deleting it.
+		if namespaced || unknownScope {
+			keysToCheck = append(keysToCheck, kubeutil.NewResourceKey(gvk.Group, gvk.Kind, ns, obj.GetName()))
+		}
+		if !namespaced || unknownScope {
+			keysToCheck = append(keysToCheck, kubeutil.NewResourceKey(gvk.Group, gvk.Kind, "", obj.GetName()))
+		}
+
+		found := false
+		for _, key := range keysToCheck {
+			if liveObj, ok := liveObjByKey[key]; ok {
+				managedLiveObj[i] = liveObj
+				delete(liveObjByKey, key)
+				found = true
+				break
+			}
+		}
+		if !found {
+			managedLiveObj[i] = nil
 		}
 	}
 	for _, obj := range liveObjByKey {
 		targetObjs = append(targetObjs, nil)
 		managedLiveObj = append(managedLiveObj, obj)
Some files were not shown because too many files have changed in this diff.