Compare commits
206 Commits
(author and date columns were not captured; commit SHAs listed in order)
093aef0dad
8007df5f6c
f8f1b61ba3
8697b44eea
69dfa708a6
cebed7e704
89c110b595
90b69e9ae5
60c6378a12
c7f25189f0
9169c08c91
d65e9d9227
5f90e7b481
717b8bfd69
c61756277b
370078d070
7258614f50
e5ef2e16d8
762f9b70f3
1265e8382e
acb47d5407
e4cacd37c4
a16fb84a8c
4fd18478f5
65db274b8d
11a5e25708
04266647b1
cc13a7d417
ad846ac0fd
c323d36706
782fb85b94
f5aa9e4d10
367311bd6f
70bee6a3a5
b111e50082
3ef5ab187e
30f4accb42
00472077d3
bfdad63e27
a093a7627f
ccee58366a
edb9faabbf
7ac688a30f
f948991e78
382663864e
7e21b91e9d
d78929e7f6
54992bf424
8d65e80ecb
363a7155a5
73452f8a58
d948e6b41c
8849c3f30c
0371401803
88c35a9acf
847cfc9f8b
9ab0b2ecae
09e5225f84
72bcdda3f0
df9b446fd7
3d9aab3cdc
bd7681ae3f
95e00254f8
099cba69bd
6b2984ebc4
7d150d0b6b
adb68bcaab
a0c23b4210
a22b34675f
fa0e8d60a3
f38075deb3
4386ff4b8d
0be58f261a
83ce6ca8ce
1f371a01cf
a9fd001c11
8a3ce6d85c
0aecd43903
86a368824c
fbecbb86e4
1ade3a1998
3de313666b
5fd9f449e7
792124280f
c1e23597e7
aba38192fb
c0c2dd1f6f
4a5648ee41
f15cf615b8
9a03edb8e7
a00ce82f1c
b0fffe419a
187312fe86
ed7c77a929
425d65e076
b58645a27c
c0ffe8428a
e56739ceba
ad9a694fe4
b4dd8b8c39
ed70eac8b7
917f5a0f16
e284fd71cb
b371e3bfc5
9664cf8123
98ccd3d43f
3951079de1
517c1fff4e
c036d3f6b0
ce2fb703a6
9970faba81
a56a803031
2bc3fef13e
e03364f7dd
51a33e63f1
e5e3a1cf5c
da6623b2e7
112657a1f9
ab8fdc7dbd
2d495813b7
d8c17c206f
6cde7989d5
1c4ef33687
da6681916f
67ddccd3cc
ed31317b27
f9456de217
7493226dda
4f069a220a
b855894da0
55bb49480a
d8b1a12ce6
a586397dc3
73bcea9c8c
723667dff7
531c0dbb68
61c0cc745e
553ae80972
c517b47f2f
e360551b19
8aefb18433
b0c5e00ccf
36e77462ae
5bbbdfbc69
b560016286
f6495020a3
ae94ad9510
c7bab2eeca
c0b63afb74
2565df31d1
c8139b3f94
27374da031
762cb1bc26
bc9ce5764f
e8d9803a2b
23f41cb849
33f542da00
57ea690344
a4c77d5c70
2c97a96cab
7495c633c3
b067bd7463
579ea1d764
5da9c7eea0
6884d330a0
ddc92c9bdb
6911e599ae
411c8d0f1c
f0c9d7e75e
515974410e
3f38eee773
46d1496140
73f3e7f01a
0f8652d4e7
ba03b48543
09186f3d4f
b3254f88f4
9e414998c8
11e322186b
8e19104276
2a9c1448b2
1ce2acc845
3c778a5431
46073c1cd6
c1332abf89
89ddd0dffb
a9f11fade3
478f8cb207
38db8bb691
5d680d6b80
928245881d
380f7be5bf
89cb483bbb
aae8ded161
354817a103
c5b7114c50
814d79df49
0b4199b001
bb076f0a89
82f0935363
ee1772e1dc
32c6afc4a7
dac837751e
209882714e
53cbe5f6be
0bc2f8c395
@@ -0,0 +1,10 @@
+# All
+** @argoproj/argocd-approvers
+
+# Docs
+/docs/** @argoproj/argocd-approvers @argoproj/argocd-approvers-docs
+/README.md @argoproj/argocd-approvers @argoproj/argocd-approvers-docs
+
+# CI
+/.codecov.yml @argoproj/argocd-approvers @argoproj/argocd-approvers-ci
+/.github/** @argoproj/argocd-approvers @argoproj/argocd-approvers-ci
@@ -4,7 +4,23 @@ updates:
     directory: "/"
     schedule:
       interval: "daily"
+    commit-message:
+      prefix: "chore(deps)"
+    groups:
+      dependencies:
+        applies-to: version-updates
+        update-types:
+          - "minor"
+          - "patch"
   - package-ecosystem: "github-actions"
     directory: "/"
     schedule:
       interval: "daily"
+    commit-message:
+      prefix: "chore(deps)"
+    groups:
+      dependencies:
+        applies-to: version-updates
+        update-types:
+          - "minor"
+          - "patch"
@@ -1,28 +1,30 @@
 name: CI
 
-on: [push, pull_request]
+on:
+  push:
+    branches:
+      - 'master'
+      - 'release-*'
+  pull_request:
+    branches:
+      - 'master'
+      - 'release-*'
 jobs:
   test:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@master
-      - uses: actions/cache@v2
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+      - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
-        with:
-          path: ~/go/pkg/mod
-          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
-          restore-keys: |
-            ${{ runner.os }}-go-
-      - uses: actions/setup-go@v2.1.3
         with:
-          go-version: '1.13.6'
+          go-version-file: go.mod
       - run: go mod tidy
       - run: make test
-      - uses: actions-contrib/golangci-lint@v1
+      - name: Run golangci-lint
+        uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0
         with:
+          args: run --timeout=5m
-        env:
-          GOROOT: ""
-      - uses: codecov/codecov-action@v1.0.13
-          version: v2.1.6
-          args: --verbose
+      - uses: codecov/codecov-action@ad3126e916f78f00edff4ed0317cf185271ccc2d # v5.4.2
         with:
           token: ${{ secrets.CODECOV_TOKEN }} #required
-          file: ./coverage.out
+          files: ./coverage.out
@@ -3,3 +3,5 @@
 .vscode
 .idea
 coverage.out
+vendor/
+.tool-versions
@@ -0,0 +1,129 @@
+version: "2"
+linters:
+  enable:
+    - errorlint
+    - gocritic
+    - gomodguard
+    - importas
+    - misspell
+    - perfsprint
+    - revive
+    - testifylint
+    - thelper
+    - unparam
+    - usestdlibvars
+    - whitespace
+    - wrapcheck
+  settings:
+    gocritic:
+      disabled-checks:
+        - appendAssign
+        - assignOp
+        - exitAfterDefer
+        - typeSwitchVar
+    importas:
+      alias:
+        - pkg: k8s.io/api/apps/v1
+          alias: appsv1
+        - pkg: k8s.io/api/core/v1
+          alias: corev1
+        - pkg: k8s.io/apimachinery/pkg/api/errors
+          alias: apierrors
+        - pkg: k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1
+          alias: apiextensionsv1
+        - pkg: k8s.io/apimachinery/pkg/apis/meta/v1
+          alias: metav1
+        - pkg: github.com/argoproj/gitops-engine/pkg/utils/testing
+          alias: testingutils
+    perfsprint:
+      int-conversion: true
+      err-error: true
+      errorf: true
+      sprintf1: true
+      strconcat: true
+    revive:
+      rules:
+        - name: bool-literal-in-expr
+        - name: blank-imports
+          disabled: true
+        - name: context-as-argument
+          arguments:
+            - allowTypesBefore: '*testing.T,testing.TB'
+        - name: context-keys-type
+          disabled: true
+        - name: dot-imports
+        - name: duplicated-imports
+        - name: early-return
+          arguments:
+            - preserveScope
+        - name: empty-block
+          disabled: true
+        - name: error-naming
+          disabled: true
+        - name: error-return
+        - name: error-strings
+          disabled: true
+        - name: errorf
+        - name: identical-branches
+        - name: if-return
+        - name: increment-decrement
+        - name: indent-error-flow
+          arguments:
+            - preserveScope
+        - name: modifies-parameter
+        - name: optimize-operands-order
+        - name: range
+        - name: receiver-naming
+        - name: redefines-builtin-id
+          disabled: true
+        - name: redundant-import-alias
+        - name: superfluous-else
+          arguments:
+            - preserveScope
+        - name: time-equal
+        - name: time-naming
+          disabled: true
+        - name: unexported-return
+          disabled: true
+        - name: unnecessary-stmt
+        - name: unreachable-code
+        - name: unused-parameter
+        - name: use-any
+        - name: useless-break
+        - name: var-declaration
+        - name: var-naming
+          disabled: true
+    testifylint:
+      enable-all: true
+      disable:
+        - go-require
+  exclusions:
+    presets:
+      - comments
+      - common-false-positives
+      - legacy
+      - std-error-handling
+    paths:
+      - pkg/diff/internal/fieldmanager/borrowed_.*\.go$
+      - internal/kubernetes_vendor
+      - third_party$
+      - builtin$
+      - examples$
+issues:
+  max-issues-per-linter: 0
+  max-same-issues: 0
+formatters:
+  enable:
+    - gofumpt
+    - goimports
+  settings:
+    goimports:
+      local-prefixes:
+        - github.com/argoproj/gitops-engine
+  exclusions:
+    paths:
+      - pkg/diff/internal/fieldmanager/borrowed_.*\.go$
+      - internal/kubernetes_vendor
+      - third_party$
+      - builtin$
+      - examples$
@@ -1,4 +1,4 @@
-FROM golang:1.14.3 as builder
+FROM golang:1.22 AS builder
 
 WORKDIR /src
 
@@ -12,5 +12,5 @@ COPY . .
 RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags="-w -s" -o /dist/gitops ./agent
 
-FROM alpine/git:v2.24.3
-COPY --from=builder /dist/gitops /usr/local/bin/gitops
+FROM alpine/git:v2.45.2
+COPY --from=builder /dist/gitops /usr/local/bin/gitops
OWNERS (3 changed lines)
@@ -5,11 +5,10 @@ owners:
 approvers:
 - alexec
 - alexmt
 - dthomson25
 - jannfis
 - jessesuen
 - mayzhang2000
 - rachelwang20
 
 reviewers:
 - ash2k
 - dthomson25
README.md (17 changed lines)
@@ -1,9 +1,4 @@
 # GitOps Engine
-<div align="center">
-
-
-
-</div>
 
 Various GitOps operators address different use-cases and provide different user experiences but all have similar set of core features. The team behind
 [Argo CD](https://github.com/argoproj/argo-cd) has implemented a reusable library that implements core GitOps features:
@@ -36,16 +31,10 @@ The GitOps Engine follows the [CNCF Code of Conduct](https://github.com/cncf/fou
 
 If you are as excited about GitOps and one common engine for it as much as we are, please get in touch. If you want to write code that's great, if you want to share feedback, ideas and use-cases, that's great too.
 
-Find us on the [#gitops channel][gitops-slack] on Kubernetes Slack (get an [invite here][kube-slack]).
+Find us on the [#argo-cd-contributors][argo-cd-contributors-slack] on CNCF Slack (get an [invite here][cncf-slack]).
 
-[gitops-slack]: https://kubernetes.slack.com/archives/CBT6N1ASG
-[kube-slack]: https://slack.k8s.io/
-
-### Meetings
-
-The developer team meets regularly, every 1st and 3rd Tuesday of the month, [16:00 UTC](http://time.unitarium.com/utc/16). Instructions, agenda and minutes can be found in [the meeting doc](https://docs.google.com/document/d/17AEZgv6yVuD4HS7_oNPiMKmS7Q6vjkhk6jH0YCELpRk/edit#). The meetings will be recorded and added to this [Youtube playlist](https://www.youtube.com/playlist?list=PLbx4FZ4kOKnvSQP394o5UdF9wL7FaQd-R).
-
-We look forward to seeing you at our meetings and hearing about your feedback and ideas there!
+[argo-cd-contributors-slack]: https://cloud-native.slack.com/archives/C020XM04CUW
+[cncf-slack]: https://slack.cncf.io/
 
 ### Contributing to the effort
 
@@ -7,7 +7,7 @@ The main difference is that the agent is syncing one Git repository into the sam
 
 ## Quick Start
 
-By default the agent is configured to use manifests from [guestbook](https://github.com/argoproj/argocd-example-apps/tree/master/guestbook)
+By default, the agent is configured to use manifests from [guestbook](https://github.com/argoproj/argocd-example-apps/tree/master/guestbook)
 directory in https://github.com/argoproj/argocd-example-apps repository.
 
 The agent supports two modes:
@@ -24,7 +24,7 @@ kubectl apply -f https://raw.githubusercontent.com/argoproj/gitops-engine/master
 kubectl rollout status deploy/gitops-agent
 ```
 
-The the agent logs:
+The agent logs:
 
 ```bash
 kubectl logs -f deploy/gitops-agent gitops-agent
@@ -56,4 +56,21 @@ Update the container env [variables](https://github.com/kubernetes/git-sync#para
 
 ### Demo Recording
 
-[](https://asciinema.org/a/FWbvVAiSsiI87wQx2TJbRMlxN)
+[](https://asciinema.org/a/FWbvVAiSsiI87wQx2TJbRMlxN)
+
+### Profiling
+
+Profiling mode is enabled via environment variables; the agent can be started with the following:
+
+```bash
+export GITOPS_ENGINE_PROFILE=web
+# optional, default pprof address is 127.0.0.1:6060
+export GITOPS_ENGINE_PROFILE_HOST=127.0.0.1
+export GITOPS_ENGINE_PROFILE_PORT=6060
+```
+
+You can then open the profile in the browser (or use the [pprof](https://github.com/google/pprof) tool to generate diagrams):
+
+- http://127.0.0.1:6060/debug/pprof/goroutine?debug=2
+- http://127.0.0.1:6060/debug/pprof/mutex?debug=2
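As a side note (not part of this diff): the pprof endpoints the new Profiling docs describe can be consumed programmatically as well as from a browser. A minimal Go sketch, assuming an agent running with the default 127.0.0.1:6060 address from the docs above:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"os"
)

func main() {
	// Default pprof address from the profiling docs; adjust if
	// GITOPS_ENGINE_PROFILE_HOST / GITOPS_ENGINE_PROFILE_PORT were overridden.
	resp, err := http.Get("http://127.0.0.1:6060/debug/pprof/goroutine?debug=2")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer resp.Body.Close()
	// Dump the human-readable goroutine listing to stdout.
	_, _ = io.Copy(os.Stdout, resp.Body)
}
```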
@@ -5,33 +5,40 @@ import (
 	"crypto/sha256"
 	"encoding/base64"
 	"fmt"
-	"io/ioutil"
 	"net/http"
 	"os"
 	"os/exec"
 	"path/filepath"
+	"runtime"
 	"strings"
 	"text/tabwriter"
 	"time"
 
+	"github.com/argoproj/gitops-engine/pkg/utils/text"
+
 	"github.com/go-logr/logr"
 	"github.com/spf13/cobra"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/client-go/tools/clientcmd"
-	"k8s.io/klog/v2/klogr"
+	"k8s.io/klog/v2/textlogger"
 
 	"github.com/argoproj/gitops-engine/pkg/cache"
 	"github.com/argoproj/gitops-engine/pkg/engine"
 	"github.com/argoproj/gitops-engine/pkg/sync"
 	"github.com/argoproj/gitops-engine/pkg/utils/kube"
 
 	_ "net/http/pprof"
 )
 
 const (
 	annotationGCMark = "gitops-agent.argoproj.io/gc-mark"
+	envProfile       = "GITOPS_ENGINE_PROFILE"
+	envProfileHost   = "GITOPS_ENGINE_PROFILE_HOST"
+	envProfilePort   = "GITOPS_ENGINE_PROFILE_PORT"
 )
 
 func main() {
-	log := klogr.New() // Delegates to klog
+	log := textlogger.NewLogger(textlogger.NewConfig())
 	err := newCmd(log).Execute()
 	checkError(err, log)
 }
@@ -47,7 +54,7 @@ type settings struct {
 
 func (s *settings) getGCMark(key kube.ResourceKey) string {
 	h := sha256.New()
-	_, _ = h.Write([]byte(fmt.Sprintf("%s/%s", s.repoPath, strings.Join(s.paths, ","))))
+	_, _ = fmt.Fprintf(h, "%s/%s", s.repoPath, strings.Join(s.paths, ","))
 	_, _ = h.Write([]byte(strings.Join([]string{key.Group, key.Kind, key.Name}, "/")))
 	return "sha256." + base64.RawURLEncoding.EncodeToString(h.Sum(nil))
 }
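Worth noting about the getGCMark change above: `fmt.Fprintf(h, ...)` writes the formatted bytes straight into the hash (a `hash.Hash` is an `io.Writer`), skipping the intermediate string that `h.Write([]byte(fmt.Sprintf(...)))` allocates — the pattern the perfsprint linter enabled earlier in this diff flags. A minimal standalone sketch showing the two forms produce identical digests (standard library only):

```go
package main

import (
	"crypto/sha256"
	"encoding/base64"
	"fmt"
)

func main() {
	// Variant 1: format into a temporary string, then write its bytes.
	h1 := sha256.New()
	_, _ = h1.Write([]byte(fmt.Sprintf("%s/%s", "repo", "a,b")))

	// Variant 2: format directly into the hash, no intermediate string.
	h2 := sha256.New()
	_, _ = fmt.Fprintf(h2, "%s/%s", "repo", "a,b")

	// Both print the same digest.
	fmt.Println(base64.RawURLEncoding.EncodeToString(h1.Sum(nil)))
	fmt.Println(base64.RawURLEncoding.EncodeToString(h2.Sum(nil)))
}
```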
@@ -57,7 +64,7 @@ func (s *settings) parseManifests() ([]*unstructured.Unstructured, string, error
 	cmd.Dir = s.repoPath
 	revision, err := cmd.CombinedOutput()
 	if err != nil {
-		return nil, "", err
+		return nil, "", fmt.Errorf("failed to determine git revision: %w", err)
 	}
 	var res []*unstructured.Unstructured
 	for i := range s.paths {
@@ -68,21 +75,21 @@ func (s *settings) parseManifests() ([]*unstructured.Unstructured, string, error
 		if info.IsDir() {
 			return nil
 		}
-		if ext := filepath.Ext(info.Name()); ext != ".json" && ext != ".yml" && ext != ".yaml" {
+		if ext := strings.ToLower(filepath.Ext(info.Name())); ext != ".json" && ext != ".yml" && ext != ".yaml" {
 			return nil
 		}
-		data, err := ioutil.ReadFile(path)
+		data, err := os.ReadFile(path)
 		if err != nil {
-			return err
+			return fmt.Errorf("failed to read file %s: %w", path, err)
 		}
 		items, err := kube.SplitYAML(data)
 		if err != nil {
-			return fmt.Errorf("failed to parse %s: %v", path, err)
+			return fmt.Errorf("failed to parse %s: %w", path, err)
 		}
 		res = append(res, items...)
 		return nil
 	}); err != nil {
-		return nil, "", err
+		return nil, "", fmt.Errorf("failed to parse %s: %w", s.paths[i], err)
 	}
 	}
 	for i := range res {
@@ -96,6 +103,19 @@ func (s *settings) parseManifests() ([]*unstructured.Unstructured, string, error
 	return res, string(revision), nil
 }
 
+func StartProfiler(log logr.Logger) {
+	if os.Getenv(envProfile) == "web" {
+		go func() {
+			runtime.SetBlockProfileRate(1)
+			runtime.SetMutexProfileFraction(1)
+			profilePort := text.WithDefault(os.Getenv(envProfilePort), "6060")
+			profileHost := text.WithDefault(os.Getenv(envProfileHost), "127.0.0.1")
+
+			log.Info("pprof", "err", http.ListenAndServe(fmt.Sprintf("%s:%s", profileHost, profilePort), nil))
+		}()
+	}
+}
+
 func newCmd(log logr.Logger) *cobra.Command {
 	var (
 		clientConfig clientcmd.ClientConfig
@@ -125,10 +145,12 @@ func newCmd(log logr.Logger) *cobra.Command {
 			if namespaced {
 				namespaces = []string{namespace}
 			}
 
+			StartProfiler(log)
 			clusterCache := cache.NewClusterCache(config,
 				cache.SetNamespaces(namespaces),
+				cache.SetLogr(log),
-				cache.SetPopulateResourceInfoHandler(func(un *unstructured.Unstructured, isRoot bool) (info interface{}, cacheManifest bool) {
+				cache.SetPopulateResourceInfoHandler(func(un *unstructured.Unstructured, _ bool) (info any, cacheManifest bool) {
 					// store gc mark of every resource
-					gcMark := un.GetAnnotations()[annotationGCMark]
+					info = &resourceInfo{gcMark: un.GetAnnotations()[annotationGCMark]}
@@ -153,7 +175,7 @@ func newCmd(log logr.Logger) *cobra.Command {
 				resync <- true
 			}
 		}()
-		http.HandleFunc("/api/v1/sync", func(writer http.ResponseWriter, request *http.Request) {
+		http.HandleFunc("/api/v1/sync", func(_ http.ResponseWriter, _ *http.Request) {
 			log.Info("Synchronization triggered by API call")
 			resync <- true
 		})
go.mod (166 changed lines)
@@ -1,47 +1,135 @@
 module github.com/argoproj/gitops-engine
 
-go 1.14
+go 1.24.0
 
 require (
-	github.com/evanphx/json-patch v4.9.0+incompatible
-	github.com/go-logr/logr v0.2.1
-	github.com/golang/mock v1.4.4
-	github.com/spf13/cobra v1.0.0
-	github.com/stretchr/testify v1.6.1
-	golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208
-	k8s.io/api v0.19.2
-	k8s.io/apiextensions-apiserver v0.19.2
-	k8s.io/apimachinery v0.19.2
-	k8s.io/cli-runtime v0.19.2
-	k8s.io/client-go v0.19.2
-	k8s.io/klog/v2 v2.2.0
-	k8s.io/kube-aggregator v0.19.2
-	k8s.io/kubectl v0.19.2
-	k8s.io/kubernetes v1.19.2
-	sigs.k8s.io/yaml v1.2.0
+	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
+	github.com/evanphx/json-patch/v5 v5.9.11
+	github.com/go-logr/logr v1.4.3
+	github.com/google/gnostic-models v0.6.9
+	github.com/google/uuid v1.6.0
+	github.com/spf13/cobra v1.9.1
+	github.com/stretchr/testify v1.10.0
+	go.uber.org/mock v0.5.2
+	golang.org/x/sync v0.15.0
+	google.golang.org/protobuf v1.36.6
+	k8s.io/api v0.33.1
+	k8s.io/apiextensions-apiserver v0.33.1
+	k8s.io/apimachinery v0.33.1
+	k8s.io/cli-runtime v0.33.1
+	k8s.io/client-go v0.33.1
+	k8s.io/klog/v2 v2.130.1
+	k8s.io/kube-aggregator v0.33.1
+	k8s.io/kube-openapi v0.0.0-20250610211856-8b98d1ed966a
+	k8s.io/kubectl v0.33.1
+	k8s.io/kubernetes v1.33.1
+	sigs.k8s.io/structured-merge-diff/v4 v4.7.0
+	sigs.k8s.io/yaml v1.4.0
 )
 
+require (
+	github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect
+	github.com/MakeNowJust/heredoc v1.0.0 // indirect
+	github.com/beorn7/perks v1.0.1 // indirect
+	github.com/blang/semver/v4 v4.0.0 // indirect
+	github.com/cespare/xxhash/v2 v2.3.0 // indirect
+	github.com/chai2010/gettext-go v1.0.3 // indirect
+	github.com/distribution/reference v0.6.0 // indirect
+	github.com/emicklei/go-restful/v3 v3.12.2 // indirect
+	github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect
+	github.com/fatih/camelcase v1.0.0 // indirect
+	github.com/fxamacker/cbor/v2 v2.8.0 // indirect
+	github.com/go-errors/errors v1.5.1 // indirect
+	github.com/go-openapi/jsonpointer v0.21.1 // indirect
+	github.com/go-openapi/jsonreference v0.21.0 // indirect
+	github.com/go-openapi/swag v0.23.1 // indirect
+	github.com/gogo/protobuf v1.3.2 // indirect
+	github.com/google/btree v1.1.3 // indirect
+	github.com/google/go-cmp v0.7.0 // indirect
+	github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
+	github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
+	github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
+	github.com/inconshreveable/mousetrap v1.1.0 // indirect
+	github.com/jonboulle/clockwork v0.5.0 // indirect
+	github.com/josharian/intern v1.0.0 // indirect
+	github.com/json-iterator/go v1.1.12 // indirect
+	github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
+	github.com/mailru/easyjson v0.9.0 // indirect
+	github.com/mitchellh/go-wordwrap v1.0.1 // indirect
+	github.com/moby/spdystream v0.5.0 // indirect
+	github.com/moby/term v0.5.2 // indirect
+	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
+	github.com/modern-go/reflect2 v1.0.2 // indirect
+	github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
+	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
+	github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
+	github.com/opencontainers/go-digest v1.0.0 // indirect
+	github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
+	github.com/pkg/errors v0.9.1 // indirect
+	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
+	github.com/prometheus/client_golang v1.22.0 // indirect
+	github.com/prometheus/client_model v0.6.2 // indirect
+	github.com/prometheus/common v0.64.0 // indirect
+	github.com/prometheus/procfs v0.16.1 // indirect
+	github.com/russross/blackfriday/v2 v2.1.0 // indirect
+	github.com/spf13/pflag v1.0.6 // indirect
+	github.com/stretchr/objx v0.5.2 // indirect
+	github.com/x448/float16 v0.8.4 // indirect
+	github.com/xlab/treeprint v1.2.0 // indirect
+	go.opentelemetry.io/otel v1.36.0 // indirect
+	go.opentelemetry.io/otel/trace v1.36.0 // indirect
+	golang.org/x/net v0.41.0 // indirect
+	golang.org/x/oauth2 v0.30.0 // indirect
+	golang.org/x/sys v0.33.0 // indirect
+	golang.org/x/term v0.32.0 // indirect
+	golang.org/x/text v0.26.0 // indirect
+	golang.org/x/time v0.12.0 // indirect
+	gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
+	gopkg.in/inf.v0 v0.9.1 // indirect
+	gopkg.in/yaml.v3 v3.0.1 // indirect
+	k8s.io/apiserver v0.33.1 // indirect
+	k8s.io/component-base v0.33.1 // indirect
+	k8s.io/component-helpers v0.33.1 // indirect
+	k8s.io/controller-manager v0.33.1 // indirect
+	k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 // indirect
+	sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
+	sigs.k8s.io/kustomize/api v0.19.0 // indirect
+	sigs.k8s.io/kustomize/kyaml v0.19.0 // indirect
+	sigs.k8s.io/randfill v1.0.0 // indirect
+)
+
 replace (
-	// https://github.com/kubernetes/kubernetes/issues/79384#issuecomment-505627280
-	k8s.io/api => k8s.io/api v0.19.2
-	k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.19.2 // indirect
-	k8s.io/apimachinery => k8s.io/apimachinery v0.19.2 // indirect
-	k8s.io/apiserver => k8s.io/apiserver v0.19.2
-	k8s.io/cli-runtime => k8s.io/cli-runtime v0.19.2
-	k8s.io/client-go => k8s.io/client-go v0.19.2
-	k8s.io/cloud-provider => k8s.io/cloud-provider v0.19.2
-	k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.19.2
-	k8s.io/code-generator => k8s.io/code-generator v0.19.2
-	k8s.io/component-base => k8s.io/component-base v0.19.2
-	k8s.io/cri-api => k8s.io/cri-api v0.19.2
-	k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.19.2
-	k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.19.2
-	k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.19.2
-	k8s.io/kube-proxy => k8s.io/kube-proxy v0.19.2
-	k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.19.2
-	k8s.io/kubectl => k8s.io/kubectl v0.19.2
-	k8s.io/kubelet => k8s.io/kubelet v0.19.2
-	k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.19.2
-	k8s.io/metrics => k8s.io/metrics v0.19.2
-	k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.19.2
+	// After bumping these versions, run hack/update_static_schema.sh in case the schema has changed.
+	k8s.io/api => k8s.io/api v0.33.1
+	k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.33.1
+	k8s.io/apimachinery => k8s.io/apimachinery v0.33.1
+	k8s.io/apiserver => k8s.io/apiserver v0.33.1
+	k8s.io/cli-runtime => k8s.io/cli-runtime v0.33.1
+	k8s.io/client-go => k8s.io/client-go v0.33.1
+	k8s.io/cloud-provider => k8s.io/cloud-provider v0.33.1
+	k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.33.1
+	k8s.io/code-generator => k8s.io/code-generator v0.33.1
+	k8s.io/component-base => k8s.io/component-base v0.33.1
+	k8s.io/component-helpers => k8s.io/component-helpers v0.33.1
+	k8s.io/controller-manager => k8s.io/controller-manager v0.33.1
+	k8s.io/cri-api => k8s.io/cri-api v0.33.1
+	k8s.io/cri-client => k8s.io/cri-client v0.33.1
+	k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.33.1
+	k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.33.1
+	k8s.io/endpointslice => k8s.io/endpointslice v0.33.1
+	k8s.io/externaljwt => k8s.io/externaljwt v0.33.1
+	k8s.io/kms => k8s.io/kms v0.33.1
+	k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.33.1
+	k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.33.1
+	k8s.io/kube-proxy => k8s.io/kube-proxy v0.33.1
+	k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.33.1
+	k8s.io/kubectl => k8s.io/kubectl v0.33.1
+	k8s.io/kubelet => k8s.io/kubelet v0.33.1
+	k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.33.1
+	k8s.io/metrics => k8s.io/metrics v0.33.1
+	k8s.io/mount-utils => k8s.io/mount-utils v0.33.1
+	k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.33.1
+	k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.33.1
+	k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.33.1
+	k8s.io/sample-controller => k8s.io/sample-controller v0.33.1
 )
@@ -0,0 +1,18 @@
+#!/usr/bin/env bash
+
+set -euox pipefail
+
+# Get the k8s library version from go.mod, stripping the trailing newline
+k8s_lib_version=$(grep "k8s.io/client-go" go.mod | awk '{print $2}' | head -n 1 | tr -d '\n')
+
+# Download the parser file from the k8s library
+curl -sL "https://raw.githubusercontent.com/kubernetes/client-go/$k8s_lib_version/applyconfigurations/internal/internal.go" -o pkg/utils/kube/scheme/parser.go
+
+# Add a line to the beginning of the file saying that this is the script that generated it.
+sed -i '' '1s/^/\/\/ Code generated by hack\/update_static_schema.sh; DO NOT EDIT.\n\/\/ Everything below is downloaded from applyconfigurations\/internal\/internal.go in kubernetes\/client-go.\n\n/' pkg/utils/kube/scheme/parser.go
+
+# Replace "package internal" with "package scheme" in the parser file
+sed -i '' 's/package internal/package scheme/' pkg/utils/kube/scheme/parser.go
+
+# Replace "func Parser" with "func StaticParser"
+sed -i '' 's/func Parser/func StaticParser/' pkg/utils/kube/scheme/parser.go
@@ -0,0 +1,90 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package endpoints
+
+import (
+	"bytes"
+	"crypto/md5"
+	"hash"
+	"sort"
+
+	hashutil "github.com/argoproj/gitops-engine/internal/kubernetes_vendor/pkg/util/hash"
+	v1 "k8s.io/api/core/v1"
+)
+
+// LessEndpointAddress compares IP addresses lexicographically and returns true if first argument is lesser than second
+func LessEndpointAddress(a, b *v1.EndpointAddress) bool {
+	ipComparison := bytes.Compare([]byte(a.IP), []byte(b.IP))
+	if ipComparison != 0 {
+		return ipComparison < 0
+	}
+	if b.TargetRef == nil {
+		return false
+	}
+	if a.TargetRef == nil {
+		return true
+	}
+	return a.TargetRef.UID < b.TargetRef.UID
+}
+
+// SortSubsets sorts an array of EndpointSubset objects in place. For ease of
+// use it returns the input slice.
+func SortSubsets(subsets []v1.EndpointSubset) []v1.EndpointSubset {
+	for i := range subsets {
+		ss := &subsets[i]
+		sort.Sort(addrsByIPAndUID(ss.Addresses))
+		sort.Sort(addrsByIPAndUID(ss.NotReadyAddresses))
+		sort.Sort(portsByHash(ss.Ports))
+	}
+	sort.Sort(subsetsByHash(subsets))
+	return subsets
+}
+
+func hashObject(hasher hash.Hash, obj interface{}) []byte {
+	hashutil.DeepHashObject(hasher, obj)
+	return hasher.Sum(nil)
+}
+
+type subsetsByHash []v1.EndpointSubset
+
+func (sl subsetsByHash) Len() int      { return len(sl) }
+func (sl subsetsByHash) Swap(i, j int) { sl[i], sl[j] = sl[j], sl[i] }
+func (sl subsetsByHash) Less(i, j int) bool {
+	hasher := md5.New()
+	h1 := hashObject(hasher, sl[i])
+	h2 := hashObject(hasher, sl[j])
+	return bytes.Compare(h1, h2) < 0
+}
+
+type addrsByIPAndUID []v1.EndpointAddress
+
+func (sl addrsByIPAndUID) Len() int      { return len(sl) }
+func (sl addrsByIPAndUID) Swap(i, j int) { sl[i], sl[j] = sl[j], sl[i] }
+func (sl addrsByIPAndUID) Less(i, j int) bool {
+	return LessEndpointAddress(&sl[i], &sl[j])
+}
+
+type portsByHash []v1.EndpointPort
+
+func (sl portsByHash) Len() int      { return len(sl) }
+func (sl portsByHash) Swap(i, j int) { sl[i], sl[j] = sl[j], sl[i] }
+func (sl portsByHash) Less(i, j int) bool {
+	hasher := md5.New()
+	h1 := hashObject(hasher, sl[i])
+	h2 := hashObject(hasher, sl[j])
+	return bytes.Compare(h1, h2) < 0
+}
@@ -0,0 +1,37 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package hash
+
+import (
+	"hash"
+
+	"github.com/davecgh/go-spew/spew"
+)
+
+// DeepHashObject writes specified object to hash using the spew library
+// which follows pointers and prints actual values of the nested objects
+// ensuring the hash does not change when a pointer changes.
+func DeepHashObject(hasher hash.Hash, objectToWrite interface{}) {
+	hasher.Reset()
+	printer := spew.ConfigState{
+		Indent:         " ",
+		SortKeys:       true,
+		DisableMethods: true,
+		SpewKeys:       true,
+	}
+	printer.Fprintf(hasher, "%#v", objectToWrite)
+}
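The pointer-following behavior is the useful property of the vendored DeepHashObject: two structurally equal objects hash identically even when they hold distinct pointers, because spew's `%#v` prints pointed-to values rather than addresses. A minimal standalone sketch of that property (hypothetical `pod` type; depends only on go-spew, which is in the go.mod above):

```go
package main

import (
	"crypto/sha256"
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

type pod struct {
	Name *string
}

func deepHash(obj interface{}) string {
	h := sha256.New()
	// Same ConfigState as the vendored DeepHashObject: sorted keys and
	// disabled methods make the textual dump deterministic.
	printer := spew.ConfigState{Indent: " ", SortKeys: true, DisableMethods: true, SpewKeys: true}
	printer.Fprintf(h, "%#v", obj)
	return fmt.Sprintf("%x", h.Sum(nil))
}

func main() {
	a, b := "nginx", "nginx" // two distinct string allocations
	// Prints true: equal values behind different pointers hash the same.
	fmt.Println(deepHash(pod{Name: &a}) == deepHash(pod{Name: &b}))
}
```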
File diff suppressed because it is too large
@@ -1,13 +1,21 @@
-// Code generated by mockery v1.0.0. DO NOT EDIT.
+// Code generated by mockery v2.43.2. DO NOT EDIT.
 
 package mocks
 
-import cache "github.com/argoproj/gitops-engine/pkg/cache"
-import kube "github.com/argoproj/gitops-engine/pkg/utils/kube"
-import mock "github.com/stretchr/testify/mock"
-import schema "k8s.io/apimachinery/pkg/runtime/schema"
-import unstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
-import v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+import (
+	cache "github.com/argoproj/gitops-engine/pkg/cache"
+	kube "github.com/argoproj/gitops-engine/pkg/utils/kube"
+
+	managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+
+	mock "github.com/stretchr/testify/mock"
+
+	openapi "k8s.io/kubectl/pkg/util/openapi"
+
+	schema "k8s.io/apimachinery/pkg/runtime/schema"
+
+	unstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+)
 
 // ClusterCache is an autogenerated mock type for the ClusterCache type
 type ClusterCache struct {
@@ -18,6 +26,10 @@ type ClusterCache struct {
 func (_m *ClusterCache) EnsureSynced() error {
 	ret := _m.Called()
 
+	if len(ret) == 0 {
+		panic("no return value specified for EnsureSynced")
+	}
+
 	var r0 error
 	if rf, ok := ret.Get(0).(func() error); ok {
 		r0 = rf()
@@ -28,16 +40,47 @@ func (_m *ClusterCache) EnsureSynced() error {
 	return r0
 }
 
-// GetAPIGroups provides a mock function with given fields:
-func (_m *ClusterCache) GetAPIGroups() []v1.APIGroup {
+// FindResources provides a mock function with given fields: namespace, predicates
+func (_m *ClusterCache) FindResources(namespace string, predicates ...func(*cache.Resource) bool) map[kube.ResourceKey]*cache.Resource {
+	_va := make([]interface{}, len(predicates))
+	for _i := range predicates {
+		_va[_i] = predicates[_i]
+	}
+	var _ca []interface{}
+	_ca = append(_ca, namespace)
+	_ca = append(_ca, _va...)
+	ret := _m.Called(_ca...)
+
+	if len(ret) == 0 {
+		panic("no return value specified for FindResources")
+	}
+
+	var r0 map[kube.ResourceKey]*cache.Resource
+	if rf, ok := ret.Get(0).(func(string, ...func(*cache.Resource) bool) map[kube.ResourceKey]*cache.Resource); ok {
+		r0 = rf(namespace, predicates...)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(map[kube.ResourceKey]*cache.Resource)
+		}
+	}
+
+	return r0
+}
+
+// GetAPIResources provides a mock function with given fields:
+func (_m *ClusterCache) GetAPIResources() []kube.APIResourceInfo {
 	ret := _m.Called()
 
-	var r0 []v1.APIGroup
-	if rf, ok := ret.Get(0).(func() []v1.APIGroup); ok {
+	if len(ret) == 0 {
+		panic("no return value specified for GetAPIResources")
+	}
+
+	var r0 []kube.APIResourceInfo
+	if rf, ok := ret.Get(0).(func() []kube.APIResourceInfo); ok {
 		r0 = rf()
 	} else {
 		if ret.Get(0) != nil {
-			r0 = ret.Get(0).([]v1.APIGroup)
+			r0 = ret.Get(0).([]kube.APIResourceInfo)
 		}
 	}
 
@@ -48,6 +91,10 @@ func (_m *ClusterCache) GetAPIGroups() []v1.APIGroup {
 func (_m *ClusterCache) GetClusterInfo() cache.ClusterInfo {
 	ret := _m.Called()
 
+	if len(ret) == 0 {
+		panic("no return value specified for GetClusterInfo")
+	}
+
 	var r0 cache.ClusterInfo
 	if rf, ok := ret.Get(0).(func() cache.ClusterInfo); ok {
 		r0 = rf()
@@ -58,11 +105,39 @@ func (_m *ClusterCache) GetClusterInfo() cache.ClusterInfo {
 	return r0
 }
 
+// GetGVKParser provides a mock function with given fields:
+func (_m *ClusterCache) GetGVKParser() *managedfields.GvkParser {
+	ret := _m.Called()
+
+	if len(ret) == 0 {
+		panic("no return value specified for GetGVKParser")
+	}
+
+	var r0 *managedfields.GvkParser
+	if rf, ok := ret.Get(0).(func() *managedfields.GvkParser); ok {
+		r0 = rf()
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(*managedfields.GvkParser)
+		}
+	}
+
+	return r0
+}
+
 // GetManagedLiveObjs provides a mock function with given fields: targetObjs, isManaged
 func (_m *ClusterCache) GetManagedLiveObjs(targetObjs []*unstructured.Unstructured, isManaged func(*cache.Resource) bool) (map[kube.ResourceKey]*unstructured.Unstructured, error) {
 	ret := _m.Called(targetObjs, isManaged)
 
+	if len(ret) == 0 {
+		panic("no return value specified for GetManagedLiveObjs")
+	}
+
 	var r0 map[kube.ResourceKey]*unstructured.Unstructured
+	var r1 error
+	if rf, ok := ret.Get(0).(func([]*unstructured.Unstructured, func(*cache.Resource) bool) (map[kube.ResourceKey]*unstructured.Unstructured, error)); ok {
+		return rf(targetObjs, isManaged)
+	}
 	if rf, ok := ret.Get(0).(func([]*unstructured.Unstructured, func(*cache.Resource) bool) map[kube.ResourceKey]*unstructured.Unstructured); ok {
 		r0 = rf(targetObjs, isManaged)
 	} else {
@@ -71,7 +146,6 @@ func (_m *ClusterCache) GetManagedLiveObjs(targetObjs []*unstructured.Unstructur
 		}
 	}
 
-	var r1 error
 	if rf, ok := ret.Get(1).(func([]*unstructured.Unstructured, func(*cache.Resource) bool) error); ok {
 		r1 = rf(targetObjs, isManaged)
 	} else {
@@ -81,16 +155,20 @@ func (_m *ClusterCache) GetManagedLiveObjs(targetObjs []*unstructured.Unstructur
 	return r0, r1
 }
 
-// GetNamespaceTopLevelResources provides a mock function with given fields: namespace
-func (_m *ClusterCache) GetNamespaceTopLevelResources(namespace string) map[kube.ResourceKey]*cache.Resource {
-	ret := _m.Called(namespace)
+// GetOpenAPISchema provides a mock function with given fields:
+func (_m *ClusterCache) GetOpenAPISchema() openapi.Resources {
+	ret := _m.Called()
 
-	var r0 map[kube.ResourceKey]*cache.Resource
-	if rf, ok := ret.Get(0).(func(string) map[kube.ResourceKey]*cache.Resource); ok {
-		r0 = rf(namespace)
+	if len(ret) == 0 {
+		panic("no return value specified for GetOpenAPISchema")
+	}
+
+	var r0 openapi.Resources
+	if rf, ok := ret.Get(0).(func() openapi.Resources); ok {
+		r0 = rf()
 	} else {
 		if ret.Get(0) != nil {
-			r0 = ret.Get(0).(map[kube.ResourceKey]*cache.Resource)
+			r0 = ret.Get(0).(openapi.Resources)
 		}
 	}
 
@@ -101,6 +179,10 @@ func (_m *ClusterCache) GetNamespaceTopLevelResources(namespace string) map[kube
 func (_m *ClusterCache) GetServerVersion() string {
 	ret := _m.Called()
 
+	if len(ret) == 0 {
+		panic("no return value specified for GetServerVersion")
+	}
+
 	var r0 string
 	if rf, ok := ret.Get(0).(func() string); ok {
 		r0 = rf()
@@ -126,14 +208,21 @@ func (_m *ClusterCache) Invalidate(opts ...cache.UpdateSettingsFunc) {
 func (_m *ClusterCache) IsNamespaced(gk schema.GroupKind) (bool, error) {
 	ret := _m.Called(gk)
 
+	if len(ret) == 0 {
+		panic("no return value specified for IsNamespaced")
+	}
+
 	var r0 bool
+	var r1 error
+	if rf, ok := ret.Get(0).(func(schema.GroupKind) (bool, error)); ok {
+		return rf(gk)
+	}
 	if rf, ok := ret.Get(0).(func(schema.GroupKind) bool); ok {
 		r0 = rf(gk)
 	} else {
 		r0 = ret.Get(0).(bool)
 	}
 
-	var r1 error
 	if rf, ok := ret.Get(1).(func(schema.GroupKind) error); ok {
 		r1 = rf(gk)
 	} else {
@@ -144,14 +233,23 @@ func (_m *ClusterCache) IsNamespaced(gk schema.GroupKind) (bool, error) {
 }
 
 // IterateHierarchy provides a mock function with given fields: key, action
-func (_m *ClusterCache) IterateHierarchy(key kube.ResourceKey, action func(*cache.Resource, map[kube.ResourceKey]*cache.Resource)) {
+func (_m *ClusterCache) IterateHierarchy(key kube.ResourceKey, action func(*cache.Resource, map[kube.ResourceKey]*cache.Resource) bool) {
 	_m.Called(key, action)
 }
 
+// IterateHierarchyV2 provides a mock function with given fields: keys, action
+func (_m *ClusterCache) IterateHierarchyV2(keys []kube.ResourceKey, action func(*cache.Resource, map[kube.ResourceKey]*cache.Resource) bool) {
+	_m.Called(keys, action)
+}
+
 // OnEvent provides a mock function with given fields: handler
 func (_m *ClusterCache) OnEvent(handler cache.OnEventHandler) cache.Unsubscribe {
 	ret := _m.Called(handler)
 
+	if len(ret) == 0 {
+		panic("no return value specified for OnEvent")
+	}
+
 	var r0 cache.Unsubscribe
 	if rf, ok := ret.Get(0).(func(cache.OnEventHandler) cache.Unsubscribe); ok {
 		r0 = rf(handler)
@@ -164,10 +262,34 @@ func (_m *ClusterCache) OnEvent(handler cache.OnEventHandler) cache.Unsubscribe
 	return r0
 }
 
+// OnProcessEventsHandler provides a mock function with given fields: handler
+func (_m *ClusterCache) OnProcessEventsHandler(handler cache.OnProcessEventsHandler) cache.Unsubscribe {
+	ret := _m.Called(handler)
+
+	if len(ret) == 0 {
+		panic("no return value specified for OnProcessEventsHandler")
+	}
+
+	var r0 cache.Unsubscribe
+	if rf, ok := ret.Get(0).(func(cache.OnProcessEventsHandler) cache.Unsubscribe); ok {
+		r0 = rf(handler)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(cache.Unsubscribe)
+		}
+	}
+
+	return r0
+}
+
 // OnResourceUpdated provides a mock function with given fields: handler
 func (_m *ClusterCache) OnResourceUpdated(handler cache.OnResourceUpdatedHandler) cache.Unsubscribe {
 	ret := _m.Called(handler)
 
+	if len(ret) == 0 {
+		panic("no return value specified for OnResourceUpdated")
+	}
+
 	var r0 cache.Unsubscribe
 	if rf, ok := ret.Get(0).(func(cache.OnResourceUpdatedHandler) cache.Unsubscribe); ok {
 		r0 = rf(handler)
@@ -179,3 +301,17 @@ func (_m *ClusterCache) OnResourceUpdated(handler cache.OnResourceUpdatedHandler
 
 	return r0
 }
+
+// NewClusterCache creates a new instance of ClusterCache. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewClusterCache(t interface {
+	mock.TestingT
+	Cleanup(func())
+}) *ClusterCache {
+	mock := &ClusterCache{}
+	mock.Mock.Test(t)
+
+	t.Cleanup(func() { mock.AssertExpectations(t) })
+
+	return mock
+}
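A usage sketch, not part of the diff, showing how the regenerated mock is typically wired into a test; the import path below assumes the mocks package lives at pkg/cache/mocks, as the file's own imports suggest:

```go
package mocks_test

import (
	"testing"

	"github.com/stretchr/testify/assert"

	// Assumed location of the generated mock shown above.
	"github.com/argoproj/gitops-engine/pkg/cache/mocks"
)

func TestEnsureSyncedIsStubbed(t *testing.T) {
	// NewClusterCache registers t and asserts expectations on cleanup.
	clusterCache := mocks.NewClusterCache(t)
	clusterCache.On("EnsureSynced").Return(nil)

	// Without the .Return above, mockery v2 mocks now panic with
	// "no return value specified for EnsureSynced" instead of returning zero values.
	assert.NoError(t, clusterCache.EnsureSynced())
}
```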
@@ -0,0 +1,14 @@
+package cache
+
+// TopLevelResource returns true if resource has no parents
+func TopLevelResource(r *Resource) bool {
+	return len(r.OwnerRefs) == 0
+}
+
+// ResourceOfGroupKind returns predicate that matches resource by specified group and kind
+func ResourceOfGroupKind(group string, kind string) func(r *Resource) bool {
+	return func(r *Resource) bool {
+		key := r.ResourceKey()
+		return key.Group == group && key.Kind == kind
+	}
+}
@@ -0,0 +1,123 @@
+package cache
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/argoproj/gitops-engine/pkg/utils/kube"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	appsv1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/client-go/rest"
+)
+
+func TestResourceOfGroupKind(t *testing.T) {
+	deploy := &appsv1.Deployment{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: "apps/v1",
+			Kind:       "Deployment",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "deploy",
+		},
+	}
+	service := &corev1.Service{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: "",
+			Kind:       "Service",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "service",
+		},
+	}
+
+	cluster := newCluster(t, deploy, service)
+	err := cluster.EnsureSynced()
+	require.NoError(t, err)
+
+	resources := cluster.FindResources("", ResourceOfGroupKind("apps", "Deployment"))
+	assert.Len(t, resources, 1)
+	assert.NotNil(t, resources[kube.NewResourceKey("apps", "Deployment", "", "deploy")])
+}
+
+func TestGetNamespaceResources(t *testing.T) {
+	defaultNamespaceTopLevel1 := &appsv1.Deployment{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: "apps/v1",
+			Kind:       "Deployment",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "helm-guestbook1",
+			Namespace: "default",
+		},
+	}
+	defaultNamespaceTopLevel2 := &appsv1.Deployment{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: "apps/v1",
+			Kind:       "Deployment",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "helm-guestbook2",
+			Namespace: "default",
+		},
+	}
+	kubesystemNamespaceTopLevel2 := &appsv1.Deployment{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: "apps/v1",
+			Kind:       "Deployment",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "helm-guestbook3",
+			Namespace: "kube-system",
+		},
+	}
+
+	cluster := newCluster(t, defaultNamespaceTopLevel1, defaultNamespaceTopLevel2, kubesystemNamespaceTopLevel2)
+	err := cluster.EnsureSynced()
+	require.NoError(t, err)
+
+	resources := cluster.FindResources("default", TopLevelResource)
+	assert.Len(t, resources, 2)
+	assert.Equal(t, "helm-guestbook1", resources[getResourceKey(t, defaultNamespaceTopLevel1)].Ref.Name)
+	assert.Equal(t, "helm-guestbook2", resources[getResourceKey(t, defaultNamespaceTopLevel2)].Ref.Name)
+
+	resources = cluster.FindResources("kube-system", TopLevelResource)
+	assert.Len(t, resources, 1)
+	assert.Equal(t, "helm-guestbook3", resources[getResourceKey(t, kubesystemNamespaceTopLevel2)].Ref.Name)
+}
+
+func ExampleNewClusterCache_inspectNamespaceResources() {
+	// kubernetes cluster config here
+	config := &rest.Config{}
+
+	clusterCache := NewClusterCache(config,
+		// cache default namespace only
+		SetNamespaces([]string{"default", "kube-system"}),
+		// configure custom logic to cache resources manifest and additional metadata
+		SetPopulateResourceInfoHandler(func(un *unstructured.Unstructured, _ bool) (info any, cacheManifest bool) {
+			// if resource belongs to 'extensions' group then mark it with 'deprecated' label
+			if un.GroupVersionKind().Group == "extensions" {
+				info = []string{"deprecated"}
+			}
+			_, ok := un.GetLabels()["acme.io/my-label"]
+			// cache whole manifest if resource has label
+			cacheManifest = ok
+			return
+		}),
+	)
+	// Ensure cluster is synced before using it
+	if err := clusterCache.EnsureSynced(); err != nil {
+		panic(err)
+	}
+	// Iterate default namespace resources tree
+	for _, root := range clusterCache.FindResources("default", TopLevelResource) {
+		clusterCache.IterateHierarchy(root.ResourceKey(), func(resource *Resource, _ map[kube.ResourceKey]*Resource) bool {
+			fmt.Printf("resource: %s, info: %v\n", resource.Ref.String(), resource.Info)
+			return true
+		})
+	}
+}
@@ -3,9 +3,9 @@ package cache
 import (
 	"encoding/json"
 	"fmt"
-	"strings"
+	"regexp"
 
-	v1 "k8s.io/api/apps/v1"
+	appsv1 "k8s.io/api/apps/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/types"
@@ -24,9 +24,8 @@ func (c *clusterCache) resolveResourceReferences(un *unstructured.Unstructured)
 	gvk := un.GroupVersionKind()
 
 	switch {
-
 	// Special case for endpoint. Remove after https://github.com/kubernetes/kubernetes/issues/28483 is fixed
-	case gvk.Group == "" && gvk.Kind == kube.EndpointsKind && len(un.GetOwnerReferences()) == 0:
+	case gvk.Group == "" && gvk.Kind == kube.EndpointsKind && len(ownerRefs) == 0:
 		ownerRefs = append(ownerRefs, metav1.OwnerReference{
 			Name: un.GetName(),
 			Kind: kube.ServiceKind,
@@ -34,7 +33,7 @@ func (c *clusterCache) resolveResourceReferences(un *unstructured.Unstructured)
 		})
 
 	// Special case for Operator Lifecycle Manager ClusterServiceVersion:
-	case un.GroupVersionKind().Group == "operators.coreos.com" && un.GetKind() == "ClusterServiceVersion":
+	case gvk.Group == "operators.coreos.com" && gvk.Kind == "ClusterServiceVersion":
 		if un.GetAnnotations()["olm.operatorGroup"] != "" {
 			ownerRefs = append(ownerRefs, metav1.OwnerReference{
 				Name: un.GetAnnotations()["olm.operatorGroup"],
@@ -44,12 +43,12 @@ func (c *clusterCache) resolveResourceReferences(un *unstructured.Unstructured)
 		}
 
 	// Edge case: consider auto-created service account tokens as a child of service account objects
-	case un.GetKind() == kube.SecretKind && un.GroupVersionKind().Group == "":
+	case gvk.Kind == kube.SecretKind && gvk.Group == "":
 		if yes, ref := isServiceAccountTokenSecret(un); yes {
 			ownerRefs = append(ownerRefs, ref)
 		}
 
-	case (un.GroupVersionKind().Group == "apps" || un.GroupVersionKind().Group == "extensions") && un.GetKind() == kube.StatefulSetKind:
+	case (gvk.Group == "apps" || gvk.Group == "extensions") && gvk.Kind == kube.StatefulSetKind:
 		if refs, err := isStatefulSetChild(un); err != nil {
 			c.log.Error(err, fmt.Sprintf("Failed to extract StatefulSet %s/%s PVC references", un.GetNamespace(), un.GetName()))
 		} else {
@@ -61,21 +60,21 @@ func (c *clusterCache) resolveResourceReferences(un *unstructured.Unstructured)
 }
 
 func isStatefulSetChild(un *unstructured.Unstructured) (func(kube.ResourceKey) bool, error) {
-	sts := v1.StatefulSet{}
+	sts := appsv1.StatefulSet{}
 	data, err := json.Marshal(un)
 	if err != nil {
-		return nil, err
+		return nil, fmt.Errorf("failed to marshal unstructured object: %w", err)
 	}
 	err = json.Unmarshal(data, &sts)
 	if err != nil {
-		return nil, err
+		return nil, fmt.Errorf("failed to unmarshal statefulset: %w", err)
 	}
 
 	templates := sts.Spec.VolumeClaimTemplates
 	return func(key kube.ResourceKey) bool {
 		if key.Kind == kube.PersistentVolumeClaimKind && key.GroupKind().Group == "" {
 			for _, templ := range templates {
-				if strings.HasPrefix(key.Name, fmt.Sprintf("%s-%s-", templ.Name, un.GetName())) {
+				if match, _ := regexp.MatchString(fmt.Sprintf(`%s-%s-\d+$`, templ.Name, un.GetName()), key.Name); match {
 					return true
 				}
 			}
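One note on the anchoring change above: the old strings.HasPrefix check let a PVC belonging to one StatefulSet be claimed by another whose name is a prefix of it (sw-broker vs sw-broker-internal), while the `\d+$` suffix in the new pattern requires the name to end in an ordinal. A tiny standalone sketch of the difference, using the same hypothetical names as the tests that follow:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	template, sts := "emqx-data", "sw-broker"
	pvc := "emqx-data-sw-broker-internal-0" // belongs to sw-broker-internal, not sw-broker

	// Old check: prefix match wrongly claims the PVC for sw-broker.
	fmt.Println(strings.HasPrefix(pvc, fmt.Sprintf("%s-%s-", template, sts))) // true

	// New check: the name must end with "<template>-<sts>-<ordinal>".
	match, _ := regexp.MatchString(fmt.Sprintf(`%s-%s-\d+$`, template, sts), pvc)
	fmt.Println(match) // false
}
```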
@@ -0,0 +1,106 @@
+package cache
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	appsv1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+
+	"github.com/argoproj/gitops-engine/pkg/utils/kube"
+)
+
+func Test_isStatefulSetChild(t *testing.T) {
+	type args struct {
+		un *unstructured.Unstructured
+	}
+
+	statefulSet := &appsv1.StatefulSet{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: "apps/v1",
+			Kind:       "StatefulSet",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "sw-broker",
+		},
+		Spec: appsv1.StatefulSetSpec{
+			VolumeClaimTemplates: []corev1.PersistentVolumeClaim{
+				{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: "emqx-data",
+					},
+				},
+			},
+		},
+	}
+
+	// Create a new unstructured object from the JSON string
+	un, err := kube.ToUnstructured(statefulSet)
+	require.NoErrorf(t, err, "Failed to convert StatefulSet to unstructured: %v", err)
+
+	tests := []struct {
+		name      string
+		args      args
+		wantErr   bool
+		checkFunc func(func(kube.ResourceKey) bool) bool
+	}{
+		{
+			name:    "Valid PVC for sw-broker",
+			args:    args{un: un},
+			wantErr: false,
+			checkFunc: func(fn func(kube.ResourceKey) bool) bool {
+				// Check a valid PVC name for "sw-broker"
+				return fn(kube.ResourceKey{Kind: "PersistentVolumeClaim", Name: "emqx-data-sw-broker-0"})
+			},
+		},
+		{
+			name:    "Invalid PVC for sw-broker",
+			args:    args{un: un},
+			wantErr: false,
+			checkFunc: func(fn func(kube.ResourceKey) bool) bool {
+				// Check an invalid PVC name that should belong to "sw-broker-internal"
+				return !fn(kube.ResourceKey{Kind: "PersistentVolumeClaim", Name: "emqx-data-sw-broker-internal-0"})
+			},
+		},
+		{
+			name: "Mismatch PVC for sw-broker",
+			args: args{un: &unstructured.Unstructured{
+				Object: map[string]any{
+					"apiVersion": "apps/v1",
+					"kind":       "StatefulSet",
+					"metadata": map[string]any{
+						"name": "sw-broker",
+					},
+					"spec": map[string]any{
+						"volumeClaimTemplates": []any{
+							map[string]any{
+								"metadata": map[string]any{
+									"name": "volume-2",
+								},
+							},
+						},
+					},
+				},
+			}},
+			wantErr: false,
+			checkFunc: func(fn func(kube.ResourceKey) bool) bool {
+				// Check an invalid PVC name for "api-test"
+				return !fn(kube.ResourceKey{Kind: "PersistentVolumeClaim", Name: "volume-2"})
+			},
+		},
+	}
+
+	// Execute test cases
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			got, err := isStatefulSetChild(tt.args.un)
+			assert.Equal(t, tt.wantErr, err != nil, "isStatefulSetChild() error = %v, wantErr %v", err, tt.wantErr)
+			if err == nil {
+				assert.True(t, tt.checkFunc(got), "Check function failed for %v", tt.name)
+			}
+		})
+	}
+}
@@ -3,7 +3,9 @@ package cache
import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

@@ -15,13 +17,13 @@ type Resource struct {
	// ResourceVersion holds most recent observed resource version
	ResourceVersion string
	// Resource reference
	Ref v1.ObjectReference
	Ref corev1.ObjectReference
	// References to resource owners
	OwnerRefs []metav1.OwnerReference
	// Optional creation timestamp of the resource
	CreationTimestamp *metav1.Time
	// Optional additional information about the resource
	Info interface{}
	Info any
	// Optional whole resource manifest
	Resource *unstructured.Unstructured

@@ -35,7 +37,6 @@ func (r *Resource) ResourceKey() kube.ResourceKey {

func (r *Resource) isParentOf(child *Resource) bool {
	for i, ownerRef := range child.OwnerRefs {

		// backfill UID of inferred owner child references
		if ownerRef.UID == "" && r.Ref.Kind == ownerRef.Kind && r.Ref.APIVersion == ownerRef.APIVersion && r.Ref.Name == ownerRef.Name {
			ownerRef.UID = r.Ref.UID

@@ -85,16 +86,46 @@ func newResourceKeySet(set map[kube.ResourceKey]bool, keys ...kube.ResourceKey)
	return newSet
}

func (r *Resource) iterateChildren(ns map[kube.ResourceKey]*Resource, parents map[kube.ResourceKey]bool, action func(err error, child *Resource, namespaceResources map[kube.ResourceKey]*Resource)) {
func (r *Resource) iterateChildren(ns map[kube.ResourceKey]*Resource, parents map[kube.ResourceKey]bool, action func(err error, child *Resource, namespaceResources map[kube.ResourceKey]*Resource) bool) {
	for childKey, child := range ns {
		if r.isParentOf(ns[childKey]) {
			if parents[childKey] {
				key := r.ResourceKey()
				action(fmt.Errorf("circular dependency detected. %s is child and parent of %s", childKey.String(), key.String()), child, ns)
			} else {
				action(nil, child, ns)
				_ = action(fmt.Errorf("circular dependency detected. %s is child and parent of %s", childKey.String(), key.String()), child, ns)
			} else if action(nil, child, ns) {
				child.iterateChildren(ns, newResourceKeySet(parents, r.ResourceKey()), action)
			}
		}
	}
}

// iterateChildrenV2 is a depth-first traversal of the graph of resources starting from the current resource.
func (r *Resource) iterateChildrenV2(graph map[kube.ResourceKey]map[types.UID]*Resource, ns map[kube.ResourceKey]*Resource, visited map[kube.ResourceKey]int, action func(err error, child *Resource, namespaceResources map[kube.ResourceKey]*Resource) bool) {
	key := r.ResourceKey()
	if visited[key] == 2 {
		return
	}
	// this indicates that we've started processing this node's children
	visited[key] = 1
	defer func() {
		// this indicates that we've finished processing this node's children
		visited[key] = 2
	}()
	children, ok := graph[key]
	if !ok || children == nil {
		return
	}
	for _, c := range children {
		childKey := c.ResourceKey()
		child := ns[childKey]
		switch visited[childKey] {
		case 1:
			// Since we encountered a node that we're currently processing, we know we have a circular dependency.
			_ = action(fmt.Errorf("circular dependency detected. %s is child and parent of %s", childKey.String(), key.String()), child, ns)
		case 0:
			if action(nil, child, ns) {
				child.iterateChildrenV2(graph, ns, visited, action)
			}
		}
	}
}
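iterateChildrenV2 replaces the parent-set bookkeeping of the old iterateChildren with a three-state visited map (0 = unvisited, 1 = in progress, 2 = done), the classic white/grey/black DFS coloring: hitting a grey node means a cycle, and black nodes are never re-processed. A minimal standalone sketch of the same idea, using plain string node IDs instead of the cache types:

package main

import "fmt"

// dfs walks children depth-first with three-state coloring:
// 0 = unvisited, 1 = being processed, 2 = fully processed.
func dfs(graph map[string][]string, node string, visited map[string]int) {
	if visited[node] == 2 {
		return // already fully processed
	}
	visited[node] = 1
	defer func() { visited[node] = 2 }()

	for _, child := range graph[node] {
		switch visited[child] {
		case 1:
			// the child is still on the stack: circular dependency
			fmt.Printf("circular dependency: %s is child and parent of %s\n", child, node)
		case 0:
			dfs(graph, child, visited)
		}
	}
}

func main() {
	graph := map[string][]string{"a": {"b"}, "b": {"c"}, "c": {"a"}}
	dfs(graph, "a", map[string]int{}) // reports the a -> b -> c -> a cycle once
}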
@@ -7,29 +7,29 @@ import (
	"k8s.io/client-go/rest"
)

var c = NewClusterCache(&rest.Config{})
var cacheTest = NewClusterCache(&rest.Config{})

func TestIsParentOf(t *testing.T) {
	child := c.newResource(testPod)
	parent := c.newResource(testRS)
	grandParent := c.newResource(testDeploy)
	child := cacheTest.newResource(mustToUnstructured(testPod1()))
	parent := cacheTest.newResource(mustToUnstructured(testRS()))
	grandParent := cacheTest.newResource(mustToUnstructured(testDeploy()))

	assert.True(t, parent.isParentOf(child))
	assert.False(t, grandParent.isParentOf(child))
}

func TestIsParentOfSameKindDifferentGroupAndUID(t *testing.T) {
	rs := testRS.DeepCopy()
	rs.SetAPIVersion("somecrd.io/v1")
	rs := testRS()
	rs.APIVersion = "somecrd.io/v1"
	rs.SetUID("123")
	child := c.newResource(testPod)
	invalidParent := c.newResource(rs)
	child := cacheTest.newResource(mustToUnstructured(testPod1()))
	invalidParent := cacheTest.newResource(mustToUnstructured(rs))

	assert.False(t, invalidParent.isParentOf(child))
}

func TestIsServiceParentOfEndPointWithTheSameName(t *testing.T) {
	nonMatchingNameEndPoint := c.newResource(strToUnstructured(`
	nonMatchingNameEndPoint := cacheTest.newResource(strToUnstructured(`
apiVersion: v1
kind: Endpoints
metadata:

@@ -37,7 +37,7 @@ metadata:
  namespace: default
`))

	matchingNameEndPoint := c.newResource(strToUnstructured(`
	matchingNameEndPoint := cacheTest.newResource(strToUnstructured(`
apiVersion: v1
kind: Endpoints
metadata:

@@ -45,15 +45,15 @@ metadata:
  namespace: default
`))

	parent := c.newResource(testService)
	parent := cacheTest.newResource(testService)

	assert.True(t, parent.isParentOf(matchingNameEndPoint))
	assert.Equal(t, parent.Ref.UID, matchingNameEndPoint.OwnerRefs[0].UID)
	assert.False(t, parent.isParentOf(nonMatchingNameEndPoint))
}

func TestIsServiceAccoountParentOfSecret(t *testing.T) {
	serviceAccount := c.newResource(strToUnstructured(`
func TestIsServiceAccountParentOfSecret(t *testing.T) {
	serviceAccount := cacheTest.newResource(strToUnstructured(`
apiVersion: v1
kind: ServiceAccount
metadata:

@@ -63,7 +63,7 @@ metadata:
secrets:
- name: default-token-123
`))
	tokenSecret := c.newResource(strToUnstructured(`
	tokenSecret := cacheTest.newResource(strToUnstructured(`
apiVersion: v1
kind: Secret
metadata:
@@ -12,9 +12,13 @@ import (
	"github.com/argoproj/gitops-engine/pkg/utils/tracing"
)

type noopSettings struct {
// NewNoopSettings returns cache settings that have no health customizations and don't filter any resources
func NewNoopSettings() *noopSettings {
	return &noopSettings{}
}

type noopSettings struct{}

func (f *noopSettings) GetResourceHealth(_ *unstructured.Unstructured) (*health.HealthStatus, error) {
	return nil, nil
}

@@ -61,6 +65,14 @@ func SetNamespaces(namespaces []string) UpdateSettingsFunc {
	}
}

// SetClusterResources specifies whether cluster-level resources are included or not.
// The flag is used only if the cluster is switched to namespaced mode using the SetNamespaces setting
func SetClusterResources(val bool) UpdateSettingsFunc {
	return func(cache *clusterCache) {
		cache.clusterResources = val
	}
}

// SetConfig updates cluster rest config
func SetConfig(config *rest.Config) UpdateSettingsFunc {
	return func(cache *clusterCache) {

@@ -93,7 +105,24 @@ func SetListSemaphore(listSemaphore WeightedSemaphore) UpdateSettingsFunc {
// SetResyncTimeout updates cluster re-sync timeout
func SetResyncTimeout(timeout time.Duration) UpdateSettingsFunc {
	return func(cache *clusterCache) {
		cache.resyncTimeout = timeout
		cache.syncStatus.lock.Lock()
		defer cache.syncStatus.lock.Unlock()

		cache.syncStatus.resyncTimeout = timeout
	}
}

// SetWatchResyncTimeout updates cluster watch re-sync timeout
func SetWatchResyncTimeout(timeout time.Duration) UpdateSettingsFunc {
	return func(cache *clusterCache) {
		cache.watchResyncTimeout = timeout
	}
}

// SetClusterSyncRetryTimeout updates cluster sync retry timeout when sync error happens
func SetClusterSyncRetryTimeout(timeout time.Duration) UpdateSettingsFunc {
	return func(cache *clusterCache) {
		cache.clusterSyncRetryTimeout = timeout
	}
}

@@ -115,3 +144,42 @@ func SetTracer(tracer tracing.Tracer) UpdateSettingsFunc {
	}
}
}

// SetRetryOptions sets cluster list retry options
func SetRetryOptions(maxRetries int32, useBackoff bool, retryFunc ListRetryFunc) UpdateSettingsFunc {
	return func(cache *clusterCache) {
		// Max retries must be at least one
		if maxRetries < 1 {
			maxRetries = 1
		}
		cache.listRetryLimit = maxRetries
		cache.listRetryUseBackoff = useBackoff
		cache.listRetryFunc = retryFunc
	}
}

// SetRespectRBAC allows setting whether to respect the controller RBAC in list/watches
func SetRespectRBAC(respectRBAC int) UpdateSettingsFunc {
	return func(cache *clusterCache) {
		// if an invalid value is provided, disable respect RBAC
		if respectRBAC < RespectRbacDisabled || respectRBAC > RespectRbacStrict {
			cache.respectRBAC = RespectRbacDisabled
		} else {
			cache.respectRBAC = respectRBAC
		}
	}
}

// SetBatchEventsProcessing allows setting whether to process events in batches
func SetBatchEventsProcessing(batchProcessing bool) UpdateSettingsFunc {
	return func(cache *clusterCache) {
		cache.batchEventsProcessing = batchProcessing
	}
}

// SetEventProcessingInterval allows setting the interval for processing events
func SetEventProcessingInterval(interval time.Duration) UpdateSettingsFunc {
	return func(cache *clusterCache) {
		cache.eventProcessingInterval = interval
	}
}
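All of these setters follow the same UpdateSettingsFunc functional-option pattern, so they compose both at construction time and later through Invalidate, as the tests below show. A small usage sketch inside package cache (the rest config and concrete values are placeholders):

c := NewClusterCache(&rest.Config{},
	SetWatchResyncTimeout(30*time.Minute),
	SetBatchEventsProcessing(true),
	SetEventProcessingInterval(100*time.Millisecond),
	SetRetryOptions(3, true, nil),
)

// Settings can also be changed on a live cache; Invalidate applies the
// options to the existing cache instance.
c.Invalidate(SetResyncTimeout(1 * time.Hour))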
@@ -39,10 +39,36 @@ func TestSetNamespaces(t *testing.T) {

func TestSetResyncTimeout(t *testing.T) {
	cache := NewClusterCache(&rest.Config{})
	assert.Equal(t, clusterResyncTimeout, cache.resyncTimeout)
	assert.Equal(t, defaultClusterResyncTimeout, cache.syncStatus.resyncTimeout)

	timeout := 1 * time.Hour
	cache.Invalidate(SetResyncTimeout(timeout))

	assert.Equal(t, timeout, cache.resyncTimeout)
	assert.Equal(t, timeout, cache.syncStatus.resyncTimeout)
}

func TestSetWatchResyncTimeout(t *testing.T) {
	cache := NewClusterCache(&rest.Config{})
	assert.Equal(t, defaultWatchResyncTimeout, cache.watchResyncTimeout)

	timeout := 30 * time.Minute
	cache = NewClusterCache(&rest.Config{}, SetWatchResyncTimeout(timeout))
	assert.Equal(t, timeout, cache.watchResyncTimeout)
}

func TestSetBatchEventsProcessing(t *testing.T) {
	cache := NewClusterCache(&rest.Config{})
	assert.False(t, cache.batchEventsProcessing)

	cache.Invalidate(SetBatchEventsProcessing(true))
	assert.True(t, cache.batchEventsProcessing)
}

func TestSetEventsProcessingInterval(t *testing.T) {
	cache := NewClusterCache(&rest.Config{})
	assert.Equal(t, defaultEventProcessingInterval, cache.eventProcessingInterval)

	interval := 1 * time.Second
	cache.Invalidate(SetEventProcessingInterval(interval))
	assert.Equal(t, interval, cache.eventProcessingInterval)
}
pkg/diff/diff.go (758 lines changed): file diff suppressed because it is too large.
@@ -1,8 +1,13 @@
package diff

import (
	"context"

	"github.com/go-logr/logr"
	"k8s.io/klog/v2/klogr"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/util/managedfields"
	"k8s.io/klog/v2/textlogger"
	cmdutil "k8s.io/kubectl/pkg/cmd/util"
)

type Option func(*options)

@@ -13,13 +18,20 @@ type options struct {
	ignoreAggregatedRoles bool
	normalizer            Normalizer
	log                   logr.Logger
	structuredMergeDiff   bool
	gvkParser             *managedfields.GvkParser
	manager               string
	serverSideDiff        bool
	serverSideDryRunner   ServerSideDryRunner
	ignoreMutationWebhook bool
}

func applyOptions(opts []Option) options {
	o := options{
		ignoreAggregatedRoles: false,
		ignoreMutationWebhook: true,
		normalizer:            GetNoopNormalizer(),
		log:                   klogr.New(),
		log:                   textlogger.NewLogger(textlogger.NewConfig()),
	}
	for _, opt := range opts {
		opt(&o)

@@ -27,6 +39,37 @@ func applyOptions(opts []Option) options {
	return o
}

type KubeApplier interface {
	ApplyResource(ctx context.Context, obj *unstructured.Unstructured, dryRunStrategy cmdutil.DryRunStrategy, force, validate, serverSideApply bool, manager string) (string, error)
}

// ServerSideDryRunner defines the contract to run a server-side apply in
// dryrun mode.
type ServerSideDryRunner interface {
	Run(ctx context.Context, obj *unstructured.Unstructured, manager string) (string, error)
}

// K8sServerSideDryRunner is the Kubernetes implementation of ServerSideDryRunner.
type K8sServerSideDryRunner struct {
	dryrunApplier KubeApplier
}

// NewK8sServerSideDryRunner will instantiate a new K8sServerSideDryRunner with
// the given kubeApplier.
func NewK8sServerSideDryRunner(kubeApplier KubeApplier) *K8sServerSideDryRunner {
	return &K8sServerSideDryRunner{
		dryrunApplier: kubeApplier,
	}
}

// Run invokes a kubernetes server-side apply with the given obj and the
// given manager in dryrun mode. It returns the predicted live state as a
// JSON string.
func (kdr *K8sServerSideDryRunner) Run(ctx context.Context, obj *unstructured.Unstructured, manager string) (string, error) {
	//nolint:wrapcheck // trivial function, don't bother wrapping
	return kdr.dryrunApplier.ApplyResource(ctx, obj, cmdutil.DryRunServer, false, false, true, manager)
}

func IgnoreAggregatedRoles(ignore bool) Option {
	return func(o *options) {
		o.ignoreAggregatedRoles = ignore

@@ -44,3 +87,39 @@ func WithLogr(log logr.Logger) Option {
		o.log = log
	}
}

func WithStructuredMergeDiff(smd bool) Option {
	return func(o *options) {
		o.structuredMergeDiff = smd
	}
}

func WithGVKParser(parser *managedfields.GvkParser) Option {
	return func(o *options) {
		o.gvkParser = parser
	}
}

func WithManager(manager string) Option {
	return func(o *options) {
		o.manager = manager
	}
}

func WithServerSideDiff(ssd bool) Option {
	return func(o *options) {
		o.serverSideDiff = ssd
	}
}

func WithIgnoreMutationWebhook(mw bool) Option {
	return func(o *options) {
		o.ignoreMutationWebhook = mw
	}
}

func WithServerSideDryRunner(ssadr ServerSideDryRunner) Option {
	return func(o *options) {
		o.serverSideDryRunner = ssadr
	}
}
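Taken together, the new options enable a server-side diff path: WithServerSideDiff switches the strategy, and the predicted live state comes from a ServerSideDryRunner rather than a purely local merge. A hedged wiring sketch; the Diff entry point, the applier value, and the manager name are assumptions here, since the diff.go changes themselves are suppressed above:

// applier is some value implementing KubeApplier (e.g. a kubectl-backed applier).
runner := NewK8sServerSideDryRunner(applier)

res, err := Diff(configObj, liveObj,
	WithServerSideDiff(true),
	WithServerSideDryRunner(runner),
	WithManager("example-manager"),
	WithGVKParser(parser),
)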
File diff suppressed because it is too large.
@@ -0,0 +1,2 @@
Please check the doc.go file for more details about
how to use and maintain the code in this package.
@@ -0,0 +1,47 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package fieldmanager

import (
	"bytes"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
)

// EmptyFields represents a set with no paths
// It looks like metav1.Fields{Raw: []byte("{}")}
var EmptyFields = func() metav1.FieldsV1 {
	f, err := SetToFields(*fieldpath.NewSet())
	if err != nil {
		panic("should never happen")
	}
	return f
}()

// FieldsToSet creates a set paths from an input trie of fields
func FieldsToSet(f metav1.FieldsV1) (s fieldpath.Set, err error) {
	err = s.FromJSON(bytes.NewReader(f.Raw))
	return s, err
}

// SetToFields creates a trie of fields from an input set of paths
func SetToFields(s fieldpath.Set) (f metav1.FieldsV1, err error) {
	f.Raw, err = s.ToJSON()
	return f, err
}
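FieldsToSet and SetToFields round-trip between the FieldsV1 wire format (a JSON trie) and the fieldpath.Set representation used by structured-merge-diff. A minimal sketch of the round trip, assuming it runs somewhere with this package imported as fieldmanager (the raw JSON value is illustrative):

raw := metav1.FieldsV1{Raw: []byte(`{"f:metadata":{"f:labels":{"f:app":{}}}}`)}

set, err := fieldmanager.FieldsToSet(raw) // JSON trie -> fieldpath.Set
if err != nil {
	log.Fatal(err)
}
fmt.Println(set.Size()) // number of leaf paths in the set

back, err := fieldmanager.SetToFields(set) // fieldpath.Set -> JSON trie
if err != nil {
	log.Fatal(err)
}
fmt.Println(string(back.Raw))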
@@ -0,0 +1,248 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package fieldmanager

import (
	"encoding/json"
	"fmt"
	"sort"

	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
)

// ManagedInterface groups a fieldpath.ManagedFields together with the timestamps associated with each operation.
type ManagedInterface interface {
	// Fields gets the fieldpath.ManagedFields.
	Fields() fieldpath.ManagedFields

	// Times gets the timestamps associated with each operation.
	Times() map[string]*metav1.Time
}

type managedStruct struct {
	fields fieldpath.ManagedFields
	times  map[string]*metav1.Time
}

var _ ManagedInterface = &managedStruct{}

// Fields implements ManagedInterface.
func (m *managedStruct) Fields() fieldpath.ManagedFields {
	return m.fields
}

// Times implements ManagedInterface.
func (m *managedStruct) Times() map[string]*metav1.Time {
	return m.times
}

// NewEmptyManaged creates an empty ManagedInterface.
func NewEmptyManaged() ManagedInterface {
	return NewManaged(fieldpath.ManagedFields{}, map[string]*metav1.Time{})
}

// NewManaged creates a ManagedInterface from a fieldpath.ManagedFields and the timestamps associated with each operation.
func NewManaged(f fieldpath.ManagedFields, t map[string]*metav1.Time) ManagedInterface {
	return &managedStruct{
		fields: f,
		times:  t,
	}
}

// RemoveObjectManagedFields removes the ManagedFields from the object
// before we merge so that it doesn't appear in the ManagedFields
// recursively.
func RemoveObjectManagedFields(obj runtime.Object) {
	accessor, err := meta.Accessor(obj)
	if err != nil {
		panic(fmt.Sprintf("couldn't get accessor: %v", err))
	}
	accessor.SetManagedFields(nil)
}

// EncodeObjectManagedFields converts and stores the fieldpathManagedFields into the objects ManagedFields
func EncodeObjectManagedFields(obj runtime.Object, managed ManagedInterface) error {
	accessor, err := meta.Accessor(obj)
	if err != nil {
		panic(fmt.Sprintf("couldn't get accessor: %v", err))
	}

	encodedManagedFields, err := encodeManagedFields(managed)
	if err != nil {
		return fmt.Errorf("failed to convert back managed fields to API: %v", err)
	}
	accessor.SetManagedFields(encodedManagedFields)

	return nil
}

// DecodeManagedFields converts ManagedFields from the wire format (api format)
// to the format used by sigs.k8s.io/structured-merge-diff
func DecodeManagedFields(encodedManagedFields []metav1.ManagedFieldsEntry) (ManagedInterface, error) {
	managed := managedStruct{}
	managed.fields = make(fieldpath.ManagedFields, len(encodedManagedFields))
	managed.times = make(map[string]*metav1.Time, len(encodedManagedFields))

	for i, encodedVersionedSet := range encodedManagedFields {
		switch encodedVersionedSet.Operation {
		case metav1.ManagedFieldsOperationApply, metav1.ManagedFieldsOperationUpdate:
		default:
			return nil, fmt.Errorf("operation must be `Apply` or `Update`")
		}
		if len(encodedVersionedSet.APIVersion) < 1 {
			return nil, fmt.Errorf("apiVersion must not be empty")
		}
		switch encodedVersionedSet.FieldsType {
		case "FieldsV1":
			// Valid case.
		case "":
			return nil, fmt.Errorf("missing fieldsType in managed fields entry %d", i)
		default:
			return nil, fmt.Errorf("invalid fieldsType %q in managed fields entry %d", encodedVersionedSet.FieldsType, i)
		}
		manager, err := BuildManagerIdentifier(&encodedVersionedSet)
		if err != nil {
			return nil, fmt.Errorf("error decoding manager from %v: %v", encodedVersionedSet, err)
		}
		managed.fields[manager], err = decodeVersionedSet(&encodedVersionedSet)
		if err != nil {
			return nil, fmt.Errorf("error decoding versioned set from %v: %v", encodedVersionedSet, err)
		}
		managed.times[manager] = encodedVersionedSet.Time
	}
	return &managed, nil
}

// BuildManagerIdentifier creates a manager identifier string from a ManagedFieldsEntry
func BuildManagerIdentifier(encodedManager *metav1.ManagedFieldsEntry) (manager string, err error) {
	encodedManagerCopy := *encodedManager

	// Never include fields type in the manager identifier
	encodedManagerCopy.FieldsType = ""

	// Never include the fields in the manager identifier
	encodedManagerCopy.FieldsV1 = nil

	// Never include the time in the manager identifier
	encodedManagerCopy.Time = nil

	// For appliers, don't include the APIVersion in the manager identifier,
	// so it will always have the same manager identifier each time it applied.
	if encodedManager.Operation == metav1.ManagedFieldsOperationApply {
		encodedManagerCopy.APIVersion = ""
	}

	// Use the remaining fields to build the manager identifier
	b, err := json.Marshal(&encodedManagerCopy)
	if err != nil {
		return "", fmt.Errorf("error marshalling manager identifier: %v", err)
	}

	return string(b), nil
}

func decodeVersionedSet(encodedVersionedSet *metav1.ManagedFieldsEntry) (versionedSet fieldpath.VersionedSet, err error) {
	fields := EmptyFields
	if encodedVersionedSet.FieldsV1 != nil {
		fields = *encodedVersionedSet.FieldsV1
	}
	set, err := FieldsToSet(fields)
	if err != nil {
		return nil, fmt.Errorf("error decoding set: %v", err)
	}
	return fieldpath.NewVersionedSet(&set, fieldpath.APIVersion(encodedVersionedSet.APIVersion), encodedVersionedSet.Operation == metav1.ManagedFieldsOperationApply), nil
}

// encodeManagedFields converts ManagedFields from the format used by
// sigs.k8s.io/structured-merge-diff to the wire format (api format)
func encodeManagedFields(managed ManagedInterface) (encodedManagedFields []metav1.ManagedFieldsEntry, err error) {
	if len(managed.Fields()) == 0 {
		return nil, nil
	}
	encodedManagedFields = []metav1.ManagedFieldsEntry{}
	for manager := range managed.Fields() {
		versionedSet := managed.Fields()[manager]
		v, err := encodeManagerVersionedSet(manager, versionedSet)
		if err != nil {
			return nil, fmt.Errorf("error encoding versioned set for %v: %v", manager, err)
		}
		if t, ok := managed.Times()[manager]; ok {
			v.Time = t
		}
		encodedManagedFields = append(encodedManagedFields, *v)
	}
	return sortEncodedManagedFields(encodedManagedFields)
}

func sortEncodedManagedFields(encodedManagedFields []metav1.ManagedFieldsEntry) (sortedManagedFields []metav1.ManagedFieldsEntry, err error) {
	sort.Slice(encodedManagedFields, func(i, j int) bool {
		p, q := encodedManagedFields[i], encodedManagedFields[j]

		if p.Operation != q.Operation {
			return p.Operation < q.Operation
		}

		pSeconds, qSeconds := int64(0), int64(0)
		if p.Time != nil {
			pSeconds = p.Time.Unix()
		}
		if q.Time != nil {
			qSeconds = q.Time.Unix()
		}
		if pSeconds != qSeconds {
			return pSeconds < qSeconds
		}

		if p.Manager != q.Manager {
			return p.Manager < q.Manager
		}

		if p.APIVersion != q.APIVersion {
			return p.APIVersion < q.APIVersion
		}
		return p.Subresource < q.Subresource
	})

	return encodedManagedFields, nil
}

func encodeManagerVersionedSet(manager string, versionedSet fieldpath.VersionedSet) (encodedVersionedSet *metav1.ManagedFieldsEntry, err error) {
	encodedVersionedSet = &metav1.ManagedFieldsEntry{}

	// Get as many fields as we can from the manager identifier
	err = json.Unmarshal([]byte(manager), encodedVersionedSet)
	if err != nil {
		return nil, fmt.Errorf("error unmarshalling manager identifier %v: %v", manager, err)
	}

	// Get the APIVersion, Operation, and Fields from the VersionedSet
	encodedVersionedSet.APIVersion = string(versionedSet.APIVersion())
	if versionedSet.Applied() {
		encodedVersionedSet.Operation = metav1.ManagedFieldsOperationApply
	}
	encodedVersionedSet.FieldsType = "FieldsV1"
	fields, err := SetToFields(*versionedSet.Set())
	if err != nil {
		return nil, fmt.Errorf("error encoding set: %v", err)
	}
	encodedVersionedSet.FieldsV1 = &fields

	return encodedVersionedSet, nil
}
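DecodeManagedFields is what converts a live object's metadata.managedFields into the structured-merge-diff representation. A minimal sketch decoding a single Apply entry, assuming this package is imported as fieldmanager (the manager name and field paths are illustrative):

entries := []metav1.ManagedFieldsEntry{{
	Manager:    "example-manager",
	Operation:  metav1.ManagedFieldsOperationApply,
	APIVersion: "apps/v1",
	FieldsType: "FieldsV1",
	FieldsV1:   &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)},
}}

managed, err := fieldmanager.DecodeManagedFields(entries)
if err != nil {
	log.Fatal(err)
}
for manager, versioned := range managed.Fields() {
	// the manager identifier is the JSON built by BuildManagerIdentifier
	fmt.Printf("%s owns %d field path(s)\n", manager, versioned.Set().Size())
}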
@@ -0,0 +1,130 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package fieldmanager

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/util/managedfields"
	"k8s.io/kube-openapi/pkg/util/proto"
	"sigs.k8s.io/structured-merge-diff/v4/typed"
	"sigs.k8s.io/structured-merge-diff/v4/value"
)

// TypeConverter allows you to convert from runtime.Object to
// typed.TypedValue and the other way around.
type TypeConverter interface {
	ObjectToTyped(runtime.Object) (*typed.TypedValue, error)
	TypedToObject(*typed.TypedValue) (runtime.Object, error)
}

// DeducedTypeConverter is a TypeConverter for CRDs that don't have a
// schema. It does implement the same interface though (and create the
// same types of objects), so that everything can still work the same.
// CRDs are merged with all their fields being "atomic" (lists
// included).
//
// Note that this is not going to be sufficient for converting to/from
// CRDs that have a schema defined (we don't support that schema yet).
// TODO(jennybuckley): Use the schema provided by a CRD if it exists.
type DeducedTypeConverter struct{}

var _ TypeConverter = DeducedTypeConverter{}

// ObjectToTyped converts an object into a TypedValue with a "deduced type".
func (DeducedTypeConverter) ObjectToTyped(obj runtime.Object) (*typed.TypedValue, error) {
	switch o := obj.(type) {
	case *unstructured.Unstructured:
		return typed.DeducedParseableType.FromUnstructured(o.UnstructuredContent())
	default:
		return typed.DeducedParseableType.FromStructured(obj)
	}
}

// TypedToObject transforms the typed value into a runtime.Object. That
// is not specific to deduced type.
func (DeducedTypeConverter) TypedToObject(value *typed.TypedValue) (runtime.Object, error) {
	return valueToObject(value.AsValue())
}

type typeConverter struct {
	parser *managedfields.GvkParser
}

var _ TypeConverter = &typeConverter{}

// NewTypeConverter builds a TypeConverter from a proto.Models. This
// will automatically find the proper version of the object, and the
// corresponding schema information.
func NewTypeConverter(models proto.Models, preserveUnknownFields bool) (TypeConverter, error) {
	parser, err := managedfields.NewGVKParser(models, preserveUnknownFields)
	if err != nil {
		return nil, err
	}
	return &typeConverter{parser: parser}, nil
}

func (c *typeConverter) ObjectToTyped(obj runtime.Object) (*typed.TypedValue, error) {
	gvk := obj.GetObjectKind().GroupVersionKind()
	t := c.parser.Type(gvk)
	if t == nil {
		return nil, newNoCorrespondingTypeError(gvk)
	}
	switch o := obj.(type) {
	case *unstructured.Unstructured:
		return t.FromUnstructured(o.UnstructuredContent())
	default:
		return t.FromStructured(obj)
	}
}

func (c *typeConverter) TypedToObject(value *typed.TypedValue) (runtime.Object, error) {
	return valueToObject(value.AsValue())
}

func valueToObject(val value.Value) (runtime.Object, error) {
	vu := val.Unstructured()
	switch o := vu.(type) {
	case map[string]any:
		return &unstructured.Unstructured{Object: o}, nil
	default:
		return nil, fmt.Errorf("failed to convert value to unstructured for type %T", vu)
	}
}

type noCorrespondingTypeErr struct {
	gvk schema.GroupVersionKind
}

func newNoCorrespondingTypeError(gvk schema.GroupVersionKind) error {
	return &noCorrespondingTypeErr{gvk: gvk}
}

func (k *noCorrespondingTypeErr) Error() string {
	return fmt.Sprintf("no corresponding type for %v", k.gvk)
}

func isNoCorrespondingTypeError(err error) bool {
	if err == nil {
		return false
	}
	_, ok := err.(*noCorrespondingTypeErr)
	return ok
}
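For objects without an OpenAPI schema the deduced converter treats every list and map as atomic, and round-tripping through it is the simplest way to see the TypeConverter contract in action. A small sketch, again assuming the package is imported as fieldmanager (the ConfigMap content is a placeholder):

var tc fieldmanager.TypeConverter = fieldmanager.DeducedTypeConverter{}

obj := &unstructured.Unstructured{Object: map[string]any{
	"apiVersion": "v1",
	"kind":       "ConfigMap",
	"metadata":   map[string]any{"name": "demo"},
	"data":       map[string]any{"key": "value"},
}}

typedVal, err := tc.ObjectToTyped(obj) // runtime.Object -> typed.TypedValue
if err != nil {
	log.Fatal(err)
}
back, err := tc.TypedToObject(typedVal) // and back again
if err != nil {
	log.Fatal(err)
}
fmt.Println(back.GetObjectKind().GroupVersionKind()) // v1, Kind=ConfigMap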
@@ -0,0 +1,101 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package fieldmanager

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
	"sigs.k8s.io/structured-merge-diff/v4/merge"
	"sigs.k8s.io/structured-merge-diff/v4/typed"
)

// versionConverter is an implementation of
// sigs.k8s.io/structured-merge-diff/merge.Converter
type versionConverter struct {
	typeConverter   TypeConverter
	objectConvertor runtime.ObjectConvertor
	hubGetter       func(from schema.GroupVersion) schema.GroupVersion
}

var _ merge.Converter = &versionConverter{}

// NewVersionConverter builds a VersionConverter from a TypeConverter and an ObjectConvertor.
func newVersionConverter(t TypeConverter, o runtime.ObjectConvertor, h schema.GroupVersion) merge.Converter {
	return &versionConverter{
		typeConverter:   t,
		objectConvertor: o,
		hubGetter: func(from schema.GroupVersion) schema.GroupVersion {
			return schema.GroupVersion{
				Group:   from.Group,
				Version: h.Version,
			}
		},
	}
}

// NewCRDVersionConverter builds a VersionConverter for CRDs from a TypeConverter and an ObjectConvertor.
func newCRDVersionConverter(t TypeConverter, o runtime.ObjectConvertor, h schema.GroupVersion) merge.Converter {
	return &versionConverter{
		typeConverter:   t,
		objectConvertor: o,
		hubGetter: func(from schema.GroupVersion) schema.GroupVersion {
			return h
		},
	}
}

// Convert implements sigs.k8s.io/structured-merge-diff/merge.Converter
func (v *versionConverter) Convert(object *typed.TypedValue, version fieldpath.APIVersion) (*typed.TypedValue, error) {
	// Convert the smd typed value to a kubernetes object.
	objectToConvert, err := v.typeConverter.TypedToObject(object)
	if err != nil {
		return object, err
	}

	// Parse the target groupVersion.
	groupVersion, err := schema.ParseGroupVersion(string(version))
	if err != nil {
		return object, err
	}

	// If attempting to convert to the same version as we already have, just return it.
	fromVersion := objectToConvert.GetObjectKind().GroupVersionKind().GroupVersion()
	if fromVersion == groupVersion {
		return object, nil
	}

	// Convert to internal
	internalObject, err := v.objectConvertor.ConvertToVersion(objectToConvert, v.hubGetter(fromVersion))
	if err != nil {
		return object, err
	}

	// Convert the object into the target version
	convertedObject, err := v.objectConvertor.ConvertToVersion(internalObject, groupVersion)
	if err != nil {
		return object, err
	}

	// Convert the object back to a smd typed value and return it.
	return v.typeConverter.ObjectToTyped(convertedObject)
}

// IsMissingVersionError
func (v *versionConverter) IsMissingVersionError(err error) bool {
	return runtime.IsNotRegisteredError(err) || isNoCorrespondingTypeError(err)
}
@@ -0,0 +1,25 @@
/*
Package fieldmanager is a special package as its main purpose
is to expose the dependencies required by the structured-merge-diff
library to calculate diffs when the server-side apply option is enabled.
The dependency tree necessary to have a `merge.Updater` instance
isn't trivial to implement, and the strategy used is borrowing a copy
from the Kubernetes apiserver codebase in order to expose the required
functionality.

Below is a list of borrowed files and a reference to which
package/file in Kubernetes they were copied from:

  - borrowed_fields.go: k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/fields.go
  - borrowed_managedfields.go: k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/managedfields.go
  - borrowed_typeconverter.go: k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/typeconverter.go
  - borrowed_versionconverter.go: k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/versionconverter.go

In order to keep maintenance as minimal as possible the borrowed
files are verbatim copies from Kubernetes. The private objects that
need to be exposed are wrapped in the wrapper.go file. Updating
the borrowed files should be trivial in most cases but must be done
manually as we have no control over future refactorings Kubernetes
might do.
*/
package fieldmanager
@@ -0,0 +1,22 @@
package fieldmanager

/*
In order to keep maintenance as minimal as possible the borrowed
files in this package are verbatim copies from Kubernetes. The
private objects that need to be exposed are wrapped and exposed
in this file.
*/

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/util/managedfields"
	"sigs.k8s.io/structured-merge-diff/v4/merge"
)

// NewVersionConverter will expose the version converter from the
// borrowed private function from k8s apiserver handler.
func NewVersionConverter(gvkParser *managedfields.GvkParser, o runtime.ObjectConvertor, h schema.GroupVersion) merge.Converter {
	tc := &typeConverter{parser: gvkParser}
	return newVersionConverter(tc, o, h)
}
@@ -0,0 +1,58 @@
// Code generated by mockery v2.38.0. DO NOT EDIT.

package mocks

import (
	context "context"

	mock "github.com/stretchr/testify/mock"

	unstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

// ServerSideDryRunner is an autogenerated mock type for the ServerSideDryRunner type
type ServerSideDryRunner struct {
	mock.Mock
}

// Run provides a mock function with given fields: ctx, obj, manager
func (_m *ServerSideDryRunner) Run(ctx context.Context, obj *unstructured.Unstructured, manager string) (string, error) {
	ret := _m.Called(ctx, obj, manager)

	if len(ret) == 0 {
		panic("no return value specified for Run")
	}

	var r0 string
	var r1 error
	if rf, ok := ret.Get(0).(func(context.Context, *unstructured.Unstructured, string) (string, error)); ok {
		return rf(ctx, obj, manager)
	}
	if rf, ok := ret.Get(0).(func(context.Context, *unstructured.Unstructured, string) string); ok {
		r0 = rf(ctx, obj, manager)
	} else {
		r0 = ret.Get(0).(string)
	}

	if rf, ok := ret.Get(1).(func(context.Context, *unstructured.Unstructured, string) error); ok {
		r1 = rf(ctx, obj, manager)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// NewServerSideDryRunner creates a new instance of ServerSideDryRunner. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewServerSideDryRunner(t interface {
	mock.TestingT
	Cleanup(func())
}) *ServerSideDryRunner {
	mock := &ServerSideDryRunner{}
	mock.Mock.Test(t)

	t.Cleanup(func() { mock.AssertExpectations(t) })

	return mock
}
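Being a standard mockery-generated testify mock, it can stand in for the real dry-run applier in diff tests. A usage sketch inside a test (the manager name and canned JSON response are placeholders):

func TestServerSideDryRunnerMock(t *testing.T) {
	runner := mocks.NewServerSideDryRunner(t)
	runner.On("Run", mock.Anything, mock.Anything, "example-manager").
		Return(`{"apiVersion":"v1","kind":"ConfigMap"}`, nil)

	// the mock returns the canned predicted live state
	predicted, err := runner.Run(context.Background(), &unstructured.Unstructured{}, "example-manager")
	require.NoError(t, err)
	require.Contains(t, predicted, "ConfigMap")
}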
@@ -0,0 +1,80 @@
package testdata

import _ "embed"

var (
	//go:embed smd-service-config.yaml
	ServiceConfigYAML string

	//go:embed smd-service-live.yaml
	ServiceLiveYAML string

	//go:embed smd-service-config-2-ports.yaml
	ServiceConfigWith2Ports string

	//go:embed smd-service-live-with-type.yaml
	LiveServiceWithTypeYAML string

	//go:embed smd-service-config-ports.yaml
	ServiceConfigWithSamePortsYAML string

	//go:embed smd-deploy-live.yaml
	DeploymentLiveYAML string

	//go:embed smd-deploy-config.yaml
	DeploymentConfigYAML string

	//go:embed smd-deploy2-live.yaml
	Deployment2LiveYAML string

	//go:embed smd-deploy2-config.yaml
	Deployment2ConfigYAML string

	//go:embed smd-deploy2-predicted-live.json
	Deployment2PredictedLiveJSONSSD string

	// OpenAPIV2Doc is a binary representation of the openapi
	// document available in a given k8s instance. To update
	// this file the following commands can be executed:
	//   kubectl proxy --port=7777 &
	//   curl -s -H Accept:application/com.github.proto-openapi.spec.v2@v1.0+protobuf http://localhost:7777/openapi/v2 > openapiv2.bin
	//
	//go:embed openapiv2.bin
	OpenAPIV2Doc []byte

	//go:embed ssd-service-config.yaml
	ServiceConfigYAMLSSD string

	//go:embed ssd-service-live.yaml
	ServiceLiveYAMLSSD string

	//go:embed ssd-service-predicted-live.json
	ServicePredictedLiveJSONSSD string

	//go:embed ssd-deploy-nested-config.yaml
	DeploymentNestedConfigYAMLSSD string

	//go:embed ssd-deploy-nested-live.yaml
	DeploymentNestedLiveYAMLSSD string

	//go:embed ssd-deploy-nested-predicted-live.json
	DeploymentNestedPredictedLiveJSONSSD string

	//go:embed ssd-deploy-with-manual-apply-config.yaml
	DeploymentApplyConfigYAMLSSD string

	//go:embed ssd-deploy-with-manual-apply-live.yaml
	DeploymentApplyLiveYAMLSSD string

	//go:embed ssd-deploy-with-manual-apply-predicted-live.json
	DeploymentApplyPredictedLiveJSONSSD string

	//go:embed ssd-svc-label-live.yaml
	ServiceLiveLabelYAMLSSD string

	//go:embed ssd-svc-no-label-config.yaml
	ServiceConfigNoLabelYAMLSSD string

	//go:embed ssd-svc-no-label-predicted-live.json
	ServicePredictedLiveNoLabelJSONSSD string
)
@@ -35,6 +35,19 @@
        {
          "name": "solr-http",
          "port": 8080
        },
        {
          "name": "solr-https",
          "port": 8443
        },
        {
          "name": "solr-node",
          "port": 8983,
          "protocol": "UDP"
        },
        {
          "name": "solr-zookeeper",
          "port": 9983
        }
      ]
    }
@@ -4,7 +4,7 @@
  "metadata": {
    "annotations": {
      "description": "A workaround to support a set of backend IPs for solr",
      "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"kind\":\"Endpoints\",\"metadata\":{\"annotations\":{\"description\":\"A workaround to support a set of backend IPs for solr\",\"linkerd.io/inject\":\"disabled\"},\"labels\":{\"app.kubernetes.io/instance\":\"guestbook\"},\"name\":\"solrcloud\",\"namespace\":\"default\"},\"subsets\":[{\"addresses\":[{\"ip\":\"172.20.10.97\"},{\"ip\":\"172.20.10.98\"},{\"ip\":\"172.20.10.99\"},{\"ip\":\"172.20.10.100\"},{\"ip\":\"172.20.10.101\"}],\"ports\":[{\"name\":\"solr-http\",\"port\":8080}]}]}\n",
      "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"kind\":\"Endpoints\",\"metadata\":{\"annotations\":{\"description\":\"A workaround to support a set of backend IPs for solr\",\"linkerd.io/inject\":\"disabled\"},\"labels\":{\"app.kubernetes.io/instance\":\"guestbook\"},\"name\":\"solrcloud\",\"namespace\":\"default\"},\"subsets\":[{\"addresses\":[{\"ip\":\"172.20.10.97\"},{\"ip\":\"172.20.10.98\"},{\"ip\":\"172.20.10.99\"},{\"ip\":\"172.20.10.100\"},{\"ip\":\"172.20.10.101\"}],\"ports\":[{\"name\":\"solr-http\",\"port\":8080},{\"name\":\"solr-https\",\"port\":8443},{\"name\":\"solr-node\",\"port\":8983,\"protocol\":\"UDP\"},{\"name\":\"solr-zookeeper\",\"port\":9983}]}]}\n",
      "linkerd.io/inject": "disabled"
    },
    "creationTimestamp": null,

@@ -32,24 +32,17 @@
      },
      "manager": "main",
      "operation": "Update",
      "time": "2020-10-09T17:26:49Z"
      "time": null
    }
  ],
  "name": "solrcloud",
  "namespace": "default",
  "resourceVersion": "139834",
  "selfLink": "/api/v1/namespaces/default/endpoints/solrcloud",
  "uid": "f11285f4-987b-4194-bda8-6372b3f3f08f"
  "resourceVersion": "2336",
  "uid": "439a86ee-cbf9-4717-9ce3-d44079333a27"
},
"subsets": [
  {
    "addresses": [
      {
        "ip": "172.20.10.100"
      },
      {
        "ip": "172.20.10.101"
      },
      {
        "ip": "172.20.10.97"
      },

@@ -58,6 +51,12 @@
      },
      {
        "ip": "172.20.10.99"
      },
      {
        "ip": "172.20.10.100"
      },
      {
        "ip": "172.20.10.101"
      }
    ],
    "ports": [

@@ -65,6 +64,21 @@
      {
        "name": "solr-http",
        "port": 8080,
        "protocol": "TCP"
      },
      {
        "name": "solr-https",
        "port": 8443,
        "protocol": "TCP"
      },
      {
        "name": "solr-node",
        "port": 8983,
        "protocol": "UDP"
      },
      {
        "name": "solr-zookeeper",
        "port": 9983,
        "protocol": "TCP"
      }
    ]
  }
File diff suppressed because it is too large.
@@ -0,0 +1,33 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: missing
    applications.argoproj.io/app-name: nginx
    something-else: bla
  name: nginx-deployment
  namespace: default
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
        applications.argoproj.io/app-name: nginx
    spec:
      containers:
        - image: 'nginx:1.23.1'
          imagePullPolicy: Never
          livenessProbe:
            exec:
              command:
                - cat
                - non-existent-file
            initialDelaySeconds: 5
            periodSeconds: 180
          name: nginx
          ports:
            - containerPort: 80
@@ -0,0 +1,149 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  annotations:
    deployment.kubernetes.io/revision: '1'
  creationTimestamp: '2022-09-18T23:50:25Z'
  generation: 1
  labels:
    app: missing
    applications.argoproj.io/app-name: nginx
    something-else: bla
  managedFields:
    - apiVersion: apps/v1
      fieldsType: FieldsV1
      fieldsV1:
        'f:metadata':
          'f:labels':
            'f:app': {}
            'f:applications.argoproj.io/app-name': {}
            'f:something-else': {}
        'f:spec':
          'f:replicas': {}
          'f:selector': {}
          'f:template':
            'f:metadata':
              'f:labels':
                'f:app': {}
                'f:applications.argoproj.io/app-name': {}
            'f:spec':
              'f:containers':
                'k:{"name":"nginx"}':
                  .: {}
                  'f:image': {}
                  'f:imagePullPolicy': {}
                  'f:livenessProbe':
                    'f:exec':
                      'f:command': {}
                    'f:initialDelaySeconds': {}
                    'f:periodSeconds': {}
                  'f:name': {}
                  'f:ports':
                    'k:{"containerPort":80,"protocol":"TCP"}':
                      .: {}
                      'f:containerPort': {}
      manager: argocd-controller
      operation: Apply
      time: '2022-09-18T23:50:25Z'
    - apiVersion: apps/v1
      fieldsType: FieldsV1
      fieldsV1:
        'f:metadata':
          'f:annotations':
            .: {}
            'f:deployment.kubernetes.io/revision': {}
        'f:status':
          'f:availableReplicas': {}
          'f:conditions':
            .: {}
            'k:{"type":"Available"}':
              .: {}
              'f:lastTransitionTime': {}
              'f:lastUpdateTime': {}
              'f:message': {}
              'f:reason': {}
              'f:status': {}
              'f:type': {}
            'k:{"type":"Progressing"}':
              .: {}
              'f:lastTransitionTime': {}
              'f:lastUpdateTime': {}
              'f:message': {}
              'f:reason': {}
              'f:status': {}
              'f:type': {}
          'f:observedGeneration': {}
          'f:readyReplicas': {}
          'f:replicas': {}
          'f:updatedReplicas': {}
      manager: kube-controller-manager
      operation: Update
      subresource: status
      time: '2022-09-23T18:30:59Z'
  name: nginx-deployment
  namespace: default
  resourceVersion: '7492752'
  uid: 731f7434-d3d9-47fa-b179-d9368a84f7c9
spec:
  progressDeadlineSeconds: 600
  replicas: 2
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      app: nginx
  strategy:
    rollingUpdate:
      maxSurge: 25%
      maxUnavailable: 25%
    type: RollingUpdate
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: nginx
        applications.argoproj.io/app-name: nginx
    spec:
      containers:
        - image: 'nginx:1.23.1'
          imagePullPolicy: Never
          livenessProbe:
            exec:
              command:
                - cat
                - non-existent-file
            failureThreshold: 3
            initialDelaySeconds: 5
            periodSeconds: 180
            successThreshold: 1
            timeoutSeconds: 1
          name: nginx
          ports:
            - containerPort: 80
              protocol: TCP
          resources: {}
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30
status:
  availableReplicas: 2
  conditions:
    - lastTransitionTime: '2022-09-18T23:50:25Z'
      lastUpdateTime: '2022-09-18T23:50:26Z'
      message: ReplicaSet "nginx-deployment-6d68ff5f86" has successfully progressed.
      reason: NewReplicaSetAvailable
      status: 'True'
      type: Progressing
    - lastTransitionTime: '2022-09-23T18:30:59Z'
      lastUpdateTime: '2022-09-23T18:30:59Z'
      message: Deployment has minimum availability.
      reason: MinimumReplicasAvailable
      status: 'True'
      type: Available
  observedGeneration: 1
  readyReplicas: 2
  replicas: 2
  updatedReplicas: 2
@@ -0,0 +1,36 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: missing
    applications.argoproj.io/app-name: nginx
    something-else: bla
  name: nginx-deployment
  namespace: default
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
        applications.argoproj.io/app-name: nginx
    spec:
      containers:
        - image: 'nginx:1.23.1'
          imagePullPolicy: Never
          livenessProbe:
            exec:
              command:
                - cat
                - non-existent-file
            initialDelaySeconds: 5
            periodSeconds: 180
          name: nginx
          ports:
            - containerPort: 8081
              protocol: UDP
            - containerPort: 80
              protocol: TCP
@@ -0,0 +1,161 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  annotations:
    deployment.kubernetes.io/revision: '1'
  creationTimestamp: '2022-09-18T23:50:25Z'
  generation: 1
  labels:
    app: missing
    applications.argoproj.io/app-name: nginx
    something-else: bla
  managedFields:
    - apiVersion: apps/v1
      fieldsType: FieldsV1
      fieldsV1:
        'f:metadata':
          'f:labels':
            'f:app': {}
            'f:applications.argoproj.io/app-name': {}
            'f:something-else': {}
        'f:spec':
          'f:replicas': {}
          'f:selector': {}
          'f:template':
            'f:metadata':
              'f:labels':
                'f:app': {}
                'f:applications.argoproj.io/app-name': {}
            'f:spec':
              'f:containers':
                'k:{"name":"nginx"}':
                  .: {}
                  'f:image': {}
                  'f:imagePullPolicy': {}
                  'f:livenessProbe':
                    'f:exec':
                      'f:command': {}
                    'f:initialDelaySeconds': {}
                    'f:periodSeconds': {}
                  'f:name': {}
                  'f:ports':
                    'k:{"containerPort":80,"protocol":"TCP"}':
                      .: {}
                      'f:containerPort': {}
                      'f:protocol': {}
                  'f:resources':
                    'f:requests':
                      'f:cpu': {}
                      'f:memory': {}
      manager: argocd-controller
      operation: Apply
      time: '2022-09-18T23:50:25Z'
    - apiVersion: apps/v1
      fieldsType: FieldsV1
      fieldsV1:
        'f:metadata':
          'f:annotations':
            .: {}
            'f:deployment.kubernetes.io/revision': {}
        'f:status':
          'f:availableReplicas': {}
          'f:conditions':
            .: {}
            'k:{"type":"Available"}':
              .: {}
              'f:lastTransitionTime': {}
              'f:lastUpdateTime': {}
              'f:message': {}
              'f:reason': {}
              'f:status': {}
              'f:type': {}
            'k:{"type":"Progressing"}':
              .: {}
              'f:lastTransitionTime': {}
              'f:lastUpdateTime': {}
              'f:message': {}
              'f:reason': {}
              'f:status': {}
              'f:type': {}
          'f:observedGeneration': {}
          'f:readyReplicas': {}
          'f:replicas': {}
          'f:updatedReplicas': {}
      manager: kube-controller-manager
      operation: Update
      subresource: status
      time: '2022-09-23T18:30:59Z'
  name: nginx-deployment
  namespace: default
  resourceVersion: '7492752'
  uid: 731f7434-d3d9-47fa-b179-d9368a84f7c9
spec:
  progressDeadlineSeconds: 600
  replicas: 2
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      app: nginx
  strategy:
    rollingUpdate:
      maxSurge: 25%
      maxUnavailable: 25%
    type: RollingUpdate
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: nginx
        applications.argoproj.io/app-name: nginx
    spec:
      containers:
        - image: 'nginx:1.23.1'
          imagePullPolicy: Never
          livenessProbe:
            exec:
              command:
                - cat
                - non-existent-file
            failureThreshold: 3
            initialDelaySeconds: 5
            periodSeconds: 180
            successThreshold: 1
            timeoutSeconds: 1
          name: nginx
          ports:
            - containerPort: 80
              protocol: TCP
            - containerPort: 8080
              protocol: TCP
            - containerPort: 8081
              protocol: UDP
          resources:
            requests:
              memory: 512Mi
              cpu: 500m
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30
status:
  availableReplicas: 2
  conditions:
    - lastTransitionTime: '2022-09-18T23:50:25Z'
      lastUpdateTime: '2022-09-18T23:50:26Z'
      message: ReplicaSet "nginx-deployment-6d68ff5f86" has successfully progressed.
      reason: NewReplicaSetAvailable
      status: 'True'
      type: Progressing
    - lastTransitionTime: '2022-09-23T18:30:59Z'
      lastUpdateTime: '2022-09-23T18:30:59Z'
      message: Deployment has minimum availability.
      reason: MinimumReplicasAvailable
      status: 'True'
      type: Available
  observedGeneration: 1
  readyReplicas: 2
  replicas: 2
  updatedReplicas: 2
@@ -0,0 +1,124 @@
{
  "apiVersion": "apps/v1",
  "kind": "Deployment",
  "metadata": {
    "labels": {
      "app": "missing",
      "applications.argoproj.io/app-name": "nginx",
      "something-else": "bla"
    },
    "name": "nginx-deployment",
    "namespace": "default",
    "managedFields": [
      {
        "apiVersion": "apps/v1",
        "fieldsType": "FieldsV1",
        "fieldsV1": {
          "f:metadata": {
            "f:labels": {
              "f:app": {},
              "f:applications.argoproj.io/app-name": {},
              "f:something-else": {}
            }
          },
          "f:spec": {
            "f:replicas": {},
            "f:selector": {},
            "f:template": {
              "f:metadata": {
                "f:labels": {
                  "f:app": {},
                  "f:applications.argoproj.io/app-name": {}
                }
              },
              "f:spec": {
                "f:containers": {
                  "k:{\"name\":\"nginx\"}": {
                    ".": {},
                    "f:image": {},
                    "f:imagePullPolicy": {},
                    "f:livenessProbe": {
                      "f:exec": {
                        "f:command": {}
                      },
                      "f:initialDelaySeconds": {},
                      "f:periodSeconds": {}
                    },
                    "f:name": {},
                    "f:ports": {
                      "k:{\"containerPort\":80,\"protocol\":\"TCP\"}": {
                        ".": {},
                        "f:containerPort": {},
                        "f:protocol": {}
                      }
                    },
                    "f:resources": {
                      "f:requests": {
                        "f:cpu": {},
                        "f:memory": {}
                      }
                    }
                  }
                }
              }
            }
          }
        },
        "manager": "argocd-controller",
        "operation": "Apply",
        "time": "2022-09-18T23:50:25Z"
      }
    ]
  },
  "spec": {
    "replicas": 2,
    "selector": {
      "matchLabels": {
        "app": "nginx"
      }
    },
    "template": {
      "metadata": {
        "labels": {
          "app": "nginx",
          "applications.argoproj.io/app-name": "nginx"
        }
      },
      "spec": {
        "containers": [
          {
            "image": "nginx:1.23.1",
            "imagePullPolicy": "Never",
            "livenessProbe": {
              "exec": {
                "command": [
                  "cat",
                  "non-existent-file"
                ]
              },
              "initialDelaySeconds": 5,
              "periodSeconds": 180
            },
            "name": "nginx",
            "ports": [
              {
                "containerPort": 8081,
                "protocol": "UDP"
              },
              {
                "containerPort": 80,
                "protocol": "TCP"
              }
            ],
            "resources": {
              "requests": {
                "memory": "512Mi",
                "cpu": "500m"
              }
            }
          }
        ]
      }
    }
  }
}
@@ -0,0 +1,18 @@
apiVersion: v1
kind: Service
metadata:
  annotations:
    argocd.argoproj.io/sync-options: ServerSideApply=true
  labels:
    app.kubernetes.io/instance: big-crd
  name: multiple-protocol-port-svc
  namespace: default
spec:
  ports:
  - name: rtmpk
    port: 1986
    protocol: UDP
    targetPort: 1986
  - name: rtmp
    port: 1935
    targetPort: 1935
@@ -0,0 +1,29 @@
apiVersion: v1
kind: Service
metadata:
  annotations:
    argocd.argoproj.io/sync-options: ServerSideApply=true
  labels:
    app.kubernetes.io/instance: big-crd
  name: multiple-protocol-port-svc
  namespace: default
spec:
  ports:
  - name: rtmpk
    port: 1986
    protocol: UDP
    targetPort: 1986
  - name: rtmp
    port: 1935
    targetPort: 1935
  - name: rtmpq
    port: 1935
    protocol: UDP
    targetPort: 1935
  - name: https
    port: 443
    targetPort: 443
  - name: http3
    port: 443
    protocol: UDP
    targetPort: 443
@@ -0,0 +1,21 @@
apiVersion: v1
kind: Service
metadata:
  annotations:
    argocd.argoproj.io/sync-options: ServerSideApply=true
  labels:
    app.kubernetes.io/instance: big-crd
  name: multiple-protocol-port-svc
  namespace: default
spec:
  ports:
  - name: rtmpk
    port: 1986
    protocol: UDP
    targetPort: 1986
  - name: rtmp
    port: 1935
    targetPort: 1936
  - name: https
    port: 443
    targetPort: 443
@@ -0,0 +1,110 @@
apiVersion: v1
kind: Service
metadata:
  annotations:
    argocd.argoproj.io/sync-options: ServerSideApply=true
    kubectl.kubernetes.io/last-applied-configuration: >
      {"apiVersion":"v1","kind":"Service","metadata":{"annotations":{"argocd.argoproj.io/sync-options":"ServerSideApply=true"},"name":"multiple-protocol-port-svc","namespace":"default"},"spec":{"ports":[{"name":"rtmpk","port":1986,"protocol":"UDP","targetPort":1986},{"name":"rtmp","port":1935,"protocol":"TCP","targetPort":1935},{"name":"rtmpq","port":1935,"protocol":"UDP","targetPort":1935}]}}
  creationTimestamp: '2022-06-24T19:37:02Z'
  labels:
    app.kubernetes.io/instance: big-crd
  managedFields:
  - apiVersion: v1
    fieldsType: FieldsV1
    fieldsV1:
      'f:metadata':
        'f:annotations':
          'f:argocd.argoproj.io/sync-options': {}
        'f:labels':
          'f:app.kubernetes.io/instance': {}
      'f:spec':
        'f:ports':
          'k:{"port":1935,"protocol":"TCP"}':
            .: {}
            'f:name': {}
            'f:port': {}
            'f:targetPort': {}
          'k:{"port":1986,"protocol":"UDP"}':
            .: {}
            'f:name': {}
            'f:port': {}
            'f:protocol': {}
            'f:targetPort': {}
          'k:{"port":443,"protocol":"TCP"}':
            .: {}
            'f:name': {}
            'f:port': {}
            'f:targetPort': {}
        'f:type': {}
    manager: argocd-controller
    operation: Apply
    time: '2022-06-30T16:28:09Z'
  - apiVersion: v1
    fieldsType: FieldsV1
    fieldsV1:
      'f:metadata':
        'f:annotations':
          .: {}
          'f:kubectl.kubernetes.io/last-applied-configuration': {}
      'f:spec':
        'f:internalTrafficPolicy': {}
        'f:ports':
          .: {}
          'k:{"port":1935,"protocol":"TCP"}':
            .: {}
            'f:name': {}
            'f:port': {}
            'f:protocol': {}
            'f:targetPort': {}
          'k:{"port":1986,"protocol":"UDP"}':
            .: {}
            'f:name': {}
            'f:port': {}
            'f:protocol': {}
            'f:targetPort': {}
        'f:sessionAffinity': {}
    manager: kubectl-client-side-apply
    operation: Update
    time: '2022-06-25T04:18:10Z'
  - apiVersion: v1
    fieldsType: FieldsV1
    fieldsV1:
      'f:status':
        'f:loadBalancer':
          'f:ingress': {}
    manager: kube-vpnkit-forwarder
    operation: Update
    subresource: status
    time: '2022-06-29T12:36:34Z'
  name: multiple-protocol-port-svc
  namespace: default
  resourceVersion: '2138591'
  uid: af42e800-bd33-4412-bc77-d204d298613d
spec:
  clusterIP: 10.111.193.74
  clusterIPs:
  - 10.111.193.74
  externalTrafficPolicy: Cluster
  ipFamilies:
  - IPv4
  ipFamilyPolicy: SingleStack
  ports:
  - name: rtmpk
    nodePort: 31648
    port: 1986
    protocol: UDP
    targetPort: 1986
  - name: rtmp
    nodePort: 30018
    port: 1935
    protocol: TCP
    targetPort: 1935
  - name: https
    nodePort: 31975
    port: 443
    protocol: TCP
    targetPort: 443
  sessionAffinity: None
  type: NodePort
status:
  loadBalancer: {}
@@ -0,0 +1,83 @@
apiVersion: v1
kind: Service
metadata:
  annotations:
    argocd.argoproj.io/sync-options: ServerSideApply=true
    kubectl.kubernetes.io/last-applied-configuration: >
      {"apiVersion":"v1","kind":"Service","metadata":{"annotations":{"argocd.argoproj.io/sync-options":"ServerSideApply=true"},"name":"multiple-protocol-port-svc","namespace":"default"},"spec":{"ports":[{"name":"rtmpk","port":1986,"protocol":"UDP","targetPort":1986},{"name":"rtmp","port":1935,"targetPort":1935},{"name":"https","port":443,"targetPort":443}]}}
  creationTimestamp: '2022-06-24T19:37:02Z'
  labels:
    app.kubernetes.io/instance: big-crd
  managedFields:
  - apiVersion: v1
    fieldsType: FieldsV1
    fieldsV1:
      'f:metadata':
        'f:annotations':
          'f:argocd.argoproj.io/sync-options': {}
        'f:labels':
          'f:app.kubernetes.io/instance': {}
      'f:spec':
        'f:ports':
          'k:{"port":1935,"protocol":"TCP"}':
            .: {}
            'f:name': {}
            'f:port': {}
            'f:targetPort': {}
          'k:{"port":1986,"protocol":"UDP"}':
            .: {}
            'f:name': {}
            'f:port': {}
            'f:protocol': {}
            'f:targetPort': {}
          'k:{"port":443,"protocol":"TCP"}':
            .: {}
            'f:name': {}
            'f:port': {}
            'f:targetPort': {}
    manager: argocd-controller
    operation: Apply
    time: '2022-06-24T19:45:02Z'
  - apiVersion: v1
    fieldsType: FieldsV1
    fieldsV1:
      'f:metadata':
        'f:annotations':
          .: {}
          'f:argocd.argoproj.io/sync-options': {}
          'f:kubectl.kubernetes.io/last-applied-configuration': {}
      'f:spec':
        'f:internalTrafficPolicy': {}
        'f:sessionAffinity': {}
        'f:type': {}
    manager: kubectl-client-side-apply
    operation: Update
    time: '2022-06-24T19:37:02Z'
  name: multiple-protocol-port-svc
  namespace: default
  resourceVersion: '1825080'
  uid: af42e800-bd33-4412-bc77-d204d298613d
spec:
  clusterIP: 10.111.193.74
  clusterIPs:
  - 10.111.193.74
  ipFamilies:
  - IPv4
  ipFamilyPolicy: SingleStack
  ports:
  - name: rtmpk
    port: 1986
    protocol: UDP
    targetPort: 1986
  - name: rtmp
    port: 1935
    protocol: TCP
    targetPort: 1935
  - name: https
    port: 443
    protocol: TCP
    targetPort: 443
  sessionAffinity: None
  type: ClusterIP
status:
  loadBalancer: {}
@@ -0,0 +1,36 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nested-test-deployment
  namespace: default
  labels:
    app: nested-test
    applications.argoproj.io/app-name: nested-app
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nested-test
  template:
    metadata:
      labels:
        app: nested-test
    spec:
      automountServiceAccountToken: false
      containers:
      - name: main-container
        image: 'nginx:latest'
        ports:
        - containerPort: 80
          name: http
          protocol: TCP
        - containerPort: 443
          name: https
        env:
        - name: ENV_VAR1
          value: "value1"
        - name: ENV_VAR2
          value: "value2"
        resources:
          limits:
            memory: 100Mi
@@ -0,0 +1,70 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nested-test-deployment
  namespace: default
  labels:
    app: nested-test
    applications.argoproj.io/app-name: nested-app
  annotations:
    deployment.kubernetes.io/revision: '1'
  managedFields:
  - apiVersion: apps/v1
    fieldsType: FieldsV1
    fieldsV1:
      f:metadata:
        f:labels:
          f:app: {}
          f:applications.argoproj.io/app-name: {}
      f:spec:
        f:replicas: {}
        f:selector: {}
        f:template:
          f:metadata:
            f:labels:
              f:app: {}
          f:spec:
            f:containers:
              k:{"name":"main-container"}:
                .: {}
                f:image: {}
                f:name: {}
                f:ports:
                  .: {}
                  k:{"containerPort":80,"protocol":"TCP"}:
                    .: {}
                    f:containerPort: {}
                    f:name: {}
                    f:protocol: {}
                f:env:
                  .: {}
                  k:{"name":"ENV_VAR1"}:
                    .: {}
                    f:name: {}
                    f:value: {}
    manager: argocd-controller
    operation: Apply
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nested-test
  template:
    metadata:
      labels:
        app: nested-test
    spec:
      automountServiceAccountToken: false
      containers:
      - name: main-container
        image: 'nginx:latest'
        ports:
        - containerPort: 80
          name: http
          protocol: TCP
        env:
        - name: ENV_VAR1
          value: "value1"
        resources:
          limits:
            memory: "100Mi"
@@ -0,0 +1,131 @@
{
  "apiVersion": "apps/v1",
  "kind": "Deployment",
  "metadata": {
    "name": "nested-test-deployment",
    "namespace": "default",
    "labels": {
      "app": "nested-test",
      "applications.argoproj.io/app-name": "nested-app"
    },
    "annotations": {
      "deployment.kubernetes.io/revision": "2"
    },
    "managedFields": [
      {
        "apiVersion": "apps/v1",
        "fieldsType": "FieldsV1",
        "fieldsV1": {
          "f:metadata": {
            "f:labels": {
              "f:app": {},
              "f:applications.argoproj.io/app-name": {}
            }
          },
          "f:spec": {
            "f:replicas": {},
            "f:selector": {},
            "f:template": {
              "f:metadata": {
                "f:labels": {
                  "f:app": {}
                }
              },
              "f:spec": {
                "f:containers": {
                  "k:{\"name\":\"main-container\"}": {
                    ".": {},
                    "f:image": {},
                    "f:name": {},
                    "f:ports": {
                      ".": {},
                      "k:{\"containerPort\":80,\"protocol\":\"TCP\"}": {
                        ".": {},
                        "f:containerPort": {},
                        "f:name": {},
                        "f:protocol": {}
                      },
                      "k:{\"containerPort\":443,\"protocol\":\"TCP\"}": {
                        ".": {},
                        "f:containerPort": {},
                        "f:name": {},
                        "f:protocol": {}
                      }
                    },
                    "f:env": {
                      ".": {},
                      "k:{\"name\":\"ENV_VAR1\"}": {
                        ".": {},
                        "f:name": {},
                        "f:value": {}
                      },
                      "k:{\"name\":\"ENV_VAR2\"}": {
                        ".": {},
                        "f:name": {},
                        "f:value": {}
                      }
                    }
                  }
                }
              }
            }
          }
        },
        "manager": "argocd-controller",
        "operation": "Apply",
        "time": "2023-12-19T00:00:00Z"
      }
    ]
  },
  "spec": {
    "replicas": 1,
    "selector": {
      "matchLabels": {
        "app": "nested-test"
      }
    },
    "template": {
      "metadata": {
        "labels": {
          "app": "nested-test"
        }
      },
      "spec": {
        "automountServiceAccountToken": false,
        "containers": [
          {
            "name": "main-container",
            "image": "nginx:latest",
            "ports": [
              {
                "containerPort": 80,
                "name": "http",
                "protocol": "TCP"
              },
              {
                "containerPort": 443,
                "name": "https",
                "protocol": "TCP"
              }
            ],
            "env": [
              {
                "name": "ENV_VAR1",
                "value": "value1"
              },
              {
                "name": "ENV_VAR2",
                "value": "value2"
              }
            ],
            "resources": {
              "limits": {
                "memory": "100Mi"
              }
            }
          }
        ]
      }
    }
  }
}
@@ -0,0 +1,30 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: manual-apply-test-deployment
  namespace: default
  labels:
    app: manual-apply-app
    applications.argoproj.io/app-name: manual-apply-app
spec:
  replicas: 1
  selector:
    matchLabels:
      app: manual-apply-test
  template:
    metadata:
      labels:
        app: manual-apply-test
    spec:
      automountServiceAccountToken: false
      containers:
      - name: main-container
        image: 'nginx:latest'
        ports:
        - containerPort: 80
          name: http
        - containerPort: 40
          name: https
        resources:
          limits:
            memory: "100Mi"
@@ -0,0 +1,181 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  annotations:
    deployment.kubernetes.io/revision: "4"
  creationTimestamp: "2025-02-25T00:20:45Z"
  generation: 4
  labels:
    app: manual-apply-app
    applications.argoproj.io/app-name: manual-apply-app
  managedFields:
  - apiVersion: apps/v1
    fieldsType: FieldsV1
    fieldsV1:
      f:metadata:
        f:annotations: {}
        f:labels:
          .: {}
          f:app: {}
          f:applications.argoproj.io/app-name: {}
      f:spec:
        f:progressDeadlineSeconds: {}
        f:replicas: {}
        f:revisionHistoryLimit: {}
        f:selector: {}
        f:strategy:
          f:rollingUpdate:
            .: {}
            f:maxSurge: {}
            f:maxUnavailable: {}
          f:type: {}
        f:template:
          f:metadata:
            f:labels:
              .: {}
              f:app: {}
          f:spec:
            f:automountServiceAccountToken: {}
            f:containers:
              k:{"name":"main-container"}:
                .: {}
                f:image: {}
                f:imagePullPolicy: {}
                f:name: {}
                f:ports:
                  .: {}
                  k:{"containerPort":80,"protocol":"TCP"}:
                    .: {}
                    f:containerPort: {}
                    f:name: {}
                    f:protocol: {}
                f:resources:
                  .: {}
                  f:limits:
                    .: {}
                    f:memory: {}
                f:terminationMessagePath: {}
                f:terminationMessagePolicy: {}
            f:dnsPolicy: {}
            f:restartPolicy: {}
            f:schedulerName: {}
            f:securityContext: {}
            f:terminationGracePeriodSeconds: {}
    manager: argocd-controller
    operation: Update
    time: "2025-02-25T01:19:32Z"
  - apiVersion: apps/v1
    fieldsType: FieldsV1
    fieldsV1:
      f:metadata:
        f:annotations:
          f:kubectl.kubernetes.io/last-applied-configuration: {}
      f:spec:
        f:template:
          f:spec:
            f:containers:
              k:{"name":"idle"}:
                .: {}
                f:image: {}
                f:imagePullPolicy: {}
                f:name: {}
                f:ports:
                  .: {}
                  k:{"containerPort":8080,"protocol":"TCP"}:
                    .: {}
                    f:containerPort: {}
                    f:name: {}
                    f:protocol: {}
                f:resources: {}
                f:terminationMessagePath: {}
                f:terminationMessagePolicy: {}
    manager: kubectl-client-side-apply
    operation: Update
    time: "2025-02-25T01:29:34Z"
  - apiVersion: apps/v1
    fieldsType: FieldsV1
    fieldsV1:
      f:metadata:
        f:annotations:
          f:deployment.kubernetes.io/revision: {}
      f:status:
        f:availableReplicas: {}
        f:conditions:
          .: {}
          k:{"type":"Available"}:
            .: {}
            f:lastTransitionTime: {}
            f:lastUpdateTime: {}
            f:message: {}
            f:reason: {}
            f:status: {}
            f:type: {}
          k:{"type":"Progressing"}:
            .: {}
            f:lastTransitionTime: {}
            f:lastUpdateTime: {}
            f:message: {}
            f:reason: {}
            f:status: {}
            f:type: {}
        f:observedGeneration: {}
        f:readyReplicas: {}
        f:replicas: {}
        f:updatedReplicas: {}
    manager: kube-controller-manager
    operation: Update
    subresource: status
    time: "2025-02-25T01:29:44Z"
  name: manual-apply-test-deployment
  namespace: default
  resourceVersion: "46835"
  uid: c2ff066f-cbbd-408d-a015-85f1b6332193
spec:
  progressDeadlineSeconds: 600
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      app: manual-apply-test
  strategy:
    rollingUpdate:
      maxSurge: 25%
      maxUnavailable: 25%
    type: RollingUpdate
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: manual-apply-test
    spec:
      automountServiceAccountToken: false
      containers:
      - image: nginx:latest
        imagePullPolicy: Always
        name: main-container
        ports:
        - containerPort: 80
          name: http
          protocol: TCP
        resources:
          limits:
            memory: 100Mi
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
      - image: spurin/idle:latest
        imagePullPolicy: Always
        name: idle
        ports:
        - containerPort: 8080
          name: web
          protocol: TCP
        resources:
          limits:
            memory: 100Mi
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30
@@ -0,0 +1,310 @@
{
  "apiVersion": "apps/v1",
  "kind": "Deployment",
  "metadata": {
    "annotations": {
      "deployment.kubernetes.io/revision": "4",
      "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"apps/v1\",\"kind\":\"Deployment\",\"metadata\":{\"annotations\":{},\"labels\":{\"app\":\"manual-apply-app\",\"applications.argoproj.io/app-name\":\"manual-apply-app\"},\"name\":\"manual-apply-test-deployment\",\"namespace\":\"default\"},\"spec\":{\"replicas\":1,\"selector\":{\"matchLabels\":{\"app\":\"manual-apply-test\"}},\"template\":{\"metadata\":{\"labels\":{\"app\":\"manual-apply-test\"}},\"spec\":{\"automountServiceAccountToken\":false,\"containers\":[{\"image\":\"nginx:latest\",\"name\":\"main-container\",\"ports\":[{\"containerPort\":80,\"name\":\"http\"}],\"resources\":{\"limits\":{\"memory\":\"100Mi\"}}},{\"image\":\"spurin/idle:latest\",\"name\":\"idle\",\"ports\":[{\"containerPort\":8080,\"name\":\"web\",\"protocol\":\"TCP\"}]}]}}}}\n"
    },
    "creationTimestamp": "2025-02-25T00:20:45Z",
    "generation": 5,
    "labels": {
      "app": "manual-apply-app",
      "applications.argoproj.io/app-name": "manual-apply-app",
      "mutation-test": "FROM-MUTATION-WEBHOOK"
    },
    "managedFields": [
      {
        "apiVersion": "apps/v1",
        "fieldsType": "FieldsV1",
        "fieldsV1": {
          "f:metadata": {
            "f:labels": {
              "f:app": {},
              "f:applications.argoproj.io/app-name": {}
            }
          },
          "f:spec": {
            "f:replicas": {},
            "f:selector": {},
            "f:template": {
              "f:metadata": {
                "f:labels": {
                  "f:app": {}
                }
              },
              "f:spec": {
                "f:automountServiceAccountToken": {},
                "f:containers": {
                  "k:{\"name\":\"main-container\"}": {
                    ".": {},
                    "f:image": {},
                    "f:name": {},
                    "f:ports": {
                      "k:{\"containerPort\":40,\"protocol\":\"TCP\"}": {
                        ".": {},
                        "f:containerPort": {},
                        "f:name": {}
                      },
                      "k:{\"containerPort\":80,\"protocol\":\"TCP\"}": {
                        ".": {},
                        "f:containerPort": {},
                        "f:name": {}
                      }
                    },
                    "f:resources": {
                      "f:limits": {
                        "f:memory": {}
                      }
                    }
                  }
                }
              }
            }
          }
        },
        "manager": "argocd-controller",
        "operation": "Apply",
        "time": "2025-02-25T01:31:03Z"
      },
      {
        "apiVersion": "apps/v1",
        "fieldsType": "FieldsV1",
        "fieldsV1": {
          "f:metadata": {
            "f:annotations": {},
            "f:labels": {
              ".": {},
              "f:app": {},
              "f:applications.argoproj.io/app-name": {}
            }
          },
          "f:spec": {
            "f:progressDeadlineSeconds": {},
            "f:replicas": {},
            "f:revisionHistoryLimit": {},
            "f:selector": {},
            "f:strategy": {
              "f:rollingUpdate": {
                ".": {},
                "f:maxSurge": {},
                "f:maxUnavailable": {}
              },
              "f:type": {}
            },
            "f:template": {
              "f:metadata": {
                "f:labels": {
                  ".": {},
                  "f:app": {}
                }
              },
              "f:spec": {
                "f:automountServiceAccountToken": {},
                "f:containers": {
                  "k:{\"name\":\"main-container\"}": {
                    ".": {},
                    "f:image": {},
                    "f:imagePullPolicy": {},
                    "f:name": {},
                    "f:ports": {
                      ".": {},
                      "k:{\"containerPort\":80,\"protocol\":\"TCP\"}": {
                        ".": {},
                        "f:containerPort": {},
                        "f:name": {},
                        "f:protocol": {}
                      }
                    },
                    "f:resources": {
                      ".": {},
                      "f:limits": {
                        ".": {},
                        "f:memory": {}
                      }
                    },
                    "f:terminationMessagePath": {},
                    "f:terminationMessagePolicy": {}
                  }
                },
                "f:dnsPolicy": {},
                "f:restartPolicy": {},
                "f:schedulerName": {},
                "f:securityContext": {},
                "f:terminationGracePeriodSeconds": {}
              }
            }
          }
        },
        "manager": "argocd-controller",
        "operation": "Update",
        "time": "2025-02-25T01:19:32Z"
      },
      {
        "apiVersion": "apps/v1",
        "fieldsType": "FieldsV1",
        "fieldsV1": {
          "f:metadata": {
            "f:annotations": {
              "f:kubectl.kubernetes.io/last-applied-configuration": {}
            }
          },
          "f:spec": {
            "f:template": {
              "f:spec": {
                "f:containers": {
                  "k:{\"name\":\"idle\"}": {
                    ".": {},
                    "f:image": {},
                    "f:imagePullPolicy": {},
                    "f:name": {},
                    "f:ports": {
                      ".": {},
                      "k:{\"containerPort\":8080,\"protocol\":\"TCP\"}": {
                        ".": {},
                        "f:containerPort": {},
                        "f:name": {},
                        "f:protocol": {}
                      }
                    },
                    "f:resources": {},
                    "f:terminationMessagePath": {},
                    "f:terminationMessagePolicy": {}
                  }
                }
              }
            }
          }
        },
        "manager": "kubectl-client-side-apply",
        "operation": "Update",
        "time": "2025-02-25T01:29:34Z"
      },
      {
        "apiVersion": "apps/v1",
        "fieldsType": "FieldsV1",
        "fieldsV1": {
          "f:metadata": {
            "f:annotations": {
              "f:deployment.kubernetes.io/revision": {}
            }
          },
          "f:status": {
            "f:availableReplicas": {},
            "f:conditions": {
              ".": {},
              "k:{\"type\":\"Available\"}": {
                ".": {},
                "f:lastTransitionTime": {},
                "f:lastUpdateTime": {},
                "f:message": {},
                "f:reason": {},
                "f:status": {},
                "f:type": {}
              },
              "k:{\"type\":\"Progressing\"}": {
                ".": {},
                "f:lastTransitionTime": {},
                "f:lastUpdateTime": {},
                "f:message": {},
                "f:reason": {},
                "f:status": {},
                "f:type": {}
              }
            },
            "f:observedGeneration": {},
            "f:readyReplicas": {},
            "f:replicas": {},
            "f:updatedReplicas": {}
          }
        },
        "manager": "kube-controller-manager",
        "operation": "Update",
        "subresource": "status",
        "time": "2025-02-25T01:29:44Z"
      }
    ],
    "name": "manual-apply-test-deployment",
    "namespace": "default",
    "resourceVersion": "46835",
    "uid": "c2ff066f-cbbd-408d-a015-85f1b6332193"
  },
  "spec": {
    "progressDeadlineSeconds": 600,
    "replicas": 1,
    "revisionHistoryLimit": 10,
    "selector": {
      "matchLabels": {
        "app": "manual-apply-test"
      }
    },
    "strategy": {
      "rollingUpdate": {
        "maxSurge": "25%",
        "maxUnavailable": "25%"
      },
      "type": "RollingUpdate"
    },
    "template": {
      "metadata": {
        "creationTimestamp": null,
        "labels": {
          "app": "manual-apply-test"
        }
      },
      "spec": {
        "automountServiceAccountToken": false,
        "containers": [
          {
            "image": "nginx:latest",
            "imagePullPolicy": "Always",
            "name": "main-container",
            "ports": [
              {
                "containerPort": 80,
                "name": "http",
                "protocol": "TCP"
              },
              {
                "containerPort": 40,
                "name": "https",
                "protocol": "TCP"
              }
            ],
            "resources": {
              "limits": {
                "memory": "100Mi"
              }
            },
            "terminationMessagePath": "/dev/termination-log",
            "terminationMessagePolicy": "File"
          },
          {
            "image": "spurin/idle:latest",
            "imagePullPolicy": "Always",
            "name": "idle",
            "ports": [
              {
                "containerPort": 8080,
                "name": "web",
                "protocol": "TCP"
              }
            ],
            "resources": {
              "limits": {
                "memory": "100Mi"
              }
            },
            "terminationMessagePath": "/dev/termination-log",
            "terminationMessagePolicy": "File"
          }
        ],
        "dnsPolicy": "ClusterFirst",
        "restartPolicy": "Always",
        "schedulerName": "default-scheduler",
        "securityContext": {},
        "terminationGracePeriodSeconds": 30
      }
    }
  }
}
@@ -0,0 +1,16 @@
apiVersion: v1
kind: Service
metadata:
  labels:
    app.kubernetes.io/instance: httpbin
  name: httpbin-svc
  namespace: httpbin
spec:
  ports:
  - name: http-port
    port: 7777
    targetPort: 80
  - name: test
    port: 333
  selector:
    app: httpbin
@@ -0,0 +1,55 @@
apiVersion: v1
kind: Service
metadata:
  creationTimestamp: '2023-12-18T00:34:22Z'
  labels:
    app.kubernetes.io/instance: httpbin
  managedFields:
  - apiVersion: v1
    fieldsType: FieldsV1
    fieldsV1:
      'f:metadata':
        'f:labels':
          'f:app.kubernetes.io/instance': {}
      'f:spec':
        'f:ports':
          'k:{"port":333,"protocol":"TCP"}':
            .: {}
            'f:name': {}
            'f:port': {}
          'k:{"port":7777,"protocol":"TCP"}':
            .: {}
            'f:name': {}
            'f:port': {}
            'f:targetPort': {}
        'f:selector': {}
    manager: argocd-controller
    operation: Apply
    time: '2023-12-18T00:34:22Z'
  name: httpbin-svc
  namespace: httpbin
  resourceVersion: '2836'
  uid: 0e898e6f-c275-476d-9b4f-5e96072cc129
spec:
  clusterIP: 10.43.223.115
  clusterIPs:
  - 10.43.223.115
  internalTrafficPolicy: Cluster
  ipFamilies:
  - IPv4
  ipFamilyPolicy: SingleStack
  ports:
  - name: http-port
    port: 7777
    protocol: TCP
    targetPort: 80
  - name: test
    port: 333
    protocol: TCP
    targetPort: 333
  selector:
    app: httpbin
  sessionAffinity: None
  type: ClusterIP
status:
  loadBalancer: {}
@@ -0,0 +1,74 @@
{
  "apiVersion": "v1",
  "kind": "Service",
  "metadata": {
    "creationTimestamp": "2023-12-18T00:34:22Z",
    "labels": {
      "event": "FROM-MUTATION-WEBHOOK"
    },
    "managedFields": [
      {
        "apiVersion": "v1",
        "fieldsType": "FieldsV1",
        "fieldsV1": {
          "f:spec": {
            "f:ports": {
              "k:{\"port\":333,\"protocol\":\"TCP\"}": {
                ".": {},
                "f:name": {},
                "f:port": {}
              },
              "k:{\"port\":7777,\"protocol\":\"TCP\"}": {
                ".": {},
                "f:name": {},
                "f:port": {},
                "f:targetPort": {}
              }
            },
            "f:selector": {}
          }
        },
        "manager": "argocd-controller",
        "operation": "Apply",
        "time": "2023-12-18T00:38:28Z"
      }
    ],
    "name": "httpbin-svc",
    "namespace": "httpbin",
    "resourceVersion": "2836",
    "uid": "0e898e6f-c275-476d-9b4f-5e96072cc129"
  },
  "spec": {
    "clusterIP": "10.43.223.115",
    "clusterIPs": [
      "10.43.223.115"
    ],
    "internalTrafficPolicy": "Cluster",
    "ipFamilies": [
      "IPv4"
    ],
    "ipFamilyPolicy": "SingleStack",
    "ports": [
      {
        "name": "http-port",
        "port": 7777,
        "protocol": "TCP",
        "targetPort": 80
      },
      {
        "name": "test",
        "port": 333,
        "protocol": "TCP",
        "targetPort": 333
      }
    ],
    "selector": {
      "app": "httpbin"
    },
    "sessionAffinity": "None",
    "type": "ClusterIP"
  },
  "status": {
    "loadBalancer": {}
  }
}
@@ -0,0 +1,50 @@
apiVersion: v1
kind: Service
metadata:
  creationTimestamp: "2025-05-16T19:01:22Z"
  labels:
    app.kubernetes.io/instance: httpbin
    delete-me: delete-value
  managedFields:
  - apiVersion: v1
    fieldsType: FieldsV1
    fieldsV1:
      f:metadata:
        f:labels:
          f:app.kubernetes.io/instance: {}
          f:delete-me: {}
      f:spec:
        f:ports:
          k:{"port":7777,"protocol":"TCP"}:
            .: {}
            f:name: {}
            f:port: {}
            f:protocol: {}
            f:targetPort: {}
        f:selector: {}
    manager: argocd-controller
    operation: Apply
    time: "2025-05-16T19:01:22Z"
  name: httpbin-svc
  namespace: httpbin
  resourceVersion: "159005"
  uid: 61a7a0c2-d973-4333-bbd6-c06ba1c00190
spec:
  clusterIP: 10.96.59.144
  clusterIPs:
  - 10.96.59.144
  internalTrafficPolicy: Cluster
  ipFamilies:
  - IPv4
  ipFamilyPolicy: SingleStack
  ports:
  - name: http-port
    port: 7777
    protocol: TCP
    targetPort: 80
  selector:
    app: httpbin
  sessionAffinity: None
  type: ClusterIP
status:
  loadBalancer: {}
@@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
  labels:
    app.kubernetes.io/instance: httpbin
  name: httpbin-svc
  namespace: httpbin
spec:
  ports:
  - name: http-port
    port: 7777
    protocol: TCP
    targetPort: 80
  selector:
    app: httpbin
@@ -0,0 +1,69 @@
{
  "apiVersion": "v1",
  "kind": "Service",
  "metadata": {
    "creationTimestamp": "2025-05-16T19:01:22Z",
    "labels": {
      "app.kubernetes.io/instance": "httpbin"
    },
    "managedFields": [
      {
        "apiVersion": "v1",
        "fieldsType": "FieldsV1",
        "fieldsV1": {
          "f:metadata": {
            "f:labels": {
              "f:app.kubernetes.io/instance": {}
            }
          },
          "f:spec": {
            "f:ports": {
              "k:{\"port\":7777,\"protocol\":\"TCP\"}": {
                ".": {},
                "f:name": {},
                "f:port": {},
                "f:protocol": {},
                "f:targetPort": {}
              }
            },
            "f:selector": {}
          }
        },
        "manager": "argocd-controller",
        "operation": "Apply",
        "time": "2025-05-16T19:02:57Z"
      }
    ],
    "name": "httpbin-svc",
    "namespace": "httpbin",
    "resourceVersion": "159005",
    "uid": "61a7a0c2-d973-4333-bbd6-c06ba1c00190"
  },
  "spec": {
    "clusterIP": "10.96.59.144",
    "clusterIPs": [
      "10.96.59.144"
    ],
    "internalTrafficPolicy": "Cluster",
    "ipFamilies": [
      "IPv4"
    ],
    "ipFamilyPolicy": "SingleStack",
    "ports": [
      {
        "name": "http-port",
        "port": 7777,
        "protocol": "TCP",
        "targetPort": 80
      }
    ],
    "selector": {
      "app": "httpbin"
    },
    "sessionAffinity": "None",
    "type": "ClusterIP"
  },
  "status": {
    "loadBalancer": {}
  }
}
@@ -59,7 +59,7 @@ func NewEngine(config *rest.Config, clusterCache cache.ClusterCache, opts ...Opt
 func (e *gitOpsEngine) Run() (StopFunc, error) {
     err := e.cache.EnsureSynced()
     if err != nil {
-        return nil, err
+        return nil, fmt.Errorf("failed to ensure the cache is synced: %w", err)
     }
 
     return func() {
@@ -76,21 +76,23 @@ func (e *gitOpsEngine) Sync(ctx context.Context,
 ) ([]common.ResourceSyncResult, error) {
     managedResources, err := e.cache.GetManagedLiveObjs(resources, isManaged)
     if err != nil {
-        return nil, err
+        return nil, fmt.Errorf("failed to get managed live objects: %w", err)
     }
     result := sync.Reconcile(resources, managedResources, namespace, e.cache)
     diffRes, err := diff.DiffArray(result.Target, result.Live, diff.WithLogr(e.log))
     if err != nil {
-        return nil, err
+        return nil, fmt.Errorf("failed to diff objects: %w", err)
     }
     opts = append(opts, sync.WithSkipHooks(!diffRes.Modified))
-    syncCtx, err := sync.NewSyncContext(revision, result, e.config, e.config, e.kubectl, namespace, opts...)
+    syncCtx, cleanup, err := sync.NewSyncContext(revision, result, e.config, e.config, e.kubectl, namespace, e.cache.GetOpenAPISchema(), opts...)
     if err != nil {
-        return nil, err
+        return nil, fmt.Errorf("failed to create sync context: %w", err)
     }
+    defer cleanup()
 
     resUpdated := make(chan bool)
-    unsubscribe := e.cache.OnResourceUpdated(func(newRes *cache.Resource, oldRes *cache.Resource, namespaceResources map[kube.ResourceKey]*cache.Resource) {
+    resIgnore := make(chan struct{})
+    unsubscribe := e.cache.OnResourceUpdated(func(newRes *cache.Resource, oldRes *cache.Resource, _ map[kube.ResourceKey]*cache.Resource) {
         var key kube.ResourceKey
         if newRes != nil {
             key = newRes.ResourceKey()
@@ -98,9 +100,13 @@ func (e *gitOpsEngine) Sync(ctx context.Context,
             key = oldRes.ResourceKey()
         }
         if _, ok := managedResources[key]; ok {
-            resUpdated <- true
+            select {
+            case resUpdated <- true:
+            case <-resIgnore:
+            }
         }
     })
+    defer close(resIgnore)
     defer unsubscribe()
     for {
         syncCtx.Sync()
@@ -114,6 +120,7 @@ func (e *gitOpsEngine) Sync(ctx context.Context,
         select {
         case <-ctx.Done():
             syncCtx.Terminate()
+            //nolint:wrapcheck // don't wrap context errors
             return resources, ctx.Err()
         case <-time.After(operationRefreshTimeout):
         case <-resUpdated:
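Note: the `resIgnore` channel added above closes a leak: previously a subscriber callback could block forever on `resUpdated <- true` once `Sync` stopped receiving. A minimal, self-contained sketch of the same select-plus-close pattern (the names `notify` and `done` are illustrative, not from the library):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	notify := make(chan bool)   // plays the role of resUpdated
	done := make(chan struct{}) // plays the role of resIgnore

	// Simulated resource-updated callbacks.
	for i := 0; i < 3; i++ {
		go func(id int) {
			select {
			case notify <- true: // receiver still listening
				fmt.Println("delivered update", id)
			case <-done: // receiver gone; drop the notification
				fmt.Println("dropped update", id)
			}
		}(i)
	}

	<-notify    // receive one update
	close(done) // unblock all remaining senders, like defer close(resIgnore)
	time.Sleep(100 * time.Millisecond)
}
```

Closing `done` makes every pending and future `case <-done` ready at once, so no sender goroutine can outlive the receiver.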
@@ -2,7 +2,7 @@ package engine
 
 import (
     "github.com/go-logr/logr"
-    "k8s.io/klog/v2/klogr"
+    "k8s.io/klog/v2/textlogger"
 
     "github.com/argoproj/gitops-engine/pkg/utils/kube"
     "github.com/argoproj/gitops-engine/pkg/utils/tracing"
@@ -16,7 +16,7 @@ type options struct {
 }
 
 func applyOptions(opts []Option) options {
-    log := klogr.New()
+    log := textlogger.NewLogger(textlogger.NewConfig())
     o := options{
         log: log,
         kubectl: &kube.KubectlCmd{
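klogr is deprecated in klog v2, and textlogger is its supported replacement. A short sketch of constructing the logger, including a verbosity option (level 4 here is an arbitrary example, not a value from this repo):

```go
package main

import (
	"k8s.io/klog/v2/textlogger"
)

func main() {
	// Equivalent to the default created by applyOptions above.
	log := textlogger.NewLogger(textlogger.NewConfig())
	log.Info("sync starting", "namespace", "default")

	// With a higher verbosity threshold, V(4) debug lines are emitted too.
	verbose := textlogger.NewLogger(textlogger.NewConfig(textlogger.Verbosity(4)))
	verbose.V(4).Info("detailed diff output enabled")
}
```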
@@ -1,8 +1,13 @@
 package health
 
 import (
-    "github.com/argoproj/gitops-engine/pkg/utils/kube"
+    "fmt"
+
     "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
     "k8s.io/apimachinery/pkg/runtime/schema"
+
+    "github.com/argoproj/gitops-engine/pkg/sync/hook"
+    "github.com/argoproj/gitops-engine/pkg/utils/kube"
 )
 
 // Represents resource health status
@@ -41,12 +46,12 @@ var healthOrder = []HealthStatusCode{
     HealthStatusHealthy,
     HealthStatusSuspended,
     HealthStatusProgressing,
-    HealthStatusDegraded,
     HealthStatusMissing,
+    HealthStatusDegraded,
     HealthStatusUnknown,
 }
 
-// IsWorse returns whether or not the new health status code is a worser condition than the current
+// IsWorse returns whether or not the new health status code is a worse condition than the current
 func IsWorse(current, new HealthStatusCode) bool {
     currentIndex := 0
     newIndex := 0
@@ -63,7 +68,7 @@ func IsWorse(current, new HealthStatusCode) bool {
 
 // GetResourceHealth returns the health of a k8s resource
 func GetResourceHealth(obj *unstructured.Unstructured, healthOverride HealthOverride) (health *HealthStatus, err error) {
-    if obj.GetDeletionTimestamp() != nil {
+    if obj.GetDeletionTimestamp() != nil && !hook.HasHookFinalizer(obj) {
         return &HealthStatus{
             Status:  HealthStatusProgressing,
             Message: "Pending deletion",
@@ -77,72 +82,71 @@ func GetResourceHealth(obj *unstructured.Unstructured, healthOverride HealthOver
                 Status:  HealthStatusUnknown,
                 Message: err.Error(),
             }
-            return health, err
+            return health, fmt.Errorf("failed to get resource health for %s/%s: %w", obj.GetNamespace(), obj.GetName(), err)
         }
         if health != nil {
             return health, nil
         }
     }
 
-    gvk := obj.GroupVersionKind()
-    switch gvk.Group {
-    case "apps":
-        switch gvk.Kind {
-        case kube.DeploymentKind:
-            health, err = getDeploymentHealth(obj)
-        case kube.StatefulSetKind:
-            health, err = getStatefulSetHealth(obj)
-        case kube.ReplicaSetKind:
-            health, err = getReplicaSetHealth(obj)
-        case kube.DaemonSetKind:
-            health, err = getDaemonSetHealth(obj)
-        }
-    case "extensions":
-        switch gvk.Kind {
-        case kube.DeploymentKind:
-            health, err = getDeploymentHealth(obj)
-        case kube.IngressKind:
-            health, err = getIngressHealth(obj)
-        case kube.ReplicaSetKind:
-            health, err = getReplicaSetHealth(obj)
-        case kube.DaemonSetKind:
-            health, err = getDaemonSetHealth(obj)
-        }
-    case "argoproj.io":
-        switch gvk.Kind {
-        case "Workflow":
-            health, err = getArgoWorkflowHealth(obj)
-        }
-    case "apiregistration.k8s.io":
-        switch gvk.Kind {
-        case kube.APIServiceKind:
-            health, err = getAPIServiceHealth(obj)
-        }
-    case "networking.k8s.io":
-        switch gvk.Kind {
-        case kube.IngressKind:
-            health, err = getIngressHealth(obj)
-        }
-    case "":
-        switch gvk.Kind {
-        case kube.ServiceKind:
-            health, err = getServiceHealth(obj)
-        case kube.PersistentVolumeClaimKind:
-            health, err = getPVCHealth(obj)
-        case kube.PodKind:
-            health, err = getPodHealth(obj)
-        }
-    case "batch":
-        switch gvk.Kind {
-        case kube.JobKind:
-            health, err = getJobHealth(obj)
-        }
-    }
-    if err != nil {
-        health = &HealthStatus{
-            Status:  HealthStatusUnknown,
-            Message: err.Error(),
+    if healthCheck := GetHealthCheckFunc(obj.GroupVersionKind()); healthCheck != nil {
+        if health, err = healthCheck(obj); err != nil {
+            health = &HealthStatus{
+                Status:  HealthStatusUnknown,
+                Message: err.Error(),
+            }
         }
     }
     return health, err
 }
 
+// GetHealthCheckFunc returns built-in health check function or nil if health check is not supported
+func GetHealthCheckFunc(gvk schema.GroupVersionKind) func(obj *unstructured.Unstructured) (*HealthStatus, error) {
+    switch gvk.Group {
+    case "apps":
+        switch gvk.Kind {
+        case kube.DeploymentKind:
+            return getDeploymentHealth
+        case kube.StatefulSetKind:
+            return getStatefulSetHealth
+        case kube.ReplicaSetKind:
+            return getReplicaSetHealth
+        case kube.DaemonSetKind:
+            return getDaemonSetHealth
+        }
+    case "extensions":
+        if gvk.Kind == kube.IngressKind {
+            return getIngressHealth
+        }
+    case "argoproj.io":
+        if gvk.Kind == "Workflow" {
+            return getArgoWorkflowHealth
+        }
+    case "apiregistration.k8s.io":
+        if gvk.Kind == kube.APIServiceKind {
+            return getAPIServiceHealth
+        }
+    case "networking.k8s.io":
+        if gvk.Kind == kube.IngressKind {
+            return getIngressHealth
+        }
+    case "":
+        switch gvk.Kind {
+        case kube.ServiceKind:
+            return getServiceHealth
+        case kube.PersistentVolumeClaimKind:
+            return getPVCHealth
+        case kube.PodKind:
+            return getPodHealth
+        }
+    case "batch":
+        if gvk.Kind == kube.JobKind {
+            return getJobHealth
+        }
+    case "autoscaling":
+        if gvk.Kind == kube.HorizontalPodAutoscalerKind {
+            return getHPAHealth
+        }
+    }
+    return nil
+}
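With the dispatch extracted into `GetHealthCheckFunc`, callers can still go through `GetResourceHealth` and fold per-resource statuses into an aggregate with `IsWorse`, which now ranks Missing as worse than Degraded per the reordered `healthOrder`. A minimal sketch under those assumptions (`aggregateHealth` is an illustrative helper, not part of the package):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

	"github.com/argoproj/gitops-engine/pkg/health"
)

// aggregateHealth folds each resource's health into a single worst status.
func aggregateHealth(objs []*unstructured.Unstructured) (health.HealthStatusCode, error) {
	worst := health.HealthStatusHealthy
	for _, obj := range objs {
		hs, err := health.GetResourceHealth(obj, nil) // nil: no health override
		if err != nil {
			return health.HealthStatusUnknown, fmt.Errorf("health check failed: %w", err)
		}
		if hs != nil && health.IsWorse(worst, hs.Status) {
			worst = hs.Status
		}
	}
	return worst, nil
}

func main() {
	status, err := aggregateHealth(nil) // empty input stays Healthy
	fmt.Println(status, err)
}
```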
@@ -3,11 +3,12 @@ package health
 import (
     "fmt"
 
-    "github.com/argoproj/gitops-engine/pkg/utils/kube"
     "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
     "k8s.io/apimachinery/pkg/runtime"
     apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
     apiregistrationv1beta1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1"
+
+    "github.com/argoproj/gitops-engine/pkg/utils/kube"
 )
 
 func getAPIServiceHealth(obj *unstructured.Unstructured) (*HealthStatus, error) {
@@ -17,14 +18,14 @@ func getAPIServiceHealth(obj *unstructured.Unstructured) (*HealthStatus, error)
         var apiService apiregistrationv1.APIService
         err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &apiService)
         if err != nil {
-            return nil, fmt.Errorf("failed to convert unstructured APIService to typed: %v", err)
+            return nil, fmt.Errorf("failed to convert unstructured APIService to typed: %w", err)
         }
         return getApiregistrationv1APIServiceHealth(&apiService)
     case apiregistrationv1beta1.SchemeGroupVersion.WithKind(kube.APIServiceKind):
         var apiService apiregistrationv1beta1.APIService
         err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &apiService)
         if err != nil {
-            return nil, fmt.Errorf("failed to convert unstructured APIService to typed: %v", err)
+            return nil, fmt.Errorf("failed to convert unstructured APIService to typed: %w", err)
         }
         return getApiregistrationv1beta1APIServiceHealth(&apiService)
     default:
@@ -34,19 +35,17 @@ func getAPIServiceHealth(obj *unstructured.Unstructured) (*HealthStatus, error)
 
 func getApiregistrationv1APIServiceHealth(apiservice *apiregistrationv1.APIService) (*HealthStatus, error) {
     for _, c := range apiservice.Status.Conditions {
-        switch c.Type {
-        case apiregistrationv1.Available:
+        if c.Type == apiregistrationv1.Available {
             if c.Status == apiregistrationv1.ConditionTrue {
                 return &HealthStatus{
                     Status:  HealthStatusHealthy,
                     Message: fmt.Sprintf("%s: %s", c.Reason, c.Message),
                 }, nil
-            } else {
-                return &HealthStatus{
-                    Status:  HealthStatusProgressing,
-                    Message: fmt.Sprintf("%s: %s", c.Reason, c.Message),
-                }, nil
             }
+            return &HealthStatus{
+                Status:  HealthStatusProgressing,
+                Message: fmt.Sprintf("%s: %s", c.Reason, c.Message),
+            }, nil
         }
     }
     return &HealthStatus{
@@ -57,19 +56,17 @@ func getApiregistrationv1APIServiceHealth(apiservice *apiregistrationv1.APIServi
 
 func getApiregistrationv1beta1APIServiceHealth(apiservice *apiregistrationv1beta1.APIService) (*HealthStatus, error) {
     for _, c := range apiservice.Status.Conditions {
-        switch c.Type {
-        case apiregistrationv1beta1.Available:
+        if c.Type == apiregistrationv1beta1.Available {
             if c.Status == apiregistrationv1beta1.ConditionTrue {
                 return &HealthStatus{
                     Status:  HealthStatusHealthy,
                     Message: fmt.Sprintf("%s: %s", c.Reason, c.Message),
                 }, nil
-            } else {
-                return &HealthStatus{
-                    Status:  HealthStatusProgressing,
-                    Message: fmt.Sprintf("%s: %s", c.Reason, c.Message),
-                }, nil
             }
+            return &HealthStatus{
+                Status:  HealthStatusProgressing,
+                Message: fmt.Sprintf("%s: %s", c.Reason, c.Message),
+            }, nil
         }
     }
     return &HealthStatus{
@@ -1,6 +1,8 @@
 package health
 
 import (
+    "fmt"
+
     "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
     "k8s.io/apimachinery/pkg/runtime"
 )
@@ -8,13 +10,13 @@ import (
 type nodePhase string
 
 // Workflow and node statuses
+// See: https://github.com/argoproj/argo-workflows/blob/master/pkg/apis/workflow/v1alpha1/workflow_phase.go
 const (
     nodePending   nodePhase = "Pending"
     nodeRunning   nodePhase = "Running"
     nodeSucceeded nodePhase = "Succeeded"
-    // nodeSkipped nodePhase = "Skipped"
-    nodeFailed nodePhase = "Failed"
-    nodeError  nodePhase = "Error"
+    nodeFailed    nodePhase = "Failed"
+    nodeError     nodePhase = "Error"
 )
 
 // An agnostic workflow object only considers Status.Phase and Status.Message. It is agnostic to the API version or any
@@ -30,15 +32,15 @@ func getArgoWorkflowHealth(obj *unstructured.Unstructured) (*HealthStatus, error
     var wf argoWorkflow
     err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &wf)
     if err != nil {
-        return nil, err
+        return nil, fmt.Errorf("failed to convert unstructured to argoworkflow: %w", err)
     }
     switch wf.Status.Phase {
-    case nodePending, nodeRunning:
+    case "", nodePending, nodeRunning:
         return &HealthStatus{Status: HealthStatusProgressing, Message: wf.Status.Message}, nil
     case nodeSucceeded:
        return &HealthStatus{Status: HealthStatusHealthy, Message: wf.Status.Message}, nil
     case nodeFailed, nodeError:
         return &HealthStatus{Status: HealthStatusDegraded, Message: wf.Status.Message}, nil
     }
-    return &HealthStatus{Status: HealthStatusHealthy, Message: wf.Status.Message}, nil
+    return &HealthStatus{Status: HealthStatusUnknown, Message: wf.Status.Message}, nil
 }
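The change above treats an empty phase (a Workflow the controller has not observed yet) as Progressing, and any unrecognized phase as Unknown rather than Healthy. A standalone sketch of the same mapping (the phase strings are from the Argo Workflows API; the function name is illustrative):

```go
package main

import "fmt"

// workflowPhaseToHealth mirrors the switch in getArgoWorkflowHealth after
// this change: empty phase => Progressing, unrecognized phase => Unknown.
func workflowPhaseToHealth(phase string) string {
	switch phase {
	case "", "Pending", "Running":
		return "Progressing"
	case "Succeeded":
		return "Healthy"
	case "Failed", "Error":
		return "Degraded"
	}
	return "Unknown"
}

func main() {
	for _, p := range []string{"", "Running", "Succeeded", "Failed", "Bogus"} {
		fmt.Printf("%-11q => %s\n", p, workflowPhaseToHealth(p))
	}
}
```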
@ -3,12 +3,11 @@ package health
|
|||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/argoproj/gitops-engine/pkg/utils/kube"
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
appsv1beta2 "k8s.io/api/apps/v1beta2"
|
||||
extv1beta1 "k8s.io/api/extensions/v1beta1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
|
||||
"github.com/argoproj/gitops-engine/pkg/utils/kube"
|
||||
)
|
||||
|
||||
func getDaemonSetHealth(obj *unstructured.Unstructured) (*HealthStatus, error) {
|
||||
|
@ -18,23 +17,9 @@ func getDaemonSetHealth(obj *unstructured.Unstructured) (*HealthStatus, error) {
|
|||
var daemon appsv1.DaemonSet
|
||||
err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &daemon)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to convert unstructured DaemonSet to typed: %v", err)
|
||||
return nil, fmt.Errorf("failed to convert unstructured DaemonSet to typed: %w", err)
|
||||
}
|
||||
return getAppsv1DaemonSetHealth(&daemon)
|
||||
case appsv1beta2.SchemeGroupVersion.WithKind(kube.DaemonSetKind):
|
||||
var daemon appsv1beta2.DaemonSet
|
||||
err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &daemon)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to convert unstructured DaemonSet to typed: %v", err)
|
||||
}
|
||||
return getAppsv1beta1DaemonSetHealth(&daemon)
|
||||
case extv1beta1.SchemeGroupVersion.WithKind(kube.DaemonSetKind):
|
||||
var daemon extv1beta1.DaemonSet
|
||||
err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &daemon)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to convert unstructured DaemonSet to typed: %v", err)
|
||||
}
|
||||
return getExtv1beta1DaemonSetHealth(&daemon)
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported DaemonSet GVK: %s", gvk)
|
||||
}
|
||||
|
@ -42,93 +27,28 @@ func getDaemonSetHealth(obj *unstructured.Unstructured) (*HealthStatus, error) {
|
|||
|
||||
func getAppsv1DaemonSetHealth(daemon *appsv1.DaemonSet) (*HealthStatus, error) {
|
||||
// Borrowed at kubernetes/kubectl/rollout_status.go https://github.com/kubernetes/kubernetes/blob/5232ad4a00ec93942d0b2c6359ee6cd1201b46bc/pkg/kubectl/rollout_status.go#L110
|
||||
if daemon.Generation <= daemon.Status.ObservedGeneration {
|
||||
if daemon.Spec.UpdateStrategy.Type == appsv1.OnDeleteDaemonSetStrategyType {
|
||||
return &HealthStatus{
|
||||
Status: HealthStatusHealthy,
|
||||
Message: fmt.Sprintf("daemon set %d out of %d new pods have been updated", daemon.Status.UpdatedNumberScheduled, daemon.Status.DesiredNumberScheduled),
|
||||
}, nil
|
||||
}
|
||||
if daemon.Status.UpdatedNumberScheduled < daemon.Status.DesiredNumberScheduled {
|
||||
return &HealthStatus{
|
||||
Status: HealthStatusProgressing,
|
||||
Message: fmt.Sprintf("Waiting for daemon set %q rollout to finish: %d out of %d new pods have been updated...", daemon.Name, daemon.Status.UpdatedNumberScheduled, daemon.Status.DesiredNumberScheduled),
|
||||
}, nil
|
||||
}
|
||||
if daemon.Status.NumberAvailable < daemon.Status.DesiredNumberScheduled {
|
||||
return &HealthStatus{
|
||||
Status: HealthStatusProgressing,
|
||||
Message: fmt.Sprintf("Waiting for daemon set %q rollout to finish: %d of %d updated pods are available...", daemon.Name, daemon.Status.NumberAvailable, daemon.Status.DesiredNumberScheduled),
|
||||
}, nil
|
||||
}
|
||||
} else {
|
||||
if daemon.Generation > daemon.Status.ObservedGeneration {
|
||||
return &HealthStatus{
|
||||
Status: HealthStatusProgressing,
|
||||
Message: "Waiting for rollout to finish: observed daemon set generation less then desired generation",
|
||||
}, nil
|
||||
}
|
||||
return &HealthStatus{
|
||||
Status: HealthStatusHealthy,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func getAppsv1beta1DaemonSetHealth(daemon *appsv1beta2.DaemonSet) (*HealthStatus, error) {
|
||||
// Borrowed at kubernetes/kubectl/rollout_status.go https://github.com/kubernetes/kubernetes/blob/5232ad4a00ec93942d0b2c6359ee6cd1201b46bc/pkg/kubectl/rollout_status.go#L110
|
||||
if daemon.Generation <= daemon.Status.ObservedGeneration {
|
||||
if daemon.Spec.UpdateStrategy.Type == appsv1beta2.OnDeleteDaemonSetStrategyType {
|
||||
return &HealthStatus{
|
||||
Status: HealthStatusHealthy,
|
||||
Message: fmt.Sprintf("daemon set %d out of %d new pods have been updated", daemon.Status.UpdatedNumberScheduled, daemon.Status.DesiredNumberScheduled),
|
||||
}, nil
|
||||
}
|
||||
if daemon.Status.UpdatedNumberScheduled < daemon.Status.DesiredNumberScheduled {
|
||||
return &HealthStatus{
|
||||
				Status:  HealthStatusProgressing,
				Message: fmt.Sprintf("Waiting for daemon set %q rollout to finish: %d out of %d new pods have been updated...", daemon.Name, daemon.Status.UpdatedNumberScheduled, daemon.Status.DesiredNumberScheduled),
			}, nil
		}
		if daemon.Status.NumberAvailable < daemon.Status.DesiredNumberScheduled {
			return &HealthStatus{
				Status:  HealthStatusProgressing,
				Message: fmt.Sprintf("Waiting for daemon set %q rollout to finish: %d of %d updated pods are available...", daemon.Name, daemon.Status.NumberAvailable, daemon.Status.DesiredNumberScheduled),
			}, nil
		}
	} else {
		return &HealthStatus{
			Status:  HealthStatusProgressing,
			Message: "Waiting for rollout to finish: observed daemon set generation less then desired generation",
		}, nil
	}
	return &HealthStatus{
		Status: HealthStatusHealthy,
	}, nil
}

func getExtv1beta1DaemonSetHealth(daemon *extv1beta1.DaemonSet) (*HealthStatus, error) {
	// Borrowed at kubernetes/kubectl/rollout_status.go https://github.com/kubernetes/kubernetes/blob/5232ad4a00ec93942d0b2c6359ee6cd1201b46bc/pkg/kubectl/rollout_status.go#L110
	if daemon.Generation <= daemon.Status.ObservedGeneration {
		if daemon.Spec.UpdateStrategy.Type == extv1beta1.OnDeleteDaemonSetStrategyType {
			return &HealthStatus{
				Status:  HealthStatusHealthy,
				Message: fmt.Sprintf("daemon set %d out of %d new pods have been updated", daemon.Status.UpdatedNumberScheduled, daemon.Status.DesiredNumberScheduled),
			}, nil
		}
		if daemon.Status.UpdatedNumberScheduled < daemon.Status.DesiredNumberScheduled {
			return &HealthStatus{
				Status:  HealthStatusProgressing,
				Message: fmt.Sprintf("Waiting for daemon set %q rollout to finish: %d out of %d new pods have been updated...", daemon.Name, daemon.Status.UpdatedNumberScheduled, daemon.Status.DesiredNumberScheduled),
			}, nil
		}
		if daemon.Status.NumberAvailable < daemon.Status.DesiredNumberScheduled {
			return &HealthStatus{
				Status:  HealthStatusProgressing,
				Message: fmt.Sprintf("Waiting for daemon set %q rollout to finish: %d of %d updated pods are available...", daemon.Name, daemon.Status.NumberAvailable, daemon.Status.DesiredNumberScheduled),
			}, nil
		}
	} else {
		return &HealthStatus{
			Status:  HealthStatusProgressing,
			Message: "Waiting for rollout to finish: observed daemon set generation less then desired generation",
			Message: "Waiting for rollout to finish: observed daemon set generation less than desired generation",
		}, nil
	}
	if daemon.Spec.UpdateStrategy.Type == appsv1.OnDeleteDaemonSetStrategyType {
		return &HealthStatus{
			Status:  HealthStatusHealthy,
			Message: fmt.Sprintf("daemon set %d out of %d new pods have been updated", daemon.Status.UpdatedNumberScheduled, daemon.Status.DesiredNumberScheduled),
		}, nil
	}
	if daemon.Status.UpdatedNumberScheduled < daemon.Status.DesiredNumberScheduled {
		return &HealthStatus{
			Status:  HealthStatusProgressing,
			Message: fmt.Sprintf("Waiting for daemon set %q rollout to finish: %d out of %d new pods have been updated...", daemon.Name, daemon.Status.UpdatedNumberScheduled, daemon.Status.DesiredNumberScheduled),
		}, nil
	}
	if daemon.Status.NumberAvailable < daemon.Status.DesiredNumberScheduled {
		return &HealthStatus{
			Status:  HealthStatusProgressing,
			Message: fmt.Sprintf("Waiting for daemon set %q rollout to finish: %d of %d updated pods are available...", daemon.Name, daemon.Status.NumberAvailable, daemon.Status.DesiredNumberScheduled),
		}, nil
	}
	return &HealthStatus{
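Every getter in these diffs follows the same two-step shape: convert the unstructured object into the typed API struct, then read its status fields. A minimal runnable sketch of that conversion step, using the same converter call these files use; the DaemonSet content below is made up for illustration, not a fixture from this repository:

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	// Hypothetical unstructured DaemonSet, as it would arrive from a dynamic client.
	obj := &unstructured.Unstructured{Object: map[string]any{
		"apiVersion": "apps/v1",
		"kind":       "DaemonSet",
		"metadata":   map[string]any{"name": "sample"},
		"status": map[string]any{
			"observedGeneration":     int64(1),
			"desiredNumberScheduled": int64(3),
			"updatedNumberScheduled": int64(2),
		},
	}}
	var daemon appsv1.DaemonSet
	// Same conversion call the health getters perform before inspecting status.
	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &daemon); err != nil {
		fmt.Println("conversion failed:", err)
		return
	}
	fmt.Printf("%d of %d pods updated\n", daemon.Status.UpdatedNumberScheduled, daemon.Status.DesiredNumberScheduled)
}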
@@ -3,12 +3,11 @@ package health
import (
	"fmt"

	"github.com/argoproj/gitops-engine/pkg/utils/kube"
	appsv1 "k8s.io/api/apps/v1"
	appsv1beta1 "k8s.io/api/apps/v1beta1"
	extv1beta1 "k8s.io/api/extensions/v1beta1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"

	"github.com/argoproj/gitops-engine/pkg/utils/kube"
)

func getDeploymentHealth(obj *unstructured.Unstructured) (*HealthStatus, error) {

@@ -18,23 +17,9 @@ func getDeploymentHealth(obj *unstructured.Unstructured) (*HealthStatus, error)
		var deployment appsv1.Deployment
		err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &deployment)
		if err != nil {
			return nil, fmt.Errorf("failed to convert unstructured Deployment to typed: %v", err)
			return nil, fmt.Errorf("failed to convert unstructured Deployment to typed: %w", err)
		}
		return getAppsv1DeploymentHealth(&deployment)
	case appsv1beta1.SchemeGroupVersion.WithKind(kube.DeploymentKind):
		var deployment appsv1beta1.Deployment
		err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &deployment)
		if err != nil {
			return nil, fmt.Errorf("failed to convert unstructured Deployment to typed: %v", err)
		}
		return getAppsv1beta1DeploymentHealth(&deployment)
	case extv1beta1.SchemeGroupVersion.WithKind(kube.DeploymentKind):
		var deployment extv1beta1.Deployment
		err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &deployment)
		if err != nil {
			return nil, fmt.Errorf("failed to convert unstructured Deployment to typed: %v", err)
		}
		return getExtv1beta1DeploymentHealth(&deployment)
	default:
		return nil, fmt.Errorf("unsupported Deployment GVK: %s", gvk)
	}

@@ -50,22 +35,23 @@ func getAppsv1DeploymentHealth(deployment *appsv1.Deployment) (*HealthStatus, er
	// Borrowed at kubernetes/kubectl/rollout_status.go https://github.com/kubernetes/kubernetes/blob/5232ad4a00ec93942d0b2c6359ee6cd1201b46bc/pkg/kubectl/rollout_status.go#L80
	if deployment.Generation <= deployment.Status.ObservedGeneration {
		cond := getAppsv1DeploymentCondition(deployment.Status, appsv1.DeploymentProgressing)
		if cond != nil && cond.Reason == "ProgressDeadlineExceeded" {
		switch {
		case cond != nil && cond.Reason == "ProgressDeadlineExceeded":
			return &HealthStatus{
				Status:  HealthStatusDegraded,
				Message: fmt.Sprintf("Deployment %q exceeded its progress deadline", deployment.Name),
			}, nil
		} else if deployment.Spec.Replicas != nil && deployment.Status.UpdatedReplicas < *deployment.Spec.Replicas {
		case deployment.Spec.Replicas != nil && deployment.Status.UpdatedReplicas < *deployment.Spec.Replicas:
			return &HealthStatus{
				Status:  HealthStatusProgressing,
				Message: fmt.Sprintf("Waiting for rollout to finish: %d out of %d new replicas have been updated...", deployment.Status.UpdatedReplicas, *deployment.Spec.Replicas),
			}, nil
		} else if deployment.Status.Replicas > deployment.Status.UpdatedReplicas {
		case deployment.Status.Replicas > deployment.Status.UpdatedReplicas:
			return &HealthStatus{
				Status:  HealthStatusProgressing,
				Message: fmt.Sprintf("Waiting for rollout to finish: %d old replicas are pending termination...", deployment.Status.Replicas-deployment.Status.UpdatedReplicas),
			}, nil
		} else if deployment.Status.AvailableReplicas < deployment.Status.UpdatedReplicas {
		case deployment.Status.AvailableReplicas < deployment.Status.UpdatedReplicas:
			return &HealthStatus{
				Status:  HealthStatusProgressing,
				Message: fmt.Sprintf("Waiting for rollout to finish: %d of %d updated replicas are available...", deployment.Status.AvailableReplicas, deployment.Status.UpdatedReplicas),

@@ -74,93 +60,7 @@ func getAppsv1DeploymentHealth(deployment *appsv1.Deployment) (*HealthStatus, er
	} else {
		return &HealthStatus{
			Status:  HealthStatusProgressing,
			Message: "Waiting for rollout to finish: observed deployment generation less then desired generation",
		}, nil
	}

	return &HealthStatus{
		Status: HealthStatusHealthy,
	}, nil
}

func getAppsv1beta1DeploymentHealth(deployment *appsv1beta1.Deployment) (*HealthStatus, error) {
	if deployment.Spec.Paused {
		return &HealthStatus{
			Status:  HealthStatusSuspended,
			Message: "Deployment is paused",
		}, nil
	}
	// Borrowed at kubernetes/kubectl/rollout_status.go https://github.com/kubernetes/kubernetes/blob/5232ad4a00ec93942d0b2c6359ee6cd1201b46bc/pkg/kubectl/rollout_status.go#L80
	if deployment.Generation <= deployment.Status.ObservedGeneration {
		cond := getAppsv1beta1DeploymentCondition(deployment.Status, appsv1beta1.DeploymentProgressing)
		if cond != nil && cond.Reason == "ProgressDeadlineExceeded" {
			return &HealthStatus{
				Status:  HealthStatusDegraded,
				Message: fmt.Sprintf("Deployment %q exceeded its progress deadline", deployment.Name),
			}, nil
		} else if deployment.Spec.Replicas != nil && deployment.Status.UpdatedReplicas < *deployment.Spec.Replicas {
			return &HealthStatus{
				Status:  HealthStatusProgressing,
				Message: fmt.Sprintf("Waiting for rollout to finish: %d out of %d new replicas have been updated...", deployment.Status.UpdatedReplicas, *deployment.Spec.Replicas),
			}, nil
		} else if deployment.Status.Replicas > deployment.Status.UpdatedReplicas {
			return &HealthStatus{
				Status:  HealthStatusProgressing,
				Message: fmt.Sprintf("Waiting for rollout to finish: %d old replicas are pending termination...", deployment.Status.Replicas-deployment.Status.UpdatedReplicas),
			}, nil
		} else if deployment.Status.AvailableReplicas < deployment.Status.UpdatedReplicas {
			return &HealthStatus{
				Status:  HealthStatusProgressing,
				Message: fmt.Sprintf("Waiting for rollout to finish: %d of %d updated replicas are available...", deployment.Status.AvailableReplicas, deployment.Status.UpdatedReplicas),
			}, nil
		}
	} else {
		return &HealthStatus{
			Status:  HealthStatusProgressing,
			Message: "Waiting for rollout to finish: observed deployment generation less then desired generation",
		}, nil
	}

	return &HealthStatus{
		Status: HealthStatusHealthy,
	}, nil
}

func getExtv1beta1DeploymentHealth(deployment *extv1beta1.Deployment) (*HealthStatus, error) {
	if deployment.Spec.Paused {
		return &HealthStatus{
			Status:  HealthStatusSuspended,
			Message: "Deployment is paused",
		}, nil
	}
	// Borrowed at kubernetes/kubectl/rollout_status.go https://github.com/kubernetes/kubernetes/blob/5232ad4a00ec93942d0b2c6359ee6cd1201b46bc/pkg/kubectl/rollout_status.go#L80
	if deployment.Generation <= deployment.Status.ObservedGeneration {
		cond := getExtv1beta1DeploymentCondition(deployment.Status, extv1beta1.DeploymentProgressing)
		if cond != nil && cond.Reason == "ProgressDeadlineExceeded" {
			return &HealthStatus{
				Status:  HealthStatusDegraded,
				Message: fmt.Sprintf("Deployment %q exceeded its progress deadline", deployment.Name),
			}, nil
		} else if deployment.Spec.Replicas != nil && deployment.Status.UpdatedReplicas < *deployment.Spec.Replicas {
			return &HealthStatus{
				Status:  HealthStatusProgressing,
				Message: fmt.Sprintf("Waiting for rollout to finish: %d out of %d new replicas have been updated...", deployment.Status.UpdatedReplicas, *deployment.Spec.Replicas),
			}, nil
		} else if deployment.Status.Replicas > deployment.Status.UpdatedReplicas {
			return &HealthStatus{
				Status:  HealthStatusProgressing,
				Message: fmt.Sprintf("Waiting for rollout to finish: %d old replicas are pending termination...", deployment.Status.Replicas-deployment.Status.UpdatedReplicas),
			}, nil
		} else if deployment.Status.AvailableReplicas < deployment.Status.UpdatedReplicas {
			return &HealthStatus{
				Status:  HealthStatusProgressing,
				Message: fmt.Sprintf("Waiting for rollout to finish: %d of %d updated replicas are available...", deployment.Status.AvailableReplicas, deployment.Status.UpdatedReplicas),
			}, nil
		}
	} else {
		return &HealthStatus{
			Status:  HealthStatusProgressing,
			Message: "Waiting for rollout to finish: observed deployment generation less then desired generation",
			Message: "Waiting for rollout to finish: observed deployment generation less than desired generation",
		}, nil
	}

@@ -178,22 +78,3 @@ func getAppsv1DeploymentCondition(status appsv1.DeploymentStatus, condType appsv
	}
	return nil
}

func getAppsv1beta1DeploymentCondition(status appsv1beta1.DeploymentStatus, condType appsv1beta1.DeploymentConditionType) *appsv1beta1.DeploymentCondition {
	for i := range status.Conditions {
		c := status.Conditions[i]
		if c.Type == condType {
			return &c
		}
	}
	return nil
}

func getExtv1beta1DeploymentCondition(status extv1beta1.DeploymentStatus, condType extv1beta1.DeploymentConditionType) *extv1beta1.DeploymentCondition {
	for i := range status.Conditions {
		c := status.Conditions[i]
		if c.Type == condType {
			return &c
		}
	}
	return nil
}
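A recurring change across these files is swapping %v for %w in fmt.Errorf when wrapping the conversion error. The practical difference shows up with errors.Is and errors.As; a small self-contained sketch (the sentinel error is hypothetical, not part of this package):

package main

import (
	"errors"
	"fmt"
)

var errConvert = errors.New("conversion failed") // hypothetical sentinel error

func main() {
	wrapped := fmt.Errorf("failed to convert unstructured Deployment to typed: %w", errConvert)
	flattened := fmt.Errorf("failed to convert unstructured Deployment to typed: %v", errConvert)

	fmt.Println(errors.Is(wrapped, errConvert))   // true: %w preserves the error chain
	fmt.Println(errors.Is(flattened, errConvert)) // false: %v only keeps the text
}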
@@ -0,0 +1,175 @@
package health

import (
	"encoding/json"
	"fmt"

	autoscalingv1 "k8s.io/api/autoscaling/v1"
	autoscalingv2 "k8s.io/api/autoscaling/v2"
	autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1"
	autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"

	"github.com/argoproj/gitops-engine/pkg/utils/kube"
)

var progressingStatus = &HealthStatus{
	Status:  HealthStatusProgressing,
	Message: "Waiting to Autoscale",
}

type hpaCondition struct {
	Type    string
	Reason  string
	Message string
	Status  string
}

func getHPAHealth(obj *unstructured.Unstructured) (*HealthStatus, error) {
	gvk := obj.GroupVersionKind()
	failedConversionMsg := "failed to convert unstructured HPA to typed: %v"

	switch gvk {
	case autoscalingv1.SchemeGroupVersion.WithKind(kube.HorizontalPodAutoscalerKind):
		var hpa autoscalingv1.HorizontalPodAutoscaler
		err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &hpa)
		if err != nil {
			return nil, fmt.Errorf(failedConversionMsg, err)
		}
		return getAutoScalingV1HPAHealth(&hpa)
	case autoscalingv2beta1.SchemeGroupVersion.WithKind(kube.HorizontalPodAutoscalerKind):
		var hpa autoscalingv2beta1.HorizontalPodAutoscaler
		err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &hpa)
		if err != nil {
			return nil, fmt.Errorf(failedConversionMsg, err)
		}
		return getAutoScalingV2beta1HPAHealth(&hpa)
	case autoscalingv2beta2.SchemeGroupVersion.WithKind(kube.HorizontalPodAutoscalerKind):
		var hpa autoscalingv2beta2.HorizontalPodAutoscaler
		err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &hpa)
		if err != nil {
			return nil, fmt.Errorf(failedConversionMsg, err)
		}
		return getAutoScalingV2beta2HPAHealth(&hpa)
	case autoscalingv2.SchemeGroupVersion.WithKind(kube.HorizontalPodAutoscalerKind):
		var hpa autoscalingv2.HorizontalPodAutoscaler
		err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &hpa)
		if err != nil {
			return nil, fmt.Errorf(failedConversionMsg, err)
		}
		return getAutoScalingV2HPAHealth(&hpa)
	default:
		return nil, fmt.Errorf("unsupported HPA GVK: %s", gvk)
	}
}

func getAutoScalingV2HPAHealth(hpa *autoscalingv2.HorizontalPodAutoscaler) (*HealthStatus, error) {
	statusConditions := hpa.Status.Conditions
	conditions := make([]hpaCondition, 0, len(statusConditions))
	for _, statusCondition := range statusConditions {
		conditions = append(conditions, hpaCondition{
			Type:    string(statusCondition.Type),
			Reason:  statusCondition.Reason,
			Message: statusCondition.Message,
			Status:  string(statusCondition.Status),
		})
	}

	return checkConditions(conditions, progressingStatus)
}

func getAutoScalingV2beta2HPAHealth(hpa *autoscalingv2beta2.HorizontalPodAutoscaler) (*HealthStatus, error) {
	statusConditions := hpa.Status.Conditions
	conditions := make([]hpaCondition, 0, len(statusConditions))
	for _, statusCondition := range statusConditions {
		conditions = append(conditions, hpaCondition{
			Type:    string(statusCondition.Type),
			Reason:  statusCondition.Reason,
			Message: statusCondition.Message,
			Status:  string(statusCondition.Status),
		})
	}

	return checkConditions(conditions, progressingStatus)
}

func getAutoScalingV2beta1HPAHealth(hpa *autoscalingv2beta1.HorizontalPodAutoscaler) (*HealthStatus, error) {
	statusConditions := hpa.Status.Conditions
	conditions := make([]hpaCondition, 0, len(statusConditions))
	for _, statusCondition := range statusConditions {
		conditions = append(conditions, hpaCondition{
			Type:    string(statusCondition.Type),
			Reason:  statusCondition.Reason,
			Message: statusCondition.Message,
			Status:  string(statusCondition.Status),
		})
	}

	return checkConditions(conditions, progressingStatus)
}

func getAutoScalingV1HPAHealth(hpa *autoscalingv1.HorizontalPodAutoscaler) (*HealthStatus, error) {
	annotation, ok := hpa.GetAnnotations()["autoscaling.alpha.kubernetes.io/conditions"]
	if !ok {
		return progressingStatus, nil
	}

	var conditions []hpaCondition
	err := json.Unmarshal([]byte(annotation), &conditions)
	if err != nil {
		failedMessage := "failed to convert conditions annotation to typed: %v"
		return nil, fmt.Errorf(failedMessage, err)
	}

	if len(conditions) == 0 {
		return progressingStatus, nil
	}

	return checkConditions(conditions, progressingStatus)
}

func checkConditions(conditions []hpaCondition, progressingStatus *HealthStatus) (*HealthStatus, error) {
	for _, condition := range conditions {
		if isDegraded(&condition) {
			return &HealthStatus{
				Status:  HealthStatusDegraded,
				Message: condition.Message,
			}, nil
		}

		if isHealthy(&condition) {
			return &HealthStatus{
				Status:  HealthStatusHealthy,
				Message: condition.Message,
			}, nil
		}
	}

	return progressingStatus, nil
}

func isDegraded(condition *hpaCondition) bool {
	degraded_states := []hpaCondition{
		{Type: "AbleToScale", Reason: "FailedGetScale"},
		{Type: "AbleToScale", Reason: "FailedUpdateScale"},
		{Type: "ScalingActive", Reason: "FailedGetResourceMetric"},
		{Type: "ScalingActive", Reason: "InvalidSelector"},
	}
	for _, degraded_state := range degraded_states {
		if condition.Type == degraded_state.Type && condition.Reason == degraded_state.Reason {
			return true
		}
	}
	return false
}

func isHealthy(condition *hpaCondition) bool {
	healthyConditionTypes := []string{"AbleToScale", "ScalingLimited"}
	for _, conditionType := range healthyConditionTypes {
		if condition.Type == conditionType && condition.Status == "True" {
			return true
		}
	}
	return false
}
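All four typed HPA getters funnel into checkConditions, and the autoscaling/v1 path first recovers conditions from the autoscaling.alpha.kubernetes.io/conditions annotation via json.Unmarshal. Because hpaCondition carries no JSON tags, the lowercase keys in the annotation match the exported fields case-insensitively. A runnable sketch of that decoding step with a made-up annotation value:

package main

import (
	"encoding/json"
	"fmt"
)

// Local copy of the hpaCondition shape defined above, for a self-contained demo.
type hpaCondition struct {
	Type    string
	Reason  string
	Message string
	Status  string
}

func main() {
	// Abridged, made-up value in the shape of the
	// autoscaling.alpha.kubernetes.io/conditions annotation used by the v1 fixtures.
	annotation := `[{"type":"AbleToScale","status":"True","reason":"ReadyForNewScale","message":"recommended size matches current size"}]`

	var conditions []hpaCondition
	if err := json.Unmarshal([]byte(annotation), &conditions); err != nil {
		fmt.Println("failed to convert conditions annotation to typed:", err)
		return
	}
	// Lowercase JSON keys bind to the exported fields case-insensitively.
	fmt.Printf("%s/%s -> %s\n", conditions[0].Type, conditions[0].Reason, conditions[0].Status)
}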
@@ -1,68 +1,13 @@
package health

import (
	"fmt"

	"github.com/argoproj/gitops-engine/pkg/utils/kube"
	extv1beta1 "k8s.io/api/extensions/v1beta1"
	networkingv1 "k8s.io/api/networking/v1"
	networkingv1beta1 "k8s.io/api/networking/v1beta1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
)

func getIngressHealth(obj *unstructured.Unstructured) (*HealthStatus, error) {
	gvk := obj.GroupVersionKind()
	switch gvk {
	case networkingv1.SchemeGroupVersion.WithKind(kube.IngressKind):
		var ingress networkingv1.Ingress
		err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &ingress)
		if err != nil {
			return nil, fmt.Errorf("failed to convert unstructured Ingress to typed: %v", err)
		}
		return getNetworkingv1IngressHealth(&ingress)
	case networkingv1beta1.SchemeGroupVersion.WithKind(kube.IngressKind):
		var ingress networkingv1beta1.Ingress
		err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &ingress)
		if err != nil {
			return nil, fmt.Errorf("failed to convert unstructured Ingress to typed: %v", err)
		}
		return getNetworkingv1beta1IngressHealth(&ingress)
	case extv1beta1.SchemeGroupVersion.WithKind(kube.IngressKind):
		var ingress extv1beta1.Ingress
		err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &ingress)
		if err != nil {
			return nil, fmt.Errorf("failed to convert unstructured Ingress to typed: %v", err)
		}
		return getExtv1beta1IngressHealth(&ingress)
	default:
		return nil, fmt.Errorf("unsupported Ingress GVK: %s", gvk)
	}
}

func getNetworkingv1IngressHealth(ingress *networkingv1.Ingress) (*HealthStatus, error) {
	ingresses, _, _ := unstructured.NestedSlice(obj.Object, "status", "loadBalancer", "ingress")
	health := HealthStatus{}
	if len(ingress.Status.LoadBalancer.Ingress) > 0 {
		health.Status = HealthStatusHealthy
	} else {
		health.Status = HealthStatusProgressing
	}
	return &health, nil
}

func getNetworkingv1beta1IngressHealth(ingress *networkingv1beta1.Ingress) (*HealthStatus, error) {
	health := HealthStatus{}
	if len(ingress.Status.LoadBalancer.Ingress) > 0 {
		health.Status = HealthStatusHealthy
	} else {
		health.Status = HealthStatusProgressing
	}
	return &health, nil
}

func getExtv1beta1IngressHealth(ingress *extv1beta1.Ingress) (*HealthStatus, error) {
	health := HealthStatus{}
	if len(ingress.Status.LoadBalancer.Ingress) > 0 {
	if len(ingresses) > 0 {
		health.Status = HealthStatusHealthy
	} else {
		health.Status = HealthStatusProgressing
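Each Ingress getter reduces to a single signal: whether status.loadBalancer.ingress is non-empty. The pre-refactor line still visible above read that same path with unstructured.NestedSlice; a runnable sketch of the check against made-up content:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

func main() {
	// Hypothetical Ingress as unstructured content; only the status matters here.
	obj := &unstructured.Unstructured{Object: map[string]any{
		"apiVersion": "networking.k8s.io/v1",
		"kind":       "Ingress",
		"status": map[string]any{
			"loadBalancer": map[string]any{
				"ingress": []any{
					map[string]any{"ip": "203.0.113.10"}, // made-up address
				},
			},
		},
	}}
	// The same path the pre-refactor code read with unstructured.NestedSlice.
	ingresses, _, _ := unstructured.NestedSlice(obj.Object, "status", "loadBalancer", "ingress")
	if len(ingresses) > 0 {
		fmt.Println("Healthy: load balancer address assigned")
	} else {
		fmt.Println("Progressing: waiting for load balancer")
	}
}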
@@ -3,10 +3,13 @@ package health
import (
	"fmt"

	"github.com/argoproj/gitops-engine/pkg/utils/kube"
	corev1 "k8s.io/api/core/v1"

	batchv1 "k8s.io/api/batch/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"

	"github.com/argoproj/gitops-engine/pkg/utils/kube"
)

func getJobHealth(obj *unstructured.Unstructured) (*HealthStatus, error) {

@@ -16,7 +19,7 @@ func getJobHealth(obj *unstructured.Unstructured) (*HealthStatus, error) {
		var job batchv1.Job
		err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &job)
		if err != nil {
			return nil, fmt.Errorf("failed to convert unstructured Job to typed: %v", err)
			return nil, fmt.Errorf("failed to convert unstructured Job to typed: %w", err)
		}
		return getBatchv1JobHealth(&job)
	default:

@@ -29,6 +32,7 @@ func getBatchv1JobHealth(job *batchv1.Job) (*HealthStatus, error) {
	var failMsg string
	complete := false
	var message string
	isSuspended := false
	for _, condition := range job.Status.Conditions {
		switch condition.Type {
		case batchv1.JobFailed:

@@ -38,19 +42,31 @@ func getBatchv1JobHealth(job *batchv1.Job) (*HealthStatus, error) {
		case batchv1.JobComplete:
			complete = true
			message = condition.Message
		case batchv1.JobSuspended:
			complete = true
			message = condition.Message
			if condition.Status == corev1.ConditionTrue {
				isSuspended = true
			}
		}
	}
	if !complete {
	switch {
	case !complete:
		return &HealthStatus{
			Status:  HealthStatusProgressing,
			Message: message,
		}, nil
	} else if failed {
	case failed:
		return &HealthStatus{
			Status:  HealthStatusDegraded,
			Message: failMsg,
		}, nil
	} else {
	case isSuspended:
		return &HealthStatus{
			Status:  HealthStatusSuspended,
			Message: failMsg,
		}, nil
	default:
		return &HealthStatus{
			Status:  HealthStatusHealthy,
			Message: message,
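The new JobSuspended branch above only flips the result to HealthStatusSuspended when the condition's status is actually true, since a resumed Job can still carry a Suspended condition with status False. A runnable sketch of the condition shape that triggers it (field values made up):

package main

import (
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
)

func main() {
	// Conditions as they might appear on a suspended Job (message is made up).
	job := batchv1.Job{
		Status: batchv1.JobStatus{
			Conditions: []batchv1.JobCondition{
				{Type: batchv1.JobSuspended, Status: corev1.ConditionTrue, Message: "Job suspended"},
			},
		},
	}
	for _, condition := range job.Status.Conditions {
		// Mirrors the check above: type Suspended AND status True.
		if condition.Type == batchv1.JobSuspended && condition.Status == corev1.ConditionTrue {
			fmt.Println("Suspended:", condition.Message)
		}
	}
}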
@@ -4,11 +4,12 @@ import (
	"fmt"
	"strings"

	"github.com/argoproj/gitops-engine/pkg/utils/kube"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
	"k8s.io/kubectl/pkg/util/podutils"

	"github.com/argoproj/gitops-engine/pkg/utils/kube"
)

func getPodHealth(obj *unstructured.Unstructured) (*HealthStatus, error) {

@@ -18,7 +19,7 @@ func getPodHealth(obj *unstructured.Unstructured) (*HealthStatus, error) {
	var pod corev1.Pod
	err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &pod)
	if err != nil {
		return nil, fmt.Errorf("failed to convert unstructured Pod to typed: %v", err)
		return nil, fmt.Errorf("failed to convert unstructured Pod to typed: %w", err)
	}
	return getCorev1PodHealth(&pod)
	default:

@@ -96,7 +97,7 @@ func getCorev1PodHealth(pod *corev1.Pod) (*HealthStatus, error) {
	switch pod.Spec.RestartPolicy {
	case corev1.RestartPolicyAlways:
		// if pod is ready, it is automatically healthy
		if podutil.IsPodReady(pod) {
		if podutils.IsPodReady(pod) {
			return &HealthStatus{
				Status:  HealthStatusHealthy,
				Message: pod.Status.Message,
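The pod import swap above replaces the internal k8s.io/kubernetes podutil package with the public k8s.io/kubectl/pkg/util/podutils one; the readiness predicate behaves the same way. A runnable sketch (pod content made up):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/kubectl/pkg/util/podutils"
)

func main() {
	pod := corev1.Pod{
		Status: corev1.PodStatus{
			Phase: corev1.PodRunning,
			Conditions: []corev1.PodCondition{
				{Type: corev1.PodReady, Status: corev1.ConditionTrue},
			},
		},
	}
	// IsPodReady looks for the Ready condition with status True.
	fmt.Println(podutils.IsPodReady(&pod)) // true
}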
@@ -3,10 +3,11 @@ package health
import (
	"fmt"

	"github.com/argoproj/gitops-engine/pkg/utils/kube"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"

	"github.com/argoproj/gitops-engine/pkg/utils/kube"
)

func getPVCHealth(obj *unstructured.Unstructured) (*HealthStatus, error) {

@@ -16,7 +17,7 @@ func getPVCHealth(obj *unstructured.Unstructured) (*HealthStatus, error) {
	var pvc corev1.PersistentVolumeClaim
	err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &pvc)
	if err != nil {
		return nil, fmt.Errorf("failed to convert unstructured PersistentVolumeClaim to typed: %v", err)
		return nil, fmt.Errorf("failed to convert unstructured PersistentVolumeClaim to typed: %w", err)
	}
	return getCorev1PVCHealth(&pvc)
	default:
@@ -3,13 +3,12 @@ package health
import (
	"fmt"

	"github.com/argoproj/gitops-engine/pkg/utils/kube"
	appsv1 "k8s.io/api/apps/v1"
	appsv1beta2 "k8s.io/api/apps/v1beta2"
	corev1 "k8s.io/api/core/v1"
	extv1beta1 "k8s.io/api/extensions/v1beta1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"

	"github.com/argoproj/gitops-engine/pkg/utils/kube"
)

func getReplicaSetHealth(obj *unstructured.Unstructured) (*HealthStatus, error) {

@@ -19,23 +18,9 @@ func getReplicaSetHealth(obj *unstructured.Unstructured) (*HealthStatus, error)
		var replicaSet appsv1.ReplicaSet
		err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &replicaSet)
		if err != nil {
			return nil, fmt.Errorf("failed to convert unstructured ReplicaSet to typed: %v", err)
			return nil, fmt.Errorf("failed to convert unstructured ReplicaSet to typed: %w", err)
		}
		return getAppsv1ReplicaSetHealth(&replicaSet)
	case appsv1beta2.SchemeGroupVersion.WithKind(kube.ReplicaSetKind):
		var replicaSet appsv1beta2.ReplicaSet
		err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &replicaSet)
		if err != nil {
			return nil, fmt.Errorf("failed to convert unstructured ReplicaSet to typed: %v", err)
		}
		return getAppsv1beta1ReplicaSetHealth(&replicaSet)
	case extv1beta1.SchemeGroupVersion.WithKind(kube.ReplicaSetKind):
		var replicaSet extv1beta1.ReplicaSet
		err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &replicaSet)
		if err != nil {
			return nil, fmt.Errorf("failed to convert unstructured ReplicaSet to typed: %v", err)
		}
		return getExtv1beta1ReplicaSetHealth(&replicaSet)
	default:
		return nil, fmt.Errorf("unsupported ReplicaSet GVK: %s", gvk)
	}

@@ -58,59 +43,7 @@ func getAppsv1ReplicaSetHealth(replicaSet *appsv1.ReplicaSet) (*HealthStatus, er
	} else {
		return &HealthStatus{
			Status:  HealthStatusProgressing,
			Message: "Waiting for rollout to finish: observed replica set generation less then desired generation",
		}, nil
	}

	return &HealthStatus{
		Status: HealthStatusHealthy,
	}, nil
}

func getAppsv1beta1ReplicaSetHealth(replicaSet *appsv1beta2.ReplicaSet) (*HealthStatus, error) {
	if replicaSet.Generation <= replicaSet.Status.ObservedGeneration {
		cond := getAppsv1beta2ReplicaSetCondition(replicaSet.Status, appsv1beta2.ReplicaSetReplicaFailure)
		if cond != nil && cond.Status == corev1.ConditionTrue {
			return &HealthStatus{
				Status:  HealthStatusDegraded,
				Message: cond.Message,
			}, nil
		} else if replicaSet.Spec.Replicas != nil && replicaSet.Status.AvailableReplicas < *replicaSet.Spec.Replicas {
			return &HealthStatus{
				Status:  HealthStatusProgressing,
				Message: fmt.Sprintf("Waiting for rollout to finish: %d out of %d new replicas are available...", replicaSet.Status.AvailableReplicas, *replicaSet.Spec.Replicas),
			}, nil
		}
	} else {
		return &HealthStatus{
			Status:  HealthStatusProgressing,
			Message: "Waiting for rollout to finish: observed replica set generation less then desired generation",
		}, nil
	}

	return &HealthStatus{
		Status: HealthStatusHealthy,
	}, nil
}

func getExtv1beta1ReplicaSetHealth(replicaSet *extv1beta1.ReplicaSet) (*HealthStatus, error) {
	if replicaSet.Generation <= replicaSet.Status.ObservedGeneration {
		cond := getExtv1beta1ReplicaSetCondition(replicaSet.Status, extv1beta1.ReplicaSetReplicaFailure)
		if cond != nil && cond.Status == corev1.ConditionTrue {
			return &HealthStatus{
				Status:  HealthStatusDegraded,
				Message: cond.Message,
			}, nil
		} else if replicaSet.Spec.Replicas != nil && replicaSet.Status.AvailableReplicas < *replicaSet.Spec.Replicas {
			return &HealthStatus{
				Status:  HealthStatusProgressing,
				Message: fmt.Sprintf("Waiting for rollout to finish: %d out of %d new replicas are available...", replicaSet.Status.AvailableReplicas, *replicaSet.Spec.Replicas),
			}, nil
		}
	} else {
		return &HealthStatus{
			Status:  HealthStatusProgressing,
			Message: "Waiting for rollout to finish: observed replica set generation less then desired generation",
			Message: "Waiting for rollout to finish: observed replica set generation less than desired generation",
		}, nil
	}

@@ -128,23 +61,3 @@ func getAppsv1ReplicaSetCondition(status appsv1.ReplicaSetStatus, condType appsv
	}
	return nil
}

func getAppsv1beta2ReplicaSetCondition(status appsv1beta2.ReplicaSetStatus, condType appsv1beta2.ReplicaSetConditionType) *appsv1beta2.ReplicaSetCondition {
	for i := range status.Conditions {
		c := status.Conditions[i]
		if c.Type == condType {
			return &c
		}
	}
	return nil
}

func getExtv1beta1ReplicaSetCondition(status extv1beta1.ReplicaSetStatus, condType extv1beta1.ReplicaSetConditionType) *extv1beta1.ReplicaSetCondition {
	for i := range status.Conditions {
		c := status.Conditions[i]
		if c.Type == condType {
			return &c
		}
	}
	return nil
}
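Note that the condition helpers above copy status.Conditions[i] into a local variable before returning its address. A small stand-alone illustration of what that buys: the returned pointer stays stable even if the caller later mutates the slice (the type here is a local stand-in, not the API type):

package main

import "fmt"

type condition struct{ Type, Message string }

func findCopy(conds []condition, t string) *condition {
	for i := range conds {
		c := conds[i] // copy first, as the helpers above do
		if c.Type == t {
			return &c
		}
	}
	return nil
}

func main() {
	conds := []condition{{Type: "Progressing", Message: "ok"}}
	got := findCopy(conds, "Progressing")
	conds[0].Message = "changed" // mutating the slice...
	fmt.Println(got.Message)     // ...does not affect the returned copy: prints "ok"
}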
@@ -3,10 +3,11 @@ package health
import (
	"fmt"

	"github.com/argoproj/gitops-engine/pkg/utils/kube"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"

	"github.com/argoproj/gitops-engine/pkg/utils/kube"
)

func getServiceHealth(obj *unstructured.Unstructured) (*HealthStatus, error) {

@@ -16,7 +17,7 @@ func getServiceHealth(obj *unstructured.Unstructured) (*HealthStatus, error) {
	var service corev1.Service
	err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &service)
	if err != nil {
		return nil, fmt.Errorf("failed to convert unstructured Service to typed: %v", err)
		return nil, fmt.Errorf("failed to convert unstructured Service to typed: %w", err)
	}
	return getCorev1ServiceHealth(&service)
	default:
@@ -3,12 +3,11 @@ package health
import (
	"fmt"

	"github.com/argoproj/gitops-engine/pkg/utils/kube"
	appsv1 "k8s.io/api/apps/v1"
	appsv1beta1 "k8s.io/api/apps/v1beta1"
	appsv1beta2 "k8s.io/api/apps/v1beta2"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"

	"github.com/argoproj/gitops-engine/pkg/utils/kube"
)

func getStatefulSetHealth(obj *unstructured.Unstructured) (*HealthStatus, error) {

@@ -18,23 +17,9 @@ func getStatefulSetHealth(obj *unstructured.Unstructured) (*HealthStatus, error)
		var sts appsv1.StatefulSet
		err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &sts)
		if err != nil {
			return nil, fmt.Errorf("failed to convert unstructured StatefulSet to typed: %v", err)
			return nil, fmt.Errorf("failed to convert unstructured StatefulSet to typed: %w", err)
		}
		return getAppsv1StatefulSetHealth(&sts)
	case appsv1beta1.SchemeGroupVersion.WithKind(kube.StatefulSetKind):
		var sts appsv1beta1.StatefulSet
		err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &sts)
		if err != nil {
			return nil, fmt.Errorf("failed to convert unstructured StatefulSet to typed: %v", err)
		}
		return getAppsv1beta1StatefulSetHealth(&sts)
	case appsv1beta2.SchemeGroupVersion.WithKind(kube.StatefulSetKind):
		var sts appsv1beta2.StatefulSet
		err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &sts)
		if err != nil {
			return nil, fmt.Errorf("failed to convert unstructured StatefulSet to typed: %v", err)
		}
		return getAppsv1beta2StatefulSetHealth(&sts)
	default:
		return nil, fmt.Errorf("unsupported StatefulSet GVK: %s", gvk)
	}

@@ -86,102 +71,3 @@ func getAppsv1StatefulSetHealth(sts *appsv1.StatefulSet) (*HealthStatus, error)
		Message: fmt.Sprintf("statefulset rolling update complete %d pods at revision %s...", sts.Status.CurrentReplicas, sts.Status.CurrentRevision),
	}, nil
}

func getAppsv1beta1StatefulSetHealth(sts *appsv1beta1.StatefulSet) (*HealthStatus, error) {
	// Borrowed at kubernetes/kubectl/rollout_status.go https://github.com/kubernetes/kubernetes/blob/5232ad4a00ec93942d0b2c6359ee6cd1201b46bc/pkg/kubectl/rollout_status.go#L131
	observedGeneration := sts.Status.ObservedGeneration
	if observedGeneration == nil {
		var x int64
		observedGeneration = &x
	}
	if *observedGeneration == 0 || sts.Generation > *observedGeneration {
		return &HealthStatus{
			Status:  HealthStatusProgressing,
			Message: "Waiting for statefulset spec update to be observed...",
		}, nil
	}
	if sts.Spec.Replicas != nil && sts.Status.ReadyReplicas < *sts.Spec.Replicas {
		return &HealthStatus{
			Status:  HealthStatusProgressing,
			Message: fmt.Sprintf("Waiting for %d pods to be ready...", *sts.Spec.Replicas-sts.Status.ReadyReplicas),
		}, nil
	}
	if sts.Spec.UpdateStrategy.Type == appsv1beta1.RollingUpdateStatefulSetStrategyType && sts.Spec.UpdateStrategy.RollingUpdate != nil {
		if sts.Spec.Replicas != nil && sts.Spec.UpdateStrategy.RollingUpdate.Partition != nil {
			if sts.Status.UpdatedReplicas < (*sts.Spec.Replicas - *sts.Spec.UpdateStrategy.RollingUpdate.Partition) {
				return &HealthStatus{
					Status: HealthStatusProgressing,
					Message: fmt.Sprintf("Waiting for partitioned roll out to finish: %d out of %d new pods have been updated...",
						sts.Status.UpdatedReplicas, (*sts.Spec.Replicas - *sts.Spec.UpdateStrategy.RollingUpdate.Partition)),
				}, nil
			}
		}
		return &HealthStatus{
			Status:  HealthStatusHealthy,
			Message: fmt.Sprintf("partitioned roll out complete: %d new pods have been updated...", sts.Status.UpdatedReplicas),
		}, nil
	}
	if sts.Spec.UpdateStrategy.Type == appsv1beta1.OnDeleteStatefulSetStrategyType {
		return &HealthStatus{
			Status:  HealthStatusHealthy,
			Message: fmt.Sprintf("statefulset has %d ready pods", sts.Status.ReadyReplicas),
		}, nil
	}
	if sts.Status.UpdateRevision != sts.Status.CurrentRevision {
		return &HealthStatus{
			Status:  HealthStatusProgressing,
			Message: fmt.Sprintf("waiting for statefulset rolling update to complete %d pods at revision %s...", sts.Status.UpdatedReplicas, sts.Status.UpdateRevision),
		}, nil
	}
	return &HealthStatus{
		Status:  HealthStatusHealthy,
		Message: fmt.Sprintf("statefulset rolling update complete %d pods at revision %s...", sts.Status.CurrentReplicas, sts.Status.CurrentRevision),
	}, nil
}

func getAppsv1beta2StatefulSetHealth(sts *appsv1beta2.StatefulSet) (*HealthStatus, error) {
	// Borrowed at kubernetes/kubectl/rollout_status.go https://github.com/kubernetes/kubernetes/blob/5232ad4a00ec93942d0b2c6359ee6cd1201b46bc/pkg/kubectl/rollout_status.go#L131
	if sts.Status.ObservedGeneration == 0 || sts.Generation > sts.Status.ObservedGeneration {
		return &HealthStatus{
			Status:  HealthStatusProgressing,
			Message: "Waiting for statefulset spec update to be observed...",
		}, nil
	}
	if sts.Spec.Replicas != nil && sts.Status.ReadyReplicas < *sts.Spec.Replicas {
		return &HealthStatus{
			Status:  HealthStatusProgressing,
			Message: fmt.Sprintf("Waiting for %d pods to be ready...", *sts.Spec.Replicas-sts.Status.ReadyReplicas),
		}, nil
	}
	if sts.Spec.UpdateStrategy.Type == appsv1beta2.RollingUpdateStatefulSetStrategyType && sts.Spec.UpdateStrategy.RollingUpdate != nil {
		if sts.Spec.Replicas != nil && sts.Spec.UpdateStrategy.RollingUpdate.Partition != nil {
			if sts.Status.UpdatedReplicas < (*sts.Spec.Replicas - *sts.Spec.UpdateStrategy.RollingUpdate.Partition) {
				return &HealthStatus{
					Status: HealthStatusProgressing,
					Message: fmt.Sprintf("Waiting for partitioned roll out to finish: %d out of %d new pods have been updated...",
						sts.Status.UpdatedReplicas, (*sts.Spec.Replicas - *sts.Spec.UpdateStrategy.RollingUpdate.Partition)),
				}, nil
			}
		}
		return &HealthStatus{
			Status:  HealthStatusHealthy,
			Message: fmt.Sprintf("partitioned roll out complete: %d new pods have been updated...", sts.Status.UpdatedReplicas),
		}, nil
	}
	if sts.Spec.UpdateStrategy.Type == appsv1beta2.OnDeleteStatefulSetStrategyType {
		return &HealthStatus{
			Status:  HealthStatusHealthy,
			Message: fmt.Sprintf("statefulset has %d ready pods", sts.Status.ReadyReplicas),
		}, nil
	}
	if sts.Status.UpdateRevision != sts.Status.CurrentRevision {
		return &HealthStatus{
			Status:  HealthStatusProgressing,
			Message: fmt.Sprintf("waiting for statefulset rolling update to complete %d pods at revision %s...", sts.Status.UpdatedReplicas, sts.Status.UpdateRevision),
		}, nil
	}
	return &HealthStatus{
		Status:  HealthStatusHealthy,
		Message: fmt.Sprintf("statefulset rolling update complete %d pods at revision %s...", sts.Status.CurrentReplicas, sts.Status.CurrentRevision),
	}, nil
}
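The partitioned rolling-update branch above compares Status.UpdatedReplicas against *spec.Replicas minus *partition, because only pods with an ordinal at or above the partition are updated. A runnable sketch with made-up numbers (5 replicas, partition 2, so 3 pods must reach the new revision):

package main

import "fmt"

func main() {
	// Hypothetical values: spec.replicas=5, updateStrategy.rollingUpdate.partition=2.
	replicas := int32(5)
	partition := int32(2)
	updated := int32(1)

	expected := replicas - partition // pods with ordinal >= partition get updated
	if updated < expected {
		fmt.Printf("Waiting for partitioned roll out to finish: %d out of %d new pods have been updated...\n", updated, expected)
	} else {
		fmt.Printf("partitioned roll out complete: %d new pods have been updated...\n", updated)
	}
}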
@@ -5,7 +5,7 @@ Package provides functionality that allows assessing the health state of a Kuber
package health

import (
	"io/ioutil"
	"os"
	"testing"

	"github.com/stretchr/testify/assert"

@@ -15,13 +15,15 @@ import (
)

func assertAppHealth(t *testing.T, yamlPath string, expectedStatus HealthStatusCode) {
	health := getHealthStatus(yamlPath, t)
	t.Helper()
	health := getHealthStatus(t, yamlPath)
	assert.NotNil(t, health)
	assert.Equal(t, expectedStatus, health.Status)
}

func getHealthStatus(yamlPath string, t *testing.T) *HealthStatus {
	yamlBytes, err := ioutil.ReadFile(yamlPath)
func getHealthStatus(t *testing.T, yamlPath string) *HealthStatus {
	t.Helper()
	yamlBytes, err := os.ReadFile(yamlPath)
	require.NoError(t, err)
	var obj unstructured.Unstructured
	err = yaml.Unmarshal(yamlBytes, &obj)

@@ -49,6 +51,7 @@ func TestStatefulSetOnDeleteHealth(t *testing.T) {
func TestDaemonSetOnDeleteHealth(t *testing.T) {
	assertAppHealth(t, "./testdata/daemonset-ondelete.yaml", HealthStatusHealthy)
}

func TestPVCHealth(t *testing.T) {
	assertAppHealth(t, "./testdata/pvc-bound.yaml", HealthStatusHealthy)
	assertAppHealth(t, "./testdata/pvc-pending.yaml", HealthStatusProgressing)

@@ -68,13 +71,28 @@ func TestIngressHealth(t *testing.T) {
}

func TestCRD(t *testing.T) {
	assert.Nil(t, getHealthStatus("./testdata/knative-service.yaml", t))
	assert.Nil(t, getHealthStatus(t, "./testdata/knative-service.yaml"))
}

func TestJob(t *testing.T) {
	assertAppHealth(t, "./testdata/job-running.yaml", HealthStatusProgressing)
	assertAppHealth(t, "./testdata/job-failed.yaml", HealthStatusDegraded)
	assertAppHealth(t, "./testdata/job-succeeded.yaml", HealthStatusHealthy)
	assertAppHealth(t, "./testdata/job-suspended.yaml", HealthStatusSuspended)
}

func TestHPA(t *testing.T) {
	assertAppHealth(t, "./testdata/hpa-v2-healthy.yaml", HealthStatusHealthy)
	assertAppHealth(t, "./testdata/hpa-v2-degraded.yaml", HealthStatusDegraded)
	assertAppHealth(t, "./testdata/hpa-v2-progressing.yaml", HealthStatusProgressing)
	assertAppHealth(t, "./testdata/hpa-v2beta2-healthy.yaml", HealthStatusHealthy)
	assertAppHealth(t, "./testdata/hpa-v2beta1-healthy-disabled.yaml", HealthStatusHealthy)
	assertAppHealth(t, "./testdata/hpa-v2beta1-healthy.yaml", HealthStatusHealthy)
	assertAppHealth(t, "./testdata/hpa-v1-degraded.yaml", HealthStatusDegraded)
	assertAppHealth(t, "./testdata/hpa-v1-healthy.yaml", HealthStatusHealthy)
	assertAppHealth(t, "./testdata/hpa-v1-healthy-toofew.yaml", HealthStatusHealthy)
	assertAppHealth(t, "./testdata/hpa-v1-progressing.yaml", HealthStatusProgressing)
	assertAppHealth(t, "./testdata/hpa-v1-progressing-with-no-annotations.yaml", HealthStatusProgressing)
}

func TestPod(t *testing.T) {

@@ -92,8 +110,8 @@ func TestPod(t *testing.T) {
}

func TestApplication(t *testing.T) {
	assert.Nil(t, getHealthStatus("./testdata/application-healthy.yaml", t))
	assert.Nil(t, getHealthStatus("./testdata/application-degraded.yaml", t))
	assert.Nil(t, getHealthStatus(t, "./testdata/application-healthy.yaml"))
	assert.Nil(t, getHealthStatus(t, "./testdata/application-degraded.yaml"))
}

func TestAPIService(t *testing.T) {

@@ -104,16 +122,17 @@ func TestAPIService(t *testing.T) {
}

func TestGetArgoWorkflowHealth(t *testing.T) {
	sampleWorkflow := unstructured.Unstructured{Object: map[string]interface{}{
		"spec": map[string]interface{}{
			"entrypoint":    "sampleEntryPoint",
			"extraneousKey": "we are agnostic to extraneous keys",
	sampleWorkflow := unstructured.Unstructured{
		Object: map[string]any{
			"spec": map[string]any{
				"entrypoint":    "sampleEntryPoint",
				"extraneousKey": "we are agnostic to extraneous keys",
			},
			"status": map[string]any{
				"phase":   "Running",
				"message": "This node is running",
			},
		},
		"status": map[string]interface{}{
			"phase":   "Running",
			"message": "This node is running",
		},
	},
	}

	health, err := getArgoWorkflowHealth(&sampleWorkflow)

@@ -121,20 +140,35 @@ func TestGetArgoWorkflowHealth(t *testing.T) {
	assert.Equal(t, HealthStatusProgressing, health.Status)
	assert.Equal(t, "This node is running", health.Message)

	sampleWorkflow = unstructured.Unstructured{Object: map[string]interface{}{
		"spec": map[string]interface{}{
			"entrypoint":    "sampleEntryPoint",
			"extraneousKey": "we are agnostic to extraneous keys",
	sampleWorkflow = unstructured.Unstructured{
		Object: map[string]any{
			"spec": map[string]any{
				"entrypoint":    "sampleEntryPoint",
				"extraneousKey": "we are agnostic to extraneous keys",
			},
			"status": map[string]any{
				"phase":   "Succeeded",
				"message": "This node is has succeeded",
			},
		},
		"status": map[string]interface{}{
			"phase":   "Succeeded",
			"message": "This node is has succeeded",
		},
	},
	}

	health, err = getArgoWorkflowHealth(&sampleWorkflow)
	require.NoError(t, err)
	assert.Equal(t, HealthStatusHealthy, health.Status)
	assert.Equal(t, "This node is has succeeded", health.Message)

	sampleWorkflow = unstructured.Unstructured{
		Object: map[string]any{
			"spec": map[string]any{
				"entrypoint":    "sampleEntryPoint",
				"extraneousKey": "we are agnostic to extraneous keys",
			},
		},
	}

	health, err = getArgoWorkflowHealth(&sampleWorkflow)
	require.NoError(t, err)
	assert.Equal(t, HealthStatusProgressing, health.Status)
	assert.Empty(t, health.Message)
}
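assertAppHealth keeps each fixture to one line, which is why these test bodies stay flat. If the fixture list kept growing, the same coverage could be written table-driven with subtests; a sketch only, assuming it sits in this package, not how the file is actually organized:

func TestHealthFixtures(t *testing.T) {
	cases := []struct {
		path     string
		expected HealthStatusCode
	}{
		{"./testdata/job-suspended.yaml", HealthStatusSuspended},
		{"./testdata/hpa-v2-degraded.yaml", HealthStatusDegraded},
	}
	for _, tc := range cases {
		tc := tc
		t.Run(tc.path, func(t *testing.T) {
			assertAppHealth(t, tc.path, tc.expected)
		})
	}
}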
@@ -1,10 +1,10 @@
apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
  annotations:
    deployment.kubernetes.io/revision: "4"
    kubectl.kubernetes.io/last-applied-configuration: |
      {"apiVersion":"apps/v1beta2","kind":"Deployment","metadata":{"annotations":{},"labels":{"app.kubernetes.io/instance":"guestbook-default"},"name":"guestbook-ui","namespace":"default"},"spec":{"replicas":1,"selector":{"matchLabels":{"app":"guestbook-ui"}},"template":{"metadata":{"labels":{"app":"guestbook-ui","app.kubernetes.io/instance":"guestbook-default"}},"spec":{"containers":[{"image":"gcr.io/heptio-images/ks-guestbook-demo:0.3","name":"guestbook-ui","ports":[{"containerPort":80}]}]}}}}
      {"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{},"labels":{"app.kubernetes.io/instance":"guestbook-default"},"name":"guestbook-ui","namespace":"default"},"spec":{"replicas":1,"selector":{"matchLabels":{"app":"guestbook-ui"}},"template":{"metadata":{"labels":{"app":"guestbook-ui","app.kubernetes.io/instance":"guestbook-default"}},"spec":{"containers":[{"image":"gcr.io/heptio-images/ks-guestbook-demo:0.3","name":"guestbook-ui","ports":[{"containerPort":80}]}]}}}}
  creationTimestamp: 2018-07-18T04:40:44Z
  generation: 4
  labels:

@@ -12,7 +12,7 @@ metadata:
  name: guestbook-ui
  namespace: default
  resourceVersion: "13660"
  selfLink: /apis/extensions/v1beta1/namespaces/default/deployments/guestbook-ui
  selfLink: /apis/apps/v1/namespaces/default/deployments/guestbook-ui
  uid: bb9af0c7-8a44-11e8-9e23-42010aa80010
spec:
  progressDeadlineSeconds: 600

@@ -1,10 +1,10 @@
apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
  annotations:
    deployment.kubernetes.io/revision: "4"
    kubectl.kubernetes.io/last-applied-configuration: |
      {"apiVersion":"apps/v1beta2","kind":"Deployment","metadata":{"annotations":{},"labels":{"app.kubernetes.io/instance":"guestbook-default"},"name":"guestbook-ui","namespace":"default"},"spec":{"replicas":1,"selector":{"matchLabels":{"app":"guestbook-ui"}},"template":{"metadata":{"labels":{"app":"guestbook-ui","app.kubernetes.io/instance":"guestbook-default"}},"spec":{"containers":[{"image":"gcr.io/heptio-images/ks-guestbook-demo:0.3","name":"guestbook-ui","ports":[{"containerPort":80}]}]}}}}
      {"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{},"labels":{"app.kubernetes.io/instance":"guestbook-default"},"name":"guestbook-ui","namespace":"default"},"spec":{"replicas":1,"selector":{"matchLabels":{"app":"guestbook-ui"}},"template":{"metadata":{"labels":{"app":"guestbook-ui","app.kubernetes.io/instance":"guestbook-default"}},"spec":{"containers":[{"image":"gcr.io/heptio-images/ks-guestbook-demo:0.3","name":"guestbook-ui","ports":[{"containerPort":80}]}]}}}}
  creationTimestamp: 2018-07-18T04:40:44Z
  generation: 4
  labels:

@@ -12,7 +12,7 @@ metadata:
  name: guestbook-ui
  namespace: default
  resourceVersion: "12819"
  selfLink: /apis/extensions/v1beta1/namespaces/default/deployments/guestbook-ui
  selfLink: /apis/apps/v1/namespaces/default/deployments/guestbook-ui
  uid: bb9af0c7-8a44-11e8-9e23-42010aa80010
spec:
  progressDeadlineSeconds: 600

@@ -1,10 +1,10 @@
apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
  annotations:
    deployment.kubernetes.io/revision: "4"
    kubectl.kubernetes.io/last-applied-configuration: |
      {"apiVersion":"apps/v1beta2","kind":"Deployment","metadata":{"annotations":{},"labels":{"app.kubernetes.io/instance":"guestbook-default"},"name":"guestbook-ui","namespace":"default"},"spec":{"replicas":1,"selector":{"matchLabels":{"app":"guestbook-ui"}},"template":{"metadata":{"labels":{"app":"guestbook-ui","app.kubernetes.io/instance":"guestbook-default"}},"spec":{"containers":[{"image":"gcr.io/heptio-images/ks-guestbook-demo:0.3","name":"guestbook-ui","ports":[{"containerPort":80}]}]}}}}
      {"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{},"labels":{"app.kubernetes.io/instance":"guestbook-default"},"name":"guestbook-ui","namespace":"default"},"spec":{"replicas":1,"selector":{"matchLabels":{"app":"guestbook-ui"}},"template":{"metadata":{"labels":{"app":"guestbook-ui","app.kubernetes.io/instance":"guestbook-default"}},"spec":{"containers":[{"image":"gcr.io/heptio-images/ks-guestbook-demo:0.3","name":"guestbook-ui","ports":[{"containerPort":80}]}]}}}}
  creationTimestamp: 2018-07-18T04:40:44Z
  generation: 4
  labels:

@@ -12,7 +12,7 @@ metadata:
  name: guestbook-ui
  namespace: default
  resourceVersion: "12819"
  selfLink: /apis/extensions/v1beta1/namespaces/default/deployments/guestbook-ui
  selfLink: /apis/apps/v1/namespaces/default/deployments/guestbook-ui
  uid: bb9af0c7-8a44-11e8-9e23-42010aa80010
spec:
  progressDeadlineSeconds: 600
@ -0,0 +1,21 @@
|
|||
apiVersion: autoscaling/v1
|
||||
kind: HorizontalPodAutoscaler
|
||||
metadata:
|
||||
annotations:
|
||||
autoscaling.alpha.kubernetes.io/conditions: '[{"type":"AbleToScale","status":"True","lastTransitionTime":"2020-11-23T19:38:38Z","reason":"FailedGetScale","message":"the HPA controller was unable to get the target''s current scale"},{"type":"ScalingActive","status":"False","lastTransitionTime":"2020-11-23T19:38:38Z","reason":"FailedGetResourceMetric","message":"the
|
||||
HPA was unable to compute the replica count: unable to get metrics for resource
|
||||
cpu: unable to fetch metrics from resource metrics API: the server is currently
|
||||
unable to handle the request (get pods.metrics.k8s.io)"}]'
|
||||
name: sample
|
||||
namespace: argocd
|
||||
spec:
|
||||
maxReplicas: 1
|
||||
minReplicas: 1
|
||||
scaleTargetRef:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: sample
|
||||
targetCPUUtilizationPercentage: 2
|
||||
status:
|
||||
currentReplicas: 1
|
||||
desiredReplicas: 0
|
|
@ -0,0 +1,28 @@
|
|||
apiVersion: autoscaling/v1
|
||||
kind: HorizontalPodAutoscaler
|
||||
metadata:
|
||||
annotations:
|
||||
autoscaling.alpha.kubernetes.io/conditions: '[{"type":"AbleToScale","status":"True","lastTransitionTime":"2021-03-08T17:42:11Z","reason":"ReadyForNewScale","message":"recommended
|
||||
size matches current size"},{"type":"ScalingActive","status":"True","lastTransitionTime":"2021-03-09T23:59:49Z","reason":"ValidMetricFound","message":"the
|
||||
HPA was able to successfully calculate a replica count from memory resource
|
||||
utilization (percentage of request)"},{"type":"ScalingLimited","status":"True","lastTransitionTime":"2021-03-10T10:02:12Z","reason":"TooFewReplicas","message":"the
|
||||
desired replica count is less than the minimum replica count"}]'
|
||||
autoscaling.alpha.kubernetes.io/current-metrics: '[{"type":"Resource","resource":{"name":"memory","currentAverageUtilization":2,"currentAverageValue":"3452928"}},{"type":"Resource","resource":{"name":"cpu","currentAverageUtilization":1,"currentAverageValue":"1m"}}]'
|
||||
name: sample
|
||||
namespace: default
|
||||
resourceVersion: "41720063"
|
||||
selfLink: /apis/autoscaling/v1/namespaces/default/horizontalpodautoscalers/sample
|
||||
uid: cbe887e2-93d6-40de-8f03-7eb7e2d7f978
|
||||
spec:
|
||||
maxReplicas: 10
|
||||
minReplicas: 2
|
||||
scaleTargetRef:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: sample
|
||||
targetCPUUtilizationPercentage: 80
|
||||
status:
|
||||
currentCPUUtilizationPercentage: 1
|
||||
currentReplicas: 2
|
||||
desiredReplicas: 2
|
||||
lastScaleTime: "2021-03-08T17:42:11Z"
|
|
@ -0,0 +1,21 @@
|
|||
apiVersion: autoscaling/v1
|
||||
kind: HorizontalPodAutoscaler
|
||||
metadata:
|
||||
annotations:
|
||||
autoscaling.alpha.kubernetes.io/conditions: '[{"type":"AbleToScale","status":"True","lastTransitionTime":"2020-11-23T19:38:38Z","reason":"SucceededRescale","message":"the HPA controller was able to update the target scale to 1"},{"type":"ScalingActive","status":"False","lastTransitionTime":"2020-11-23T19:38:38Z","reason":"FailedGetResourceMetric","message":"the
|
||||
HPA was unable to compute the replica count: unable to get metrics for resource
|
||||
cpu: unable to fetch metrics from resource metrics API: the server is currently
|
||||
unable to handle the request (get pods.metrics.k8s.io)"}]'
|
||||
name: sample
|
||||
namespace: argocd
|
||||
spec:
|
||||
maxReplicas: 2
|
||||
minReplicas: 1
|
||||
scaleTargetRef:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: sample
|
||||
targetCPUUtilizationPercentage: 2
|
||||
status:
|
||||
currentReplicas: 1
|
||||
desiredReplicas: 1
|
|
@ -0,0 +1,16 @@
|
|||
apiVersion: autoscaling/v1
|
||||
kind: HorizontalPodAutoscaler
|
||||
metadata:
|
||||
name: sample
|
||||
namespace: argocd
|
||||
spec:
|
||||
maxReplicas: 2
|
||||
minReplicas: 1
|
||||
scaleTargetRef:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: sample
|
||||
targetCPUUtilizationPercentage: 2
|
||||
status:
|
||||
currentReplicas: 1
|
||||
desiredReplicas: 1
|
|
@ -0,0 +1,18 @@
|
|||
apiVersion: autoscaling/v1
|
||||
kind: HorizontalPodAutoscaler
|
||||
metadata:
|
||||
annotations:
|
||||
autoscaling.alpha.kubernetes.io/conditions: '[{"type":"AbleToScale","status":"False","lastTransitionTime":"2020-11-23T19:38:38Z","reason":"SucceededGetScale","message":"the HPA controller was not able to get the target''s current scale"}]'
|
||||
name: sample
|
||||
namespace: argocd
|
||||
spec:
|
||||
maxReplicas: 1
|
||||
minReplicas: 1
|
||||
scaleTargetRef:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: sample
|
||||
targetCPUUtilizationPercentage: 2
|
||||
status:
|
||||
currentReplicas: 1
|
||||
desiredReplicas: 0
|
|
@ -0,0 +1,42 @@
|
|||
apiVersion: autoscaling/v2
|
||||
kind: HorizontalPodAutoscaler
|
||||
metadata:
|
||||
creationTimestamp: "2022-01-17T14:22:27Z"
|
||||
name: sample
|
||||
uid: 0e6d855e-83ed-4ed5-b80a-461a750f14db
|
||||
spec:
|
||||
maxReplicas: 2
|
||||
minReplicas: 1
|
||||
scaleTargetRef:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: argocd-server
|
||||
targetCPUUtilizationPercentage: 80
|
||||
status:
|
||||
conditions:
|
||||
- lastTransitionTime: "2022-04-14T19:44:23Z"
|
||||
message: 'the HPA controller was unable to get the target''s current scale: deployments/scale.apps
|
||||
"sandbox-test-app-8" not found'
|
||||
reason: FailedGetScale
|
||||
status: "False"
|
||||
type: AbleToScale
|
||||
- lastTransitionTime: "2022-04-14T15:41:57Z"
|
||||
message: the HPA was able to successfully calculate a replica count from cpu resource
|
||||
utilization (percentage of request)
|
||||
reason: ValidMetricFound
|
||||
status: "True"
|
||||
type: ScalingActive
|
||||
- lastTransitionTime: "2022-01-17T14:24:13Z"
|
||||
message: the desired count is within the acceptable range
|
||||
reason: DesiredWithinRange
|
||||
status: "False"
|
||||
type: ScalingLimited
|
||||
currentMetrics:
|
||||
- resource:
|
||||
current:
|
||||
averageUtilization: 6
|
||||
averageValue: 12m
|
||||
name: cpu
|
||||
type: Resource
|
||||
currentReplicas: 1
|
||||
desiredReplicas: 1
|
|
@@ -0,0 +1,42 @@
+apiVersion: autoscaling/v2
+kind: HorizontalPodAutoscaler
+metadata:
+  creationTimestamp: '2022-05-13T12:39:31Z'
+  name: sample
+  uid: 0e6d855e-83ed-4ed5-b80a-461a750f14db
+spec:
+  maxReplicas: 2
+  minReplicas: 1
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: argocd-server
+  targetCPUUtilizationPercentage: 80
+status:
+  conditions:
+    - lastTransitionTime: '2022-05-13T12:40:34Z'
+      message: recommended size matches current size
+      reason: ReadyForNewScale
+      status: 'True'
+      type: AbleToScale
+    - lastTransitionTime: '2022-05-13T12:40:33Z'
+      message: >-
+        the HPA was able to successfully calculate a replica count from cpu
+        resource utilization (percentage of request)
+      reason: ValidMetricFound
+      status: 'True'
+      type: ScalingActive
+    - lastTransitionTime: '2022-05-13T12:40:31Z'
+      message: the desired count is within the acceptable range
+      reason: DesiredWithinRange
+      status: 'False'
+      type: ScalingLimited
+  currentMetrics:
+    - resource:
+        current:
+          averageUtilization: 6
+          averageValue: 12m
+        name: cpu
+      type: Resource
+  currentReplicas: 1
+  desiredReplicas: 1
@@ -0,0 +1,37 @@
+apiVersion: autoscaling/v2
+kind: HorizontalPodAutoscaler
+metadata:
+  creationTimestamp: '2022-05-13T12:39:31Z'
+  name: sample
+  uid: 0e6d855e-83ed-4ed5-b80a-461a750f14db
+spec:
+  maxReplicas: 2
+  minReplicas: 1
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: argocd-server
+  targetCPUUtilizationPercentage: 80
+status:
+  conditions:
+    - lastTransitionTime: '2022-05-13T12:40:33Z'
+      message: >-
+        the HPA was able to successfully calculate a replica count from cpu
+        resource utilization (percentage of request)
+      reason: ValidMetricFound
+      status: 'True'
+      type: ScalingActive
+    - lastTransitionTime: '2022-05-13T12:40:31Z'
+      message: the desired count is within the acceptable range
+      reason: DesiredWithinRange
+      status: 'False'
+      type: ScalingLimited
+  currentMetrics:
+    - resource:
+        current:
+          averageUtilization: 6
+          averageValue: 12m
+        name: cpu
+      type: Resource
+  currentReplicas: 1
+  desiredReplicas: 1
@@ -0,0 +1,37 @@
+apiVersion: autoscaling/v2beta1
+kind: HorizontalPodAutoscaler
+metadata:
+  creationTimestamp: '2021-09-15T09:31:50Z'
+  name: sample
+  namespace: argocd
+  resourceVersion: '18886245'
+  selfLink: >-
+    /apis/autoscaling/v2beta1/namespaces/argocd/horizontalpodautoscalers/sample
+  uid: c10a6092-1607-11ec-a314-020fc740624d
+spec:
+  maxReplicas: 3
+  metrics:
+    - resource:
+        name: cpu
+        targetAverageUtilization: 80
+      type: Resource
+  minReplicas: 1
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: test
+status:
+  conditions:
+    - lastTransitionTime: '2021-09-15T09:32:05Z'
+      message: the HPA controller was able to get the target's current scale
+      reason: SucceededGetScale
+      status: 'True'
+      type: AbleToScale
+    - lastTransitionTime: '2021-09-15T09:32:05Z'
+      message: scaling is disabled since the replica count of the target is zero
+      reason: ScalingDisabled
+      status: 'False'
+      type: ScalingActive
+  currentMetrics: []
+  currentReplicas: 0
+  desiredReplicas: 0
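The ScalingDisabled condition in this fixture is what the HPA controller reports once its target has been scaled to zero. A minimal sketch of a target in that state, assuming a plain Deployment (the name test matches the scaleTargetRef above; everything else is illustrative):

apiVersion: apps/v1
kind: Deployment
metadata:
  name: test
  namespace: argocd
spec:
  replicas: 0  # a zero-replica target drives ScalingActive to "False" / ScalingDisabled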
@@ -0,0 +1,72 @@
+apiVersion: autoscaling/v2beta1
+kind: HorizontalPodAutoscaler
+metadata:
+  annotations:
+    kubectl.kubernetes.io/last-applied-configuration: >
+      {"apiVersion":"autoscaling/v2beta1","kind":"HorizontalPodAutoscaler","metadata":{"annotations":{},"labels":{"app.kubernetes.io/component":"repo-server","app.kubernetes.io/instance":"argocd","app.kubernetes.io/managed-by":"Helm","app.kubernetes.io/name":"argocd-repo-server-hpa","app.kubernetes.io/part-of":"argocd","argocd.argoproj.io/instance":"argocd","helm.sh/chart":"argo-cd-2.5.0"},"name":"argocd-repo-server-hpa","namespace":"argocd"},"spec":{"maxReplicas":40,"metrics":[{"resource":{"name":"memory","targetAverageUtilization":150},"type":"Resource"},{"resource":{"name":"cpu","targetAverageUtilization":80},"type":"Resource"}],"minReplicas":1,"scaleTargetRef":{"apiVersion":"apps/v1","kind":"Deployment","name":"argocd-repo-server"}}}
+    meta.helm.sh/release-name: argocd
+    meta.helm.sh/release-namespace: argocd
+  creationTimestamp: '2020-09-01T23:37:42Z'
+  labels:
+    app.kubernetes.io/component: repo-server
+    app.kubernetes.io/instance: argocd
+    app.kubernetes.io/managed-by: Helm
+    app.kubernetes.io/name: argocd-repo-server-hpa
+    app.kubernetes.io/part-of: argocd
+    argocd.argoproj.io/instance: argocd
+    helm.sh/chart: argo-cd-2.5.0
+  name: argocd-repo-server-hpa
+  namespace: argocd
+  resourceVersion: '65843573'
+  selfLink: >-
+    /apis/autoscaling/v2beta1/namespaces/argocd/horizontalpodautoscalers/argocd-repo-server-hpa
+  uid: ca7e0de8-7eb1-404a-b2f9-b9702b88ca8b
+spec:
+  maxReplicas: 40
+  metrics:
+    - resource:
+        name: memory
+        targetAverageUtilization: 150
+      type: Resource
+    - resource:
+        name: cpu
+        targetAverageUtilization: 80
+      type: Resource
+  minReplicas: 1
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: argocd-repo-server
+status:
+  conditions:
+    - lastTransitionTime: '2020-09-16T17:59:42Z'
+      message: recommended size matches current size
+      reason: ReadyForNewScale
+      status: 'True'
+      type: AbleToScale
+    - lastTransitionTime: '2020-11-09T21:31:12Z'
+      message: >-
+        the HPA was able to successfully calculate a replica count from memory
+        resource utilization (percentage of request)
+      reason: ValidMetricFound
+      status: 'True'
+      type: ScalingActive
+    - lastTransitionTime: '2020-11-14T23:12:46Z'
+      message: the desired count is within the acceptable range
+      reason: DesiredWithinRange
+      status: 'False'
+      type: ScalingLimited
+  currentMetrics:
+    - resource:
+        currentAverageUtilization: 12
+        currentAverageValue: '65454080'
+        name: memory
+      type: Resource
+    - resource:
+        currentAverageUtilization: 2
+        currentAverageValue: 12m
+        name: cpu
+      type: Resource
+  currentReplicas: 1
+  desiredReplicas: 1
+  lastScaleTime: '2020-12-07T22:59:53Z'
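Both v2beta1 fixtures above use the flat targetAverageUtilization form of a metric target. In autoscaling/v2 the same target is nested under target:; an equivalent CPU metric in the v2 shape would look roughly like this (illustrative, not part of this diff):

metrics:
  - type: Resource
    resource:
      name: cpu
      target:
        type: Utilization
        averageUtilization: 80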
@@ -0,0 +1,49 @@
+apiVersion: autoscaling/v2beta2
+kind: HorizontalPodAutoscaler
+metadata:
+  creationTimestamp: '2020-08-07T00:13:31Z'
+  name: credential-hpa
+  uid: 04d9992e-a849-4cce-9e1e-121a62d5c001
+spec:
+  maxReplicas: 1
+  metrics:
+    - resource:
+        name: cpu
+        target:
+          averageUtilization: 65
+          type: Utilization
+      type: Resource
+  minReplicas: 1
+  scaleTargetRef:
+    apiVersion: argoproj.io/v1alpha1
+    kind: Rollout
+    name: credential-rollout
+status:
+  conditions:
+    - lastTransitionTime: '2020-08-07T00:13:46Z'
+      message: recommended size matches current size
+      reason: ReadyForNewScale
+      status: 'True'
+      type: AbleToScale
+    - lastTransitionTime: '2020-12-12T07:13:46Z'
+      message: >-
+        the HPA was able to successfully calculate a replica count from cpu
+        resource utilization (percentage of request)
+      reason: ValidMetricFound
+      status: 'True'
+      type: ScalingActive
+    - lastTransitionTime: '2020-12-09T23:28:43Z'
+      message: the desired count is within the acceptable range
+      reason: DesiredWithinRange
+      status: 'False'
+      type: ScalingLimited
+  currentMetrics:
+    - resource:
+        current:
+          averageUtilization: 27
+          averageValue: 195m
+        name: cpu
+      type: Resource
+  currentReplicas: 1
+  desiredReplicas: 1
+  lastScaleTime: '2020-08-07T00:13:46Z'
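This fixture scales an Argo Rollout instead of a Deployment; an HPA can target any resource whose CRD exposes the scale subresource. A hypothetical CRD fragment showing the prerequisite (not taken from this diff):

versions:
  - name: v1alpha1
    served: true
    storage: true
    subresources:
      scale:
        specReplicasPath: .spec.replicas
        statusReplicasPath: .status.replicas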
@@ -1,4 +1,4 @@
-apiVersion: extensions/v1beta1
+apiVersion: networking.k8s.io/v1
 kind: Ingress
 metadata:
   generation: 1
@@ -1,4 +1,4 @@
-apiVersion: extensions/v1beta1
+apiVersion: networking.k8s.io/v1
 kind: Ingress
 metadata:
   annotations:
@@ -10,7 +10,7 @@ metadata:
   name: argocd-server-ingress
   namespace: argocd
   resourceVersion: "23207680"
-  selfLink: /apis/extensions/v1beta1/namespaces/argocd/ingresses/argocd-server-ingress
+  selfLink: /apis/networking.k8s.io/v1/namespaces/argocd/ingresses/argocd-server-ingress
   uid: 09927cae-bca1-11e8-bbd2-42010a8a00bb
 spec:
   rules:
@@ -1,4 +1,4 @@
-apiVersion: extensions/v1beta1
+apiVersion: networking.k8s.io/v1
 kind: Ingress
 metadata:
   annotations:
@@ -10,7 +10,7 @@ metadata:
   name: argocd-server-ingress
   namespace: argocd
   resourceVersion: "23207680"
-  selfLink: /apis/extensions/v1beta1/namespaces/argocd/ingresses/argocd-server-ingress
+  selfLink: /apis/networking.k8s.io/v1/namespaces/argocd/ingresses/argocd-server-ingress
   uid: 09927cae-bca1-11e8-bbd2-42010a8a00bb
 spec:
   rules:
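The two Ingress hunks above only bump apiVersion and selfLink, but the move from extensions/v1beta1 to networking.k8s.io/v1 also reshapes the spec: serviceName/servicePort become a nested service object and pathType is required. A sketch of the v1 shape with illustrative values (the full spec is not shown in this diff):

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: argocd-server-ingress
spec:
  rules:
    - http:
        paths:
          - path: /
            pathType: Prefix       # required in networking.k8s.io/v1
            backend:
              service:             # replaces serviceName/servicePort
                name: argocd-server
                port:
                  number: 443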
@@ -0,0 +1,51 @@
+apiVersion: batch/v1
+kind: Job
+metadata:
+  creationTimestamp: 2018-12-02T08:19:13Z
+  labels:
+    controller-uid: f3fe3a46-f60a-11e8-aa53-42010a80021b
+    job-name: succeed
+  name: succeed
+  namespace: argoci-workflows
+  resourceVersion: "46535949"
+  selfLink: /apis/batch/v1/namespaces/argoci-workflows/jobs/succeed
+  uid: f3fe3a46-f60a-11e8-aa53-42010a80021b
+spec:
+  backoffLimit: 0
+  completions: 1
+  parallelism: 1
+  selector:
+    matchLabels:
+      controller-uid: f3fe3a46-f60a-11e8-aa53-42010a80021b
+  suspend: true
+  template:
+    metadata:
+      creationTimestamp: null
+      labels:
+        controller-uid: f3fe3a46-f60a-11e8-aa53-42010a80021b
+        job-name: succeed
+    spec:
+      containers:
+      - command:
+        - sh
+        - -c
+        - sleep 10
+        image: alpine:latest
+        imagePullPolicy: Always
+        name: succeed
+        resources: {}
+        terminationMessagePath: /dev/termination-log
+        terminationMessagePolicy: File
+      dnsPolicy: ClusterFirst
+      restartPolicy: Never
+      schedulerName: default-scheduler
+      securityContext: {}
+      terminationGracePeriodSeconds: 30
+status:
+  conditions:
+  - lastProbeTime: "2022-12-08T22:27:20Z"
+    lastTransitionTime: "2022-12-08T22:27:20Z"
+    message: Job suspended
+    reason: JobSuspended
+    status: "True"
+    type: Suspended
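The Job above is created with spec.suspend: true, so the controller records the Suspended condition instead of starting pods. Clearing the flag resumes it; a minimal patch sketch (the JobResumed reason is assumed from the Job controller's usual behavior and does not appear in this diff):

spec:
  suspend: false  # controller creates pods and flips Suspended to "False" (reason: JobResumed)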
@@ -10,7 +10,7 @@ metadata:
   deletionTimestamp: 2018-12-03T10:16:04Z
 spec:
   containers:
-  - image: doesnt-exist
+  - image: does-not-exist
     imagePullPolicy: Always
     name: main
     resources: {}
@@ -59,7 +59,7 @@ status:
     status: "True"
     type: PodScheduled
   containerStatuses:
-  - image: doesnt-exist
+  - image: does-not-exist
     imageID: ""
     lastState: {}
     name: main
@@ -9,7 +9,7 @@ metadata:
   uid: 46c1e8de-f61b-11e8-a057-fe5f49266390
 spec:
   containers:
-  - image: doesnt-exist
+  - image: does-not-exist
     imagePullPolicy: Always
     name: main
     resources: {}
@@ -58,7 +58,7 @@ status:
     status: "True"
     type: PodScheduled
   containerStatuses:
-  - image: doesnt-exist
+  - image: does-not-exist
     imageID: ""
     lastState: {}
     name: main
@@ -16,6 +16,7 @@ const (
 	AnnotationKeyHook = "argocd.argoproj.io/hook"
 	// AnnotationKeyHookDeletePolicy is the policy for deleting a hook
 	AnnotationKeyHookDeletePolicy = "argocd.argoproj.io/hook-delete-policy"
+	AnnotationDeletionApproved = "argocd.argoproj.io/deletion-approved"
 
 	// Sync option that disables dry run if the resource is missing in the cluster
 	SyncOptionSkipDryRunOnMissingResource = "SkipDryRunOnMissingResource=true"
@@ -23,6 +24,31 @@ const (
 	SyncOptionDisablePrune = "Prune=false"
 	// Sync option that disables resource validation
 	SyncOptionsDisableValidation = "Validate=false"
+	// Sync option that enables pruneLast
+	SyncOptionPruneLast = "PruneLast=true"
+	// Sync option that enables use of the replace or create command instead of apply
+	SyncOptionReplace = "Replace=true"
+	// Sync option that enables use of the --force flag (delete and re-create)
+	SyncOptionForce = "Force=true"
+	// Sync option that enables use of the --server-side flag instead of client-side apply
+	SyncOptionServerSideApply = "ServerSideApply=true"
+	// Sync option that disables use of the --server-side flag
+	SyncOptionDisableServerSideApply = "ServerSideApply=false"
+	// Sync option that disables resource deletion
+	SyncOptionDisableDeletion = "Delete=false"
+	// Sync option that syncs only out-of-sync resources
+	SyncOptionApplyOutOfSyncOnly = "ApplyOutOfSyncOnly=true"
+	// Sync option that requires confirmation before deleting the resource
+	SyncOptionDeleteRequireConfirm = "Delete=confirm"
+	// Sync option that requires confirmation before pruning the resource
+	SyncOptionPruneRequireConfirm = "Prune=confirm"
+	// Sync option that enables client-side apply migration
+	SyncOptionClientSideApplyMigration = "ClientSideApplyMigration=true"
+	// Sync option that disables client-side apply migration
+	SyncOptionDisableClientSideApplyMigration = "ClientSideApplyMigration=false"
+
+	// Default field manager for client-side apply migration
+	DefaultClientSideApplyMigrationManager = "kubectl-client-side-apply"
 )
 
 type PermissionValidator func(un *unstructured.Unstructured, res *metav1.APIResource) error
@@ -97,7 +123,6 @@ func NewHookType(t string) (HookType, bool) {
 		t == string(HookTypePostSync) ||
 		t == string(HookTypeSyncFail) ||
 		t == string(HookTypeSkip)
-
 }
 
 type HookDeletePolicy string
@@ -118,6 +143,10 @@ func NewHookDeletePolicy(p string) (HookDeletePolicy, bool) {
 type ResourceSyncResult struct {
 	// holds associated resource key
 	ResourceKey kube.ResourceKey
+	// Images holds the images associated with the resource. These images are collected on a best-effort basis
+	// from fields used by known workload resources. This does not necessarily reflect the exact list of images
+	// used by workloads in the application.
+	Images []string
 	// holds resource version
 	Version string
 	// holds the execution order
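For context on how the sync-option constants above are consumed: in Argo CD they are typically attached per resource as a comma-separated list in the argocd.argoproj.io/sync-options annotation. An illustrative combination (any resource manifest; the values mirror the constants in this diff):

metadata:
  annotations:
    argocd.argoproj.io/sync-options: ServerSideApply=true,PruneLast=true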
Some files were not shown because too many files have changed in this diff.