Compare commits


550 Commits

Author SHA1 Message Date
karmada-bot f997f3b236
Merge pull request #6726 from liaolecheng/document/sample
Update sample file of Karmada operator
2025-09-03 08:32:02 +08:00
karmada-bot 56ca7ae4e9
Merge pull request #6725 from liaolecheng/document/maintain
Stop maintaining version 1.12 and maintain version 1.15
2025-09-02 16:34:01 +08:00
liaolecheng 0293499bbb Update sample file of Karmada operator
Signed-off-by: liaolecheng <1079969914@qq.com>
2025-09-02 14:42:55 +08:00
liaolecheng a7c36d109e Stop maintaining version 1.12 and maintain version 1.15
Signed-off-by: liaolecheng <1079969914@qq.com>
2025-09-02 14:28:27 +08:00
karmada-bot dc90876712
Merge pull request #6723 from zhzhuang-zju/mulitcomponent
fix the issue that rbSpec.Components is not updated when the template is updated
2025-09-02 11:05:01 +08:00
zhzhuang-zju 65a29d4094 fix the issue that rbSpec.Components is not updated when the template is updated
Signed-off-by: zhzhuang-zju <m17799853869@163.com>
2025-09-01 19:40:55 +08:00
karmada-bot b030687997
Merge pull request #6650 from XiShanYongYe-Chang/add-serviceexport-ut
Add UT for service-export-controller
2025-09-01 16:13:01 +08:00
karmada-bot d410a0be5e
Merge pull request #6721 from zhzhuang-zju/releasenote-new
publish release v1.15.0, v1.14.4, v1.13.7, v1.12.10
2025-08-30 19:51:59 +08:00
zhzhuang-zju 7d9d366798 publish release v1.15.0, v1.14.4, v1.13.7, v1.12.10
Signed-off-by: zhzhuang-zju <m17799853869@163.com>
2025-08-30 18:53:24 +08:00
changzhen 357aa96976 add ut for serviceexport controller
Signed-off-by: changzhen <changzhen5@huawei.com>
2025-08-30 17:37:56 +08:00
karmada-bot d4402a750f
Merge pull request #6535 from RainbowMango/pr_update_multitemplate_proposal
Refine Multiple Pod Templates Scheduling proposal
2025-08-30 17:11:58 +08:00
karmada-bot 82c383f3f6
Merge pull request #6720 from zhzhuang-zju/upstream/rbsuspension
fix wrong test name
2025-08-30 14:50:58 +08:00
zhzhuang-zju d8593e9b80 cleanup
Signed-off-by: zhzhuang-zju <m17799853869@163.com>
2025-08-30 12:00:40 +08:00
karmada-bot 0c75b330a9
Merge pull request #6707 from zhzhuang-zju/upstream/rbsuspension
fix the issue that the binding's suspension persists when the policy deletes the suspension configuration
2025-08-30 10:40:58 +08:00
zhzhuang-zju b9a85d4e35 fix the issue that the binding's suspension persists when the policy deletes the suspension configuration
Signed-off-by: zhzhuang-zju <m17799853869@163.com>
2025-08-30 09:43:27 +08:00
karmada-bot 82519160c4
Merge pull request #6699 from zclyne/yifan/component-name-validation
added validation check for the components section of resourcebinding
2025-08-29 18:35:58 +08:00
karmada-bot 1258855dd8
Merge pull request #6671 from liaolecheng/feature/direct-purge-delete-before-create
Ensure that under Directly purgeMode, resources are removed from the old cluster before being created in the new cluster
2025-08-29 18:34:57 +08:00
liaolecheng 2746a92030 ensure that under Directly purgeMode, resources are first deleted from the old cluster and then created in the new cluster.
Signed-off-by: liaolecheng <1079969914@qq.com>
2025-08-29 17:34:32 +08:00
Yifan Zhang bcbf425b28 added validation check for the components section of resourcebinding
Signed-off-by: Yifan Zhang <zyfinori@gmail.com>
2025-08-29 17:13:51 +08:00
karmada-bot caa18b1146
Merge pull request #6708 from RainbowMango/pr_rename_ComponentRequirements
Rename ComponentRequirements field in ResourceBinding
2025-08-29 16:47:58 +08:00
karmada-bot 0a4e1d3bbf
Merge pull request #6666 from liaolecheng/feature/stateful-workload-cluster-failover
Build gracefulEviction task with ClusterFailoverBehavior
2025-08-29 16:27:58 +08:00
liaolecheng 4b3997f850 build gracefulEviction task with ClusterFailoverBehavior
Signed-off-by: liaolecheng <1079969914@qq.com>
2025-08-29 14:43:08 +08:00
karmada-bot e95003cefc
Merge pull request #6696 from seanlaii/karmadactl-component
Implement `interpretComponent` for `karmadactl interpret`
2025-08-29 12:22:57 +08:00
karmada-bot 8e2d872e8f
Merge pull request #6690 from XiShanYongYe-Chang/fix-6676
Fix purgeMode not being properly set when tolerationTime is 0 during cluster failover
2025-08-29 12:02:58 +08:00
changzhen ad9cae7daf Fix purgeMode not being properly set when tolerationTime is 0 during cluster failover
Signed-off-by: changzhen <changzhen5@huawei.com>
2025-08-29 11:06:07 +08:00
karmada-bot 0b929ac04d
Merge pull request #6710 from mszacillo/flinkdep-interpreter-fix
Use a JSON-safe clone to prevent shared table references in the FlinkDeployment Lua result
2025-08-29 09:25:58 +08:00
karmada-bot 232e731240
Merge pull request #6674 from zhzhuang-zju/changebykarmada
fix the issue that the relevant fields in rb and pp are inconsistent
2025-08-29 09:14:57 +08:00
mszacillo 7b3798aeec Use a JSON-safe clone to prevent shared table references in the FlinkDeployment Lua result
Signed-off-by: mszacillo <mszacillo@bloomberg.net>
2025-08-28 16:30:53 -04:00
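The FlinkDeployment fix above avoids aliased Lua tables by deep-copying through a JSON round-trip. A minimal Go sketch of that cloning pattern, assuming a generic map payload (function name hypothetical, not the interpreter's actual code):

```go
// Sketch of a JSON round-trip deep copy: the clone shares no nested maps or
// slices with the original. Function name is hypothetical.
package main

import (
	"encoding/json"
	"fmt"
)

func jsonSafeClone(in map[string]interface{}) (map[string]interface{}, error) {
	raw, err := json.Marshal(in)
	if err != nil {
		return nil, err
	}
	var out map[string]interface{}
	err = json.Unmarshal(raw, &out)
	return out, err
}

func main() {
	orig := map[string]interface{}{"spec": map[string]interface{}{"replicas": 2.0}}
	clone, err := jsonSafeClone(orig)
	if err != nil {
		panic(err)
	}
	// Mutating the clone leaves the original untouched, which is exactly
	// what a shared-reference (aliased table) bug would violate.
	clone["spec"].(map[string]interface{})["replicas"] = 3.0
	fmt.Println(orig["spec"], clone["spec"]) // map[replicas:2] map[replicas:3]
}
```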
RainbowMango 925ffe9371 Rename ComponentRequirements field in ResourceBinding to get rid of the redundant namespace
Signed-off-by: RainbowMango <qdurenhongcai@gmail.com>
2025-08-28 18:04:47 +08:00
karmada-bot e7ecc935cb
Merge pull request #6701 from liaolecheng/feature/enhance-quota
Enhance the quota statistics
2025-08-28 15:47:57 +08:00
karmada-bot 3b4768350e
Merge pull request #6495 from XiShanYongYe-Chang/support-stateful-cluster-failover
[Proposal] Support cluster failover for stateful workloads
2025-08-28 15:36:57 +08:00
karmada-bot aa0afafdd3
Merge pull request #6686 from XiShanYongYe-Chang/consider-apienablement-complete
When the controller calls IsAPIEnabled, it also considers whether the judgment result is trustworthy
2025-08-28 15:16:57 +08:00
changzhen 0876615df2 When the controller calls IsAPIEnabled, it also considers whether the judgment result is trustworthy
Signed-off-by: changzhen <changzhen5@huawei.com>
2025-08-28 14:19:08 +08:00
karmada-bot ff7e09785c
Merge pull request #6697 from mszacillo/flinkdep-interpreter
Adding componentResource calculation for FlinkDeployment
2025-08-28 12:03:56 +08:00
liaolecheng 7e9dd8d372 Enhance the quota statistics.
Signed-off-by: liaolecheng <1079969914@qq.com>
2025-08-28 11:27:24 +08:00
mszacillo 5c64613d0c Adding componentResource calculation for FlinkDeployment
Signed-off-by: mszacillo <mszacillo@bloomberg.net>
2025-08-27 16:19:04 -04:00
wei-chenglai c08ce424aa Implement `interpretComponent` for `karmadactl interpret`
Signed-off-by: wei-chenglai <qazwsx0939059006@gmail.com>
2025-08-27 12:10:10 -04:00
karmada-bot 1b8626397f
Merge pull request #6700 from jabellard/unifedauth-json
JSON logging for unified auth controller
2025-08-27 18:16:56 +08:00
karmada-bot 78ffbd5280
Merge pull request #6597 from LivingCcj/feature/optimize_karmada_scheduler
Improve the performance of karmada-scheduler
2025-08-27 15:01:56 +08:00
karmada-bot 7148248453
Merge pull request #6541 from XiShanYongYe-Chang/kube-cache-mutation-detector
enable mutation detection in e2e
2025-08-27 10:29:56 +08:00
Joe Nathan Abellard f5a4ba9186 Structured logging for unified auth controller
Signed-off-by: Joe Nathan Abellard <contact@jabellard.com>
2025-08-26 21:37:55 -04:00
karmada-bot 6013c0b6e9
Merge pull request #6581 from zclyne/yifan/fix-wsl2-local-up
hack/local-up-karmada.sh supports WSL2
2025-08-27 09:26:55 +08:00
karmada-bot ea357c2ea2
Merge pull request #6656 from zhzhuang-zju/lazy-cleanup
fixed the issue where pp-related claimMetadata was not properly cleaned up
2025-08-26 21:18:56 +08:00
zhzhuang-zju 9889226422 fixed the issue where pp-related claimMetadata was not properly cleaned up
Signed-off-by: zhzhuang-zju <m17799853869@163.com>
2025-08-26 19:38:56 +08:00
karmada-bot 981f489dfe
Merge pull request #6631 from seanlaii/scheduler-featuregate
Add feature gates support to karmada-scheduler config (Helm Chart)
2025-08-26 17:34:56 +08:00
karmada-bot 33ebc2d45b
Merge pull request #6523 from XiShanYongYe-Chang/fix-6522
Fixed the issue that reporting repeat EndpointSlice resources leads to duplicate backend IPs
2025-08-26 11:54:55 +08:00
zhzhuang-zju 5c2fbc293c fix the issue that the relevant fields in rb and pp are inconsistent
Signed-off-by: zhzhuang-zju <m17799853869@163.com>
2025-08-26 10:57:43 +08:00
karmada-bot 997a0a1f1a
Merge pull request #6687 from seanlaii/thirdparty-interpreter
Implement `GetComponents` for `thirdpartyInterpreter`
2025-08-26 10:46:54 +08:00
karmada-bot 5469b5e5ef
Merge pull request #6689 from karmada-io/dependabot/github_actions/actions/checkout-5
Bump actions/checkout from 4 to 5
2025-08-25 19:43:54 +08:00
dependabot[bot] 27957d4fdb
Bump actions/checkout from 4 to 5
Bumps [actions/checkout](https://github.com/actions/checkout) from 4 to 5.
- [Release notes](https://github.com/actions/checkout/releases)
- [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md)
- [Commits](https://github.com/actions/checkout/compare/v4...v5)

---
updated-dependencies:
- dependency-name: actions/checkout
  dependency-version: '5'
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-08-25 10:27:18 +00:00
LivingCcj 2841f04cbd optimize karmada scheduler step
Signed-off-by: LivingCcj <livingccj@163.com>
2025-08-25 15:52:31 +08:00
changzhen d2f22e8a43 Exclude EndpointSlice resources managed by Karmada system to avoid duplicate reporting
Signed-off-by: changzhen <changzhen5@huawei.com>
2025-08-25 15:24:41 +08:00
wei-chenglai 7b5ac69dec Implement `GetComponents` for `thirdpartyInterpreter`
Signed-off-by: wei-chenglai <qazwsx0939059006@gmail.com>
2025-08-24 23:42:55 -04:00
karmada-bot 17caa1bc7d
Merge pull request #6678 from seanlaii/configurableinterpreter
Implement `GetComponents` for `ConfigurableInterpreter`
2025-08-25 10:42:54 +08:00
wei-chenglai 3a294dd3bf Implement GetComponents method for ConfigurableInterpreter
Signed-off-by: wei-chenglai <qazwsx0939059006@gmail.com>
2025-08-23 14:59:45 -04:00
changzhen 69bf770da5 enable mutation detection in e2e
Signed-off-by: changzhen <changzhen5@huawei.com>
2025-08-22 11:10:20 +08:00
karmada-bot 00d5f45335
Merge pull request #6669 from XiShanYongYe-Chang/promote-resource-interpreter
Fix the issue that the cache is out of sync within the resource interpreter when using karmadactl promote
2025-08-22 10:46:52 +08:00
changzhen bdf17e6909 Fix the issue that the cache is out of sync within the resource interpreter when using karmadactl promote
Signed-off-by: changzhen <changzhen5@huawei.com>
2025-08-22 09:51:22 +08:00
karmada-bot 708f3e453a
Merge pull request #6667 from XiShanYongYe-Chang/add-validate-for-aliasLabelName
add validation for policyv1alpha1.StatePreservation.Rules[*].AliasLabelName
2025-08-22 09:30:51 +08:00
karmada-bot 017379f7e9
Merge pull request #6664 from XiShanYongYe-Chang/add-comments-for-serviceexport-controller
add comments for service-export-controller
2025-08-22 09:11:51 +08:00
changzhen 2e121f16cd add comments for service-export-controller
Signed-off-by: changzhen <changzhen5@huawei.com>
2025-08-21 21:16:11 +08:00
karmada-bot 1c6f1db6df
Merge pull request #6665 from seanlaii/getcomponent-detector
Resolve binding components using GetComponents interpreter hook
2025-08-21 15:03:51 +08:00
karmada-bot 40bf0ab3a1
Merge pull request #6603 from whitewindmills/fix-httpserver
Fix the issue of infinite loop caused by connection failure
2025-08-21 11:19:51 +08:00
karmada-bot f3775f9dff
Merge pull request #6662 from ctripcloud/pq
feat: introduce ControllerPriorityQueue feature gate
2025-08-21 10:00:50 +08:00
wei-chenglai 5e3e25aa3c Resolve binding components using GetComponents interpreter hook
Signed-off-by: wei-chenglai <qazwsx0939059006@gmail.com>
2025-08-20 20:34:03 -04:00
karmada-bot 601081ebf2
Merge pull request #6668 from RainbowMango/pr_introduce_interpreter_operation
Introduce new interpreter operation InterpretComponent
2025-08-20 23:51:50 +08:00
RainbowMango 440eada373 Introduce new interpreter operation InterpretComponent
Signed-off-by: RainbowMango <qdurenhongcai@gmail.com>
2025-08-20 18:55:06 +08:00
changzhen 9edc1b4940 add validation for policyv1alpha1.StatePreservation.Rules[*].AliasLabelName
Signed-off-by: changzhen <changzhen5@huawei.com>
2025-08-20 16:03:50 +08:00
zach593 201f2d9175 feat: introduce ControllerPriorityQueue feature gate
Signed-off-by: zach593 <zach_li@outlook.com>
2025-08-19 22:49:09 +08:00
whitewindmills f5bd00b28c Fix the issue of infinite loop caused by connection failure
Signed-off-by: whitewindmills <jayfantasyhjh@gmail.com>
2025-08-19 17:39:03 +08:00
karmada-bot 8b1404681c
Merge pull request #6610 from XiShanYongYe-Chang/add-cluster-failover-behavior-api
Introduced `spec.failover.cluster` to the PropagationPolicy API for application-level cluster failover
2025-08-19 15:40:50 +08:00
karmada-bot 5afc4ecd42
Merge pull request #6659 from RainbowMango/pr_extend_resource_interpreter_interface
Extend the Resource Interpreter interface for multi-components workload scheduling
2025-08-19 12:39:49 +08:00
RainbowMango 2cea078eba Extend the Resource Interpreter interface for multi-components workload scheduling.
Signed-off-by: RainbowMango <qdurenhongcai@gmail.com>
2025-08-19 11:46:30 +08:00
changzhen 8fafe59f7e add ClusterFailoverBehavior API in PropagationSpec to support application-level cluster failover
Signed-off-by: changzhen <changzhen5@huawei.com>
2025-08-19 10:30:58 +08:00
karmada-bot 4418fb007f
Merge pull request #6387 from XiShanYongYe-Chang/deprecated-some-purgemode
Introduced Directly and Gracefully PurgeModes, and deprecated Immediately and Graciously PurgeModes
2025-08-19 10:09:48 +08:00
karmada-bot f85bf8fc89
Merge pull request #6647 from zhzhuang-zju/informerstop
Fix unreasonable informer manager stop behavior
2025-08-18 20:50:48 +08:00
Tessa Pham 8bf67146c1
structured logging for hpascaletargetmarker controller (#6654)
* structured logging for hpascaletargetmarker controller

Signed-off-by: Tessa Pham <hpham111@bloomberg.net>

* make error messages more detailed

Signed-off-by: Tessa Pham <hpham111@bloomberg.net>

---------

Signed-off-by: Tessa Pham <hpham111@bloomberg.net>
2025-08-18 20:20:49 +08:00
karmada-bot 980ee22886
Merge pull request #6649 from RainbowMango/pr_extend_rb_for_components_scheduling
Introduce components fields to ResourceBinding API
2025-08-18 10:27:49 +08:00
karmada-bot 567594441d
Merge pull request #6655 from seanlaii/multipod-featuregate
Introduce MultiplePodTemplatesScheduling feature gate
2025-08-18 09:12:48 +08:00
wei-chenglai eb2cf6aaab Introduce MultiplePodTemplatesScheduling feature gate
Signed-off-by: wei-chenglai <qazwsx0939059006@gmail.com>
2025-08-15 22:03:34 -04:00
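Feature gates such as MultiplePodTemplatesScheduling above and ControllerPriorityQueue (#6662) are typically declared through k8s.io/component-base/featuregate. A hedged sketch of that registration pattern; the package layout and defaults are assumptions, not Karmada's actual definitions:

```go
// A hedged sketch of feature gate registration, assuming the usual
// k8s.io/component-base pattern; not Karmada's actual definitions.
package features

import (
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/component-base/featuregate"
)

const (
	// MultiplePodTemplatesScheduling gates multi-component workload scheduling.
	MultiplePodTemplatesScheduling featuregate.Feature = "MultiplePodTemplatesScheduling"
	// ControllerPriorityQueue gates the controller priority queue.
	ControllerPriorityQueue featuregate.Feature = "ControllerPriorityQueue"
)

// FeatureGate is a shared mutable gate that components consult at runtime,
// e.g. FeatureGate.Enabled(MultiplePodTemplatesScheduling).
var FeatureGate featuregate.MutableFeatureGate = featuregate.NewFeatureGate()

func init() {
	// New gates start as Alpha and default to off (assumed defaults).
	utilruntime.Must(FeatureGate.Add(map[featuregate.Feature]featuregate.FeatureSpec{
		MultiplePodTemplatesScheduling: {Default: false, PreRelease: featuregate.Alpha},
		ControllerPriorityQueue:        {Default: false, PreRelease: featuregate.Alpha},
	}))
}
```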
wei-chenglai 632754df63 Add feature gates support to karmada-scheduler config (Helm Chart)
Signed-off-by: wei-chenglai <qazwsx0939059006@gmail.com>
2025-08-15 21:21:55 -04:00
RainbowMango 8a477540bb Introduce components fields to ResourceBinding API.
Signed-off-by: RainbowMango <qdurenhongcai@gmail.com>
2025-08-15 18:46:05 +08:00
karmada-bot b1ea332546
Merge pull request #6652 from zhzhuang-zju/releasenote-new
publish release v1.15.0-rc.0, v1.14.3, v1.13.6, v1.12.9
2025-08-15 18:11:46 +08:00
zhzhuang-zju 952d0f0e4e publish release v1.15.0-rc.0,v1.14.3,v1.13.6,v1.12.9
Signed-off-by: zhzhuang-zju <m17799853869@163.com>
2025-08-15 17:01:08 +08:00
karmada-bot bef7fce5cb
Merge pull request #6651 from tessapham/fix-fmt
fix formatted strings
2025-08-15 09:42:46 +08:00
Tessa Pham 74aeab08b8 fix formatted strings
Signed-off-by: Tessa Pham <hpham111@bloomberg.net>
2025-08-14 10:31:05 -04:00
zhzhuang-zju 6efd86bb31 Fix unreasonable informer stop behavior
Signed-off-by: zhzhuang-zju <m17799853869@163.com>
2025-08-14 15:57:29 +08:00
karmada-bot dfdd9a5207
Merge pull request #6544 from zhzhuang-zju/cachemutation
detector: eliminate the mutation of the informer cache
2025-08-13 19:11:44 +08:00
karmada-bot e335bc242c
Merge pull request #6627 from tessapham/fix-filename
update -f flag description for karmadactl interpret
2025-08-13 16:19:45 +08:00
karmada-bot 8f9f9ef722
Merge pull request #6611 from liaolecheng/refactor/resourcebinding-validation
Optimize the ResourceBinding validation webhook for better extensibility
2025-08-13 15:21:46 +08:00
zhzhuang-zju d0f7464246 detector: eliminate the mutation of the informer cache
Signed-off-by: zhzhuang-zju <m17799853869@163.com>
2025-08-13 14:54:49 +08:00
karmada-bot 6bcb4774c5
Merge pull request #6626 from XiShanYongYe-Chang/ensure-resource-interpreter-cache
ensure resource interpreter cache sync before starting controllers
2025-08-13 14:32:44 +08:00
liaolecheng e81e311dcd Refactor resourcebinding validating webhook for extensibility
Signed-off-by: liaolecheng <1079969914@qq.com>
2025-08-13 14:25:31 +08:00
Tessa Pham 0243055f7f update -f flag description
Signed-off-by: Tessa Pham <hpham111@bloomberg.net>
2025-08-13 02:24:07 -04:00
karmada-bot de58287f3c
Merge pull request #6630 from liaolecheng/update/go1.24.6
Bump golang to 1.24.6
2025-08-13 12:25:44 +08:00
liaolecheng 55678eaed4 Bump golang to 1.24.6
Signed-off-by: liaolecheng <1079969914@qq.com>
2025-08-13 11:32:38 +08:00
changzhen 6f93ef0e5a ensure resource interpreter cache sync before starting controllers
Signed-off-by: changzhen <changzhen5@huawei.com>
2025-08-13 10:42:25 +08:00
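The cache-sync fixes in this range (#6626 above, #6602 and #6612 below) share one theme: a component must not report HasSynced, or start controllers, before its informer caches finish the initial list. A minimal client-go sketch of that wait, with hypothetical names:

```go
// Minimal client-go sketch: never report synced or start controllers before
// the informer's initial list completes. Names are hypothetical.
package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	factory := informers.NewSharedInformerFactory(client, 30*time.Second)
	podsSynced := factory.Core().V1().Pods().Informer().HasSynced

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	factory.Start(ctx.Done())

	// Block here; a HasSynced that returns true before this point is the
	// kind of premature "synced" report these fixes remove.
	if !cache.WaitForCacheSync(ctx.Done(), podsSynced) {
		panic("failed to wait for caches to sync")
	}
	fmt.Println("caches synced; safe to start controllers")
}
```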
karmada-bot 414cb996b3
Merge pull request #6554 from NickYadance/sync-searchcache-before-start
Synchronize search cache before starting Karmada search controller
2025-08-12 17:24:44 +08:00
karmada-bot 7eafdfd269
Merge pull request #6622 from XiShanYongYe-Chang/fix-6616
When using MCS and MCI simultaneously, prevent resource residue caused by deleting MCS and MCI
2025-08-12 09:32:44 +08:00
changzhen 99b7902307 merge EndpointSlice Work finalizers instead of overwriting it
Signed-off-by: changzhen <changzhen5@huawei.com>
2025-08-11 21:13:03 +08:00
karmada-bot 5247671fa1
Merge pull request #6623 from karmada-io/dependabot/github_actions/actions/download-artifact-5
Bump actions/download-artifact from 4 to 5
2025-08-11 19:46:43 +08:00
dependabot[bot] a93e9fe1e7
Bump actions/download-artifact from 4 to 5
Bumps [actions/download-artifact](https://github.com/actions/download-artifact) from 4 to 5.
- [Release notes](https://github.com/actions/download-artifact/releases)
- [Commits](https://github.com/actions/download-artifact/compare/v4...v5)

---
updated-dependencies:
- dependency-name: actions/download-artifact
  dependency-version: '5'
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-08-11 08:32:39 +00:00
karmada-bot 94a8a8eef4
Merge pull request #6481 from mszacillo/quota-exceeded-condition
Add quota exceeded conditions in case scheduler fails to update resourcebinding
2025-08-11 15:30:43 +08:00
karmada-bot 8bcd2c3473
Merge pull request #6612 from liaolecheng/fix/hookManagerSynced
Ensure hookManager waits for informer cache sync before reporting as synced in CustomizedInterpreter
2025-08-11 14:18:43 +08:00
yi.wu 9ba4d2f5ac Synchronize search cache before starting Karmada search controller
Signed-off-by: yi.wu <yi.wu@shopee.com>
2025-08-11 11:53:50 +08:00
karmada-bot 5380f6159e
Merge pull request #6551 from CaesarTY/mcs-ctrl-log
refactor: enable JSON logging in mcs controller
2025-08-08 11:02:40 +08:00
karmada-bot 9b6adfd2d8
Merge pull request #6596 from jabellard/operator-env
Add support for specifying env vars and extra args for the Karmada operator
2025-08-08 10:35:40 +08:00
liaolecheng cee1d9bb98 Fix the issue that the cache is out of sync when hookManager's HasSynced is queried in CustomizedInterpreter
Signed-off-by: liaolecheng <1079969914@qq.com>
2025-08-08 10:33:29 +08:00
karmada-bot 108073c6bc
Merge pull request #6577 from jabellard/crd-proxy2
Proxy Support for Custom HTTP Source CRD Download Strategy in Karmada Operator
2025-08-08 09:29:40 +08:00
yteng35 99583d7273 refactor: enable JSON logging in mcs controller
Signed-off-by: yteng35 <yteng35@bloomberg.net>

nit

Signed-off-by: yteng35 <yteng35@bloomberg.net>

address comments

Signed-off-by: yteng35 <yteng35@bloomberg.net>

add separate log keys for the name and namespace

Signed-off-by: yteng35 <yteng35@bloomberg.net>

add workName key to reduce confusion

Signed-off-by: yteng35 <yteng35@bloomberg.net>
2025-08-07 09:57:44 -04:00
Joe Nathan Abellard c35162b834 Implementation
Signed-off-by: Joe Nathan Abellard <contact@jabellard.com>
2025-08-07 08:42:08 -04:00
Joe Nathan Abellard 5718bbd95e Add support for specifying env vars for Karmada operator
Signed-off-by: Joe Nathan Abellard <contact@jabellard.com>
2025-08-07 08:20:19 -04:00
Yifan 696e2a9a75 hack/local-up-karmada.sh supports WSL2
added log when getting IP address in WSL2 environment

replaced a special character that is only available on macOS with =; verify IP address for WSL2 environment

Signed-off-by: Yifan <zyfinori@gmail.com>
2025-08-06 23:12:13 -04:00
karmada-bot 64fbe7560d
Merge pull request #6574 from jabellard/crd-proxy
[Proposal]: Proxy Support for Custom HTTP Source CRD Download Strategy in Karmada Operator
2025-08-07 09:25:39 +08:00
Joe Nathan Abellard 28bee90e5e Add design doc
Signed-off-by: Joe Nathan Abellard <contact@jabellard.com>
2025-08-06 10:30:41 -04:00
karmada-bot bb4ec94ca9
Merge pull request #6570 from NUMBKV/struct-log-for-deploy-replicas-syncer-controller
JSON logging for deployment replicas syncer controller
2025-08-06 20:16:38 +08:00
karmada-bot 3ff577c1a1
Merge pull request #6602 from XiShanYongYe-Chang/wair-for-cache-in-ConfigurableInterpreter
Ensure configManager waits for informer cache sync before reporting as synced in ConfigurableInterpreter
2025-08-06 19:29:41 +08:00
karmada-bot bd7d74cd1e
Merge pull request #6566 from SujoyDutta/sdutta/application-failover-logs
application failover klog structured logging
2025-08-06 19:28:39 +08:00
kdeng46 88b8fdb13f Enhance logging in DeploymentReplicasSyncer for better clarity and debugging
Signed-off-by: kdeng46 <kdeng46@bloomberg.net>
2025-08-06 19:21:41 +08:00
changzhen b7b1d9ea39 Fix the issue that the cache is out of sync when configManager's HasSynced is queried in ConfigurableInterpreter
Signed-off-by: changzhen <changzhen5@huawei.com>
2025-08-06 17:27:59 +08:00
sdutta133 3bd480e8ba klog structured logging
Signed-off-by: sdutta133 <sdutta133@bloomberg.net>

fix lint issues

fix cicd

Signed-off-by: sdutta133 <sdutta133@bloomberg.net>

consistent naming

Signed-off-by: sdutta133 <sdutta133@bloomberg.net>
2025-08-05 14:09:32 -04:00
karmada-bot f5ee2ce432
Merge pull request #6555 from mohit-nagaraj/master
refactor(app): improve logging in graceful eviction controllers
2025-08-05 16:36:37 +08:00
karmada-bot 140f745e03
Merge pull request #6592 from zzklachlan/json-logging-federated-resource-quota
Update logging format for federated resource quota
2025-08-05 00:38:37 +08:00
Kai Zhang 4f8892d55d Update logging calls in federated resource quota
Signed-off-by: Kai Zhang <zzhang905@bloomberg.net>

Address gemini code review

Signed-off-by: Kai Zhang <zzhang905@bloomberg.net>
2025-08-04 10:10:22 -04:00
Mohit Nagaraj b1138ed602 refactor(app): improve logging in graceful eviction controllers to use structured logging
Signed-off-by: Mohit Nagaraj <mohitnagaraj20@gmail.com>
2025-08-04 12:16:50 +00:00
karmada-bot b8a9249f26
Merge pull request #6571 from greenmoon55/cronfederatedhpa-logs
structured logging for cronfederatedhpa controllers
2025-08-04 15:12:37 +08:00
Jin Dong cfa08e4ca0 structured logging for cronfederatedhpa controllers
Signed-off-by: Jin Dong <greenmoon55@gmail.com>

Commit suggestions from gemini

Signed-off-by: Jin Dong <greenmoon55@gmail.com>

Log namespace and name separately

Signed-off-by: Jin Dong <greenmoon55@gmail.com>
2025-08-02 21:01:53 -04:00
changzhen f80c61373d add proposal: Support cluster failover for stateful workloads
Signed-off-by: changzhen <changzhen5@huawei.com>
2025-08-02 17:29:44 +08:00
karmada-bot b8f6874c58
Merge pull request #6560 from karmada-io/dependabot/docker/cluster/images/alpine-3.22.1
Bump alpine from 3.22.0 to 3.22.1 in /cluster/images
2025-08-01 11:21:34 +08:00
karmada-bot 46495a227c
Merge pull request #6591 from zhzhuang-zju/releasenote-new
publish release v1.15.0-beta.0,v1.14.2,v1.13.5,v1.12.8
2025-07-31 20:50:34 +08:00
karmada-bot 295c69f76a
Merge pull request #6578 from cbaenziger/work_status_controller_structured_logging
Work status controller structured logging
2025-07-31 20:19:34 +08:00
zhzhuang-zju 7c30e24097 publish release v1.15.0-beta.0,v1.14.2,v1.13.5,v1.12.8
Signed-off-by: zhzhuang-zju <m17799853869@163.com>
2025-07-31 18:53:52 +08:00
Clay Baenziger de65bc7097 Use structured logging for work status controller
Signed-off-by: Clay Baenziger <cwb@clayb.net>
2025-07-31 02:48:41 -06:00
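The long run of "structured logging" commits in this range replaces format-string calls with klog's key/value API so the JSON log format can emit machine-parsable fields. A small before/after sketch; the message and keys are illustrative, not the controllers' actual ones:

```go
// Before/after sketch of the structured-logging migration; keys are
// illustrative, not the controllers' actual ones.
package main

import (
	"errors"

	"k8s.io/klog/v2"
)

func main() {
	klog.InitFlags(nil)
	defer klog.Flush()

	err := errors.New("work out of date")

	// Before: positional format strings, opaque to JSON log parsers.
	klog.Errorf("Failed to sync work %s/%s: %v", "default", "demo", err)

	// After: message plus key/value pairs; with --logging-format=json these
	// become discrete, machine-parsable fields.
	klog.ErrorS(err, "Failed to sync work", "namespace", "default", "work", "demo")
	klog.InfoS("Synced work", "namespace", "default", "work", "demo")
}
```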
karmada-bot f67dcc954e
Merge pull request #6469 from zclyne/yifan/workloadrebalancer-json-logging
workloadrebalancer controller now supports JSON format logging
2025-07-30 15:35:33 +08:00
Yifan 4bf059ffec workloadrebalancer controller now supports JSON format logging
Signed-off-by: Yifan <zyfinori@gmail.com>
2025-07-30 14:38:45 +08:00
Zhuyu Li 2d74aee39d
Use Structure Logging Enhancement for `Cluster Resource Binding Controller` (#6576)
* use structured logging for cluster resource binding controller

Signed-off-by: zhuyulicfc49 <zyliw49@gmail.com>

* Improve logs by using 'name' as key and log name only

Signed-off-by: zhuyulicfc49 <zyliw49@gmail.com>

---------

Signed-off-by: zhuyulicfc49 <zyliw49@gmail.com>
2025-07-29 10:27:32 +08:00
karmada-bot cb0acc0d61
Merge pull request #6575 from jabellard/group-operator-proposals
Group operator design documents under `docs/proposals/karmada-operator` directory
2025-07-29 10:22:31 +08:00
karmada-bot b30c170c96
Merge pull request #6567 from wangbowen1401/master
JSON logging for RB Status Controller
2025-07-29 10:08:31 +08:00
RainbowMango e160bfcf1d Refine Multiple Pod Templates Scheduling proposal
Signed-off-by: RainbowMango <qdurenhongcai@gmail.com>
2025-07-28 21:54:16 +08:00
Zhang Zhang a2c2057761
Use structured log for karmada operator (#6564)
* use structured log

Signed-off-by: zhangsquared <hi.zhangzhang@gmail.com>

* addressing comment

Signed-off-by: zhangsquared <hi.zhangzhang@gmail.com>

---------

Signed-off-by: zhangsquared <hi.zhangzhang@gmail.com>
2025-07-28 14:46:30 +08:00
karmada-bot a2980eb1b6
Merge pull request #6434 from XiShanYongYe-Chang/fix-6433
Ensure EndpointSlice informer cache is synced before reporting EndpointSlice
2025-07-26 15:02:29 +08:00
karmada-bot db76ef01e2
Merge pull request #6565 from jennryaz/erya-slog2
Use structured logging for cluster status controller
2025-07-26 14:40:28 +08:00
Eugene Ryazanov a4795803d1 Use structured logging for cluster status controller
Signed-off-by: Eugene Ryazanov <yryazanov@bloomberg.net>
2025-07-26 14:37:26 +08:00
Bowen Wang fd8241598c Update RB status controller to use JSON logging
Signed-off-by: Bowen Wang <43794678+wangbowen1401@users.noreply.github.com>
2025-07-25 20:28:37 +00:00
changzhen c3e5b1dc7b deprecated Immediately and Graciously PurgeMode
Signed-off-by: changzhen <changzhen5@huawei.com>
2025-07-25 11:29:29 +08:00
changzhen b0560d63dd wait for EndpointSlice informer synced when report EndpointSlice
Signed-off-by: changzhen <changzhen5@huawei.com>
2025-07-25 10:44:16 +08:00
Joe Nathan Abellard 47b5bdcafd Group operator design documents
Signed-off-by: Joe Nathan Abellard <contact@jabellard.com>
2025-07-24 19:48:36 -04:00
karmada-bot 5f4bd5e765
Merge pull request #6546 from Bhaumik10/mcs-crtl-logs
JSON logging for multiclusterservice controllers
2025-07-24 18:40:27 +08:00
mszacillo ee054544c4 Add quota exceeded conditions in case scheduler fails to update resourcebinding
Signed-off-by: mszacillo <mszacillo@bloomberg.net>
2025-07-23 14:19:22 +02:00
karmada-bot 3024d3321e
Merge pull request #6454 from zclyne/yifan/scheduler-unit-test-port
detect healthz/metrics server ports of the scheduler/descheduler unit tests automatically to avoid flaky test failures
2025-07-23 09:57:26 +08:00
Yifan Zhang a1290871ea dynamically pick free ports for healthz and metrics server of the scheduler/descheduler unit test to fix intermittent test failures
Signed-off-by: Yifan Zhang <zyfinori@gmail.com>
2025-07-22 19:54:39 -04:00
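The flaky-test fix above works by letting the OS assign unused ports instead of hard-coding them. A common Go pattern for that, with a hypothetical helper name (note the small window between releasing and rebinding the port):

```go
package main

import (
	"fmt"
	"net"
)

// freePort asks the kernel for an unused TCP port by listening on port 0,
// then releases it so the component under test can bind it.
func freePort() (int, error) {
	l, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		return 0, err
	}
	defer l.Close()
	return l.Addr().(*net.TCPAddr).Port, nil
}

func main() {
	port, err := freePort()
	if err != nil {
		panic(err)
	}
	fmt.Printf("healthz/metrics can bind 127.0.0.1:%d\n", port)
}
```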
Bhaumik Patel bcb3b08376 JSON logging for multiclusterservice controllers
Signed-off-by: Bhaumik Patel <bhaumikpatel029@gmail.com>

Address comments

Signed-off-by: Bhaumik Patel <bhaumikpatel029@gmail.com>

Address comments

Address comments

Address comments
2025-07-22 09:23:19 -04:00
karmada-bot f4f63c8d25
Merge pull request #6545 from nihar4276/federatedhpajsonlogging
Add Structured Logging for Federated HPA controller
2025-07-22 15:02:26 +08:00
karmada-bot be98c622e0
Merge pull request #6563 from karmada-io/dependabot/github_actions/sigstore/cosign-installer-3.9.2
Bump sigstore/cosign-installer from 3.9.1 to 3.9.2
2025-07-22 09:49:26 +08:00
karmada-bot 84359efa64
Merge pull request #6533 from abhi0324/add-UnitedDeployment
feat: add resource interpreter customization for UnitedDeployment
2025-07-21 21:21:25 +08:00
Abhiswant Chaudhary 7bf8413888
feat: add resource interpreter customization for UnitedDeployment
Signed-off-by: Abhiswant Chaudhary <abhiswant0324@gmail.com>
2025-07-21 14:06:12 +05:30
karmada-bot 95802b0204
Merge pull request #6524 from abhi0324/add-SideCarSet
feat: add resource interpreter customization for SidecarSet
2025-07-21 16:13:25 +08:00
dependabot[bot] 96f4744eb2
Bump sigstore/cosign-installer from 3.9.1 to 3.9.2
Bumps [sigstore/cosign-installer](https://github.com/sigstore/cosign-installer) from 3.9.1 to 3.9.2.
- [Release notes](https://github.com/sigstore/cosign-installer/releases)
- [Commits](https://github.com/sigstore/cosign-installer/compare/v3.9.1...v3.9.2)

---
updated-dependencies:
- dependency-name: sigstore/cosign-installer
  dependency-version: 3.9.2
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-07-21 06:53:18 +00:00
dependabot[bot] 1aeed5a32e
Bump alpine from 3.22.0 to 3.22.1 in /cluster/images
Bumps alpine from 3.22.0 to 3.22.1.

---
updated-dependencies:
- dependency-name: alpine
  dependency-version: 3.22.1
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-07-21 06:40:29 +00:00
karmada-bot 592fa3224d
Merge pull request #6558 from seanlaii/controller-gen-18
Bump controller-gen version to v0.18.0
2025-07-21 11:21:24 +08:00
karmada-bot ff1b8fd429
Merge pull request #6556 from seanlaii/k8s-1.33
Update Kubernetes versions in CI workflows to include v1.33.0
2025-07-21 10:51:24 +08:00
wei-chenglai 001c107025 Bump controller-gen version to v0.18.0
Signed-off-by: wei-chenglai <qazwsx0939059006@gmail.com>
2025-07-20 22:23:19 -04:00
karmada-bot 9824e3d8f8
Merge pull request #6512 from XiShanYongYe-Chang/remove-instruction-label
Delete the propagation.karmada.io/instruction label
2025-07-21 10:00:25 +08:00
karmada-bot 0b5fe5ec82
Merge pull request #6557 from seanlaii/go-1.24.5
Bump go version to 1.24.5
2025-07-21 09:59:24 +08:00
wei-chenglai 292328b23f Update Kubernetes versions in CI workflows to include v1.33.0
Signed-off-by: wei-chenglai <qazwsx0939059006@gmail.com>
2025-07-20 21:41:22 -04:00
wei-chenglai 3c6394a391 Bump go version to 1.24.4
Signed-off-by: wei-chenglai <qazwsx0939059006@gmail.com>
2025-07-20 21:06:56 -04:00
karmada-bot f53dffef80
Merge pull request #6550 from liaolecheng/update/mockery-to-v3.5.1
Bump github.com/vektra/mockery to v3.5.1
2025-07-18 15:11:21 +08:00
karmada-bot ddfd650db4
Merge pull request #6549 from seanlaii/kind-0.29
Bump sigs.k8s.io/kind to v0.29.0
2025-07-18 14:53:22 +08:00
karmada-bot 538aff6c9e
Merge pull request #6552 from iawia002/kustomize-action
Fix the rate-limit issue of kustomize action
2025-07-18 14:32:22 +08:00
wei-chenglai f281d69594 Bump sigs.k8s.io/kind to v0.29.0
Signed-off-by: wei-chenglai <qazwsx0939059006@gmail.com>
2025-07-18 00:16:23 -04:00
Xinzhao Xu e2330eba0f Fix the rate-limit issue of kustomize action
Signed-off-by: Xinzhao Xu <z2d@jifangcheng.com>
2025-07-18 12:01:51 +08:00
changzhen 027741bd38 delete the propagation.karmada.io/instruction label
Signed-off-by: changzhen <changzhen5@huawei.com>
2025-07-18 11:33:05 +08:00
liaolecheng 868be81689 update mockery to v3.5.1
Signed-off-by: liaolecheng <1079969914@qq.com>
2025-07-18 11:25:12 +08:00
nrao65 689680162e Add Structured Logging for Federated HPA controller
Signed-off-by: nrao65 <nrao65@bloomberg.net>
2025-07-17 11:20:58 -04:00
karmada-bot f4919abce5
Merge pull request #6548 from seanlaii/metrics-server-0.8
Bump sigs.k8s.io/metrics-server to v0.8.0
2025-07-17 16:19:21 +08:00
wei-chenglai 7f988b2dd0 Bump sigs.k8s.io/metrics-server to v0.8.0
Signed-off-by: wei-chenglai <qazwsx0939059006@gmail.com>
2025-07-17 00:07:29 -04:00
karmada-bot cf45b57569
Merge pull request #6540 from zhangsquared/log-taint-policy-controller
Use structured logging for cluster taint policy controller
2025-07-17 10:47:21 +08:00
zhangsquared d2694e513c use structured log for taint policy controller
Signed-off-by: zhangsquared <hi.zhangzhang@gmail.com>
2025-07-17 09:54:10 +08:00
Kai Zhang c60a2e05f6
Update logging format for clusterResourceBindingController (#6526)
Signed-off-by: Kai Zhang <zzhang905@bloomberg.net>
Co-authored-by: Kai Zhang <zzhang905@bloomberg.net>
2025-07-15 23:32:20 +08:00
karmada-bot 74df2b4e4d
Merge pull request #6534 from zhzhuang-zju/releasenote-new
publish release v1.15.0-alpha.2
2025-07-15 21:52:20 +08:00
zhzhuang-zju 0e8e6f0e55 publish release v1.15.0-alpha.2
Signed-off-by: zhzhuang-zju <m17799853869@163.com>
2025-07-15 17:33:38 +08:00
karmada-bot aee6e859b5
Merge pull request #6532 from whitewindmills/pr-3370
feat: retain registry credentials for ServiceAccount
2025-07-15 10:00:18 +08:00
whitewindmills e2d70a7534 retain registry credentials for ServiceAccount
Signed-off-by: whitewindmills <jayfantasyhjh@gmail.com>
2025-07-14 17:09:25 +08:00
karmada-bot 55aead1053
Merge pull request #6525 from XiShanYongYe-Chang/fix-6510
Fixed the issue that resources are recreated on the cluster after being deleted when the resource is suspended for dispatching
2025-07-14 11:26:18 +08:00
changzhen b8e9c849de don't recreate resource in the member cluster when it is suspended for dispatching
Signed-off-by: changzhen <changzhen5@huawei.com>
2025-07-14 09:50:40 +08:00
Abhiswant Chaudhary 42a772f700
feat: add resource interpreter customization for SidecarSet
Signed-off-by: Abhiswant Chaudhary <abhiswant0324@gmail.com>

feat: add resource interpreter customization for SidecarSet

Signed-off-by: Abhiswant Chaudhary <abhiswant0324@gmail.com>

fix: remove redundant status preservation in SidecarSet retention

Signed-off-by: Abhiswant Chaudhary <abhiswant0324@gmail.com>

fix: remove redundant status preservation in SidecarSet Retain

Signed-off-by: Abhiswant Chaudhary <abhiswant0324@gmail.com>

improve SidecarSet resource interpreter retention and dependency logic

Signed-off-by: Abhiswant Chaudhary <abhiswant0324@gmail.com>
2025-07-12 03:39:17 +05:30
karmada-bot 9966a3fdb7
Merge pull request #5085 from mszacillo/crd-scheduling-proposal
Proposal for multiple pod template support
2025-07-11 17:22:16 +08:00
karmada-bot 59c2837c1b
Merge pull request #6515 from mohamedawnallah/addGeminiCodeAssist
CI: add Gemini assist for code reviews
2025-07-11 11:33:15 +08:00
Mohamed Awnallah d12ea82bc4 CI: add gemini assist for code reviews
Signed-off-by: Mohamed Awnallah <mohamedmohey2352@gmail.com>
2025-07-10 14:33:46 +00:00
karmada-bot 664c7f6c09
Merge pull request #6488 from CaesarTY/cert-ctrl-logs
refactor: enable JSON logging in certificate controller
2025-07-09 16:54:14 +08:00
yteng35 fe06ebc5a0 refactor: replace klog with controller-runtime logger in certificate controller
Signed-off-by: yteng35 <yteng35@bloomberg.net>
2025-07-09 14:13:20 +08:00
karmada-bot efca312a57
Merge pull request #6519 from zhzhuang-zju/trivyfix
Eliminate Trivy abnormal alerts
2025-07-09 12:00:14 +08:00
zhzhuang-zju ef629a240f Eliminate Trivy abnormal alerts
Signed-off-by: zhzhuang-zju <m17799853869@163.com>
2025-07-09 11:07:54 +08:00
karmada-bot d44ab1d1a8
Merge pull request #6517 from seanlaii/fix-search-chart
fix: Update Karmada search template to use search.resources for resources
2025-07-09 09:24:13 +08:00
karmada-bot 1b7bdf4d2b
Merge pull request #6514 from karmada-io/dependabot/github_actions/aquasecurity/trivy-action-0.32.0
Bump aquasecurity/trivy-action from 0.31.0 to 0.32.0
2025-07-08 10:03:13 +08:00
wei-chenglai bd51389f9a fix: Update Karmada search template to use search.resources for resources
Signed-off-by: wei-chenglai <qazwsx0939059006@gmail.com>
2025-07-07 15:56:13 -04:00
karmada-bot 0793caac7a
Merge pull request #6498 from RainbowMango/pr_bump_kubernetes_v1332
Bump Kubernetes dependencies from v1.32.3 to v1.33.2
2025-07-07 16:08:13 +08:00
dependabot[bot] e3f0ec122b
Bump aquasecurity/trivy-action from 0.31.0 to 0.32.0
Bumps [aquasecurity/trivy-action](https://github.com/aquasecurity/trivy-action) from 0.31.0 to 0.32.0.
- [Release notes](https://github.com/aquasecurity/trivy-action/releases)
- [Commits](https://github.com/aquasecurity/trivy-action/compare/0.31.0...0.32.0)

---
updated-dependencies:
- dependency-name: aquasecurity/trivy-action
  dependency-version: 0.32.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-07-07 06:43:02 +00:00
RainbowMango 80677b3f77 Adopt Kubernetes breaking change regarding effective version in PR129416
Adopt Kubernetes breaking change: change storage interface in PR130400
Adopt Kubernetes breaking change: use CacheDelegator to replace Cacher in PR129443
Adopt Kubernetes breaking change: Refactor explain cmd in PR129784
Adopt controller-runtime changes: Deprecates result.requeue in PR3107
Adopt Kubernetes breaking change: disable watchlist in PR131359

Signed-off-by: changzhen <changzhen5@huawei.com>
Signed-off-by: RainbowMango <qdurenhongcai@gmail.com>
2025-07-07 11:14:04 +08:00
karmada-bot 3e4771b914
Merge pull request #6462 from abhinav-1305/fix/api-enablement-no-deletion
fix: ensure APIEnablement plugin never deletes scheduled resources
2025-07-04 14:17:10 +08:00
Abhinav Kumar f86aca8816 squash commits
Signed-off-by: Abhinav Kumar <abhinavkumar130503@gmail.com>
Signed-off-by: Abhinav Kumar <abhinavkumar130503@gmail.com>
2025-07-04 10:45:03 +08:00
RainbowMango 66d49eb465 Update automatic generated files
Bump sigs.k8s.io/custom-metrics-apiserver to v1.33.0
Bump controller-runtime to v0.21.0
Bump sigs.k8s.io/metrics-server to v0.7.1-0.20250620093659-55b4961bc1ec

Signed-off-by: RainbowMango <qdurenhongcai@gmail.com>
2025-07-03 15:57:43 +08:00
RainbowMango b11f53008c Bump Kubernetes dependencies to v1.33.2
Signed-off-by: RainbowMango <qdurenhongcai@gmail.com>
2025-07-02 17:52:25 +08:00
karmada-bot d74b6da145
Merge pull request #6507 from ritzdevp/rs/logfix
JSON Logging support for Karmada Aggregated API Server
2025-07-02 15:00:08 +08:00
ritzdevp c9efbe26b5 Add structured logging aggregated-apiserver
Signed-off-by: Rituraj Singh <rsingh660@bloomberg.net>

Add persistentprerune

Signed-off-by: Rituraj Singh <rsingh660@bloomberg.net>

Remove whitespace

Signed-off-by: Rituraj Singh <rsingh660@bloomberg.net>
2025-07-02 00:38:21 -04:00
karmada-bot c693ff8865
Merge pull request #6506 from jennryaz/erya-slog
Use structured logging for resource binding controller
2025-07-01 19:20:07 +08:00
karmada-bot e15e496a58
Merge pull request #6508 from wenhuwang/remedy-ctrl-logs
Add json logging support for remediation controller
2025-07-01 17:34:08 +08:00
wenhuwang 941ace03dc
Add json logging support for remediation controller
Signed-off-by: wenhuwang <wang15691700816@gmail.com>
2025-07-01 09:52:11 +08:00
Eugene Ryazanov 6347158e3e Use structured logging for resource binding controller
Signed-off-by: Eugene Ryazanov <yryazanov@bloomberg.net>
2025-06-30 10:19:16 -04:00
karmada-bot 1c4af4bb58
Merge pull request #6504 from zhzhuang-zju/releasenote-new
publish release note for v1.15.0-alpha.1, v1.14.1, v1.13.4, v1.12.7
2025-06-30 21:13:06 +08:00
zhzhuang-zju 5bcd841869 publish release note for v1.15.0-alpha.1, v1.14.1, v1.13.4, v1.12.7
Signed-off-by: zhzhuang-zju <m17799853869@163.com>
2025-06-30 20:14:19 +08:00
karmada-bot a8cafd0496
Merge pull request #6477 from zhzhuang-zju/frq
Use rbSpec.clusters and rbSpec.ReplicaRequirements to calculate rb's resource usage
2025-06-30 16:29:06 +08:00
karmada-bot 3e97b8ecd8
Merge pull request #6501 from karmada-io/dependabot/github_actions/sigstore/cosign-installer-3.9.1
Bump sigstore/cosign-installer from 3.9.0 to 3.9.1
2025-06-30 15:58:07 +08:00
dependabot[bot] 483fbf41d0
Bump sigstore/cosign-installer from 3.9.0 to 3.9.1
Bumps [sigstore/cosign-installer](https://github.com/sigstore/cosign-installer) from 3.9.0 to 3.9.1.
- [Release notes](https://github.com/sigstore/cosign-installer/releases)
- [Commits](https://github.com/sigstore/cosign-installer/compare/v3.9.0...v3.9.1)

---
updated-dependencies:
- dependency-name: sigstore/cosign-installer
  dependency-version: 3.9.1
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-06-30 07:05:57 +00:00
karmada-bot 956ed039df
Merge pull request #6491 from XiShanYongYe-Chang/apply-NoExecuteTaintEvictionPurgeMode-to-crb
Fix issue that taint-manager doesn't honour `--no-execute-taint-eviction-purge-mode` when evicting ClusterResourceBinding
2025-06-30 11:47:06 +08:00
karmada-bot b1c6095564
Merge pull request #6485 from RainbowMango/pr_update_kubernetes_compatibility
Link Kubernetes compatibility to website
2025-06-29 17:09:06 +08:00
karmada-bot 591193b358
Merge pull request #6496 from LeonZh0u/operator-structured-logs
JSON logging support for Karmada Operator
2025-06-27 18:59:03 +08:00
karmada-bot 450b642e19
Merge pull request #6490 from seanlaii/go-1.24
Bump go version to 1.24.4
2025-06-27 11:21:03 +08:00
wei-chenglai ea5bc281b9 Bump go version to 1.24.4
Signed-off-by: wei-chenglai <qazwsx0939059006@gmail.com>
2025-06-26 21:44:53 -04:00
karmada-bot 2ce5c464f8
Merge pull request #6494 from seanlaii/govet
fix: Address all `govet` printf issues across the codebase
2025-06-27 09:22:03 +08:00
lzhou286 a56fb20287 Structured Logging for Karmada Components
Signed-off-by: lzhou286 <lzhou286@bloomberg.net>

Fix test image

Signed-off-by: lzhou286 <lzhou286@bloomberg.net>

Fix linter

Signed-off-by: lzhou286 <lzhou286@bloomberg.net>

Fix linter

Signed-off-by: lzhou286 <lzhou286@bloomberg.net>
2025-06-26 18:20:24 -04:00
karmada-bot aa02d191bc
Merge pull request #6487 from SujoyDutta/sdutta/execution-ctrl-logs
continue using klog with structured logging
2025-06-27 03:05:04 +08:00
sdutta133 f69d55b61b continue using klog with structured logging
Signed-off-by: sdutta133 <sdutta133@bloomberg.net>

linter issue

Signed-off-by: sdutta133 <sdutta133@bloomberg.net>

fix logging pattern

Signed-off-by: sdutta133 <sdutta133@bloomberg.net>
2025-06-26 14:06:35 -04:00
karmada-bot e37fe36cfc
Merge pull request #6482 from Bhaumik10/ns-ctrl-logs
JSON Logging for Namespace sync controller
2025-06-27 01:18:04 +08:00
Bhaumik Patel 00dcfc0b6a JSON Logging for Namespace sync controller
Signed-off-by: Bhaumik Patel <bp1232@nyu.edu>

namespace-sync-ctrl:Address comments

Signed-off-by: Bhaumik Patel <bp1232@nyu.edu>
2025-06-26 11:13:15 -04:00
karmada-bot c44235afc5
Merge pull request #6493 from mszacillo/descheduler-structured-logs
Add JSON logging support for Karmada descheduler component
2025-06-26 21:28:05 +08:00
karmada-bot ea9bcecb6a
Merge pull request #6484 from RainbowMango/pr_enhance_prtemplate
improve pull request template
2025-06-26 12:19:02 +08:00
wei-chenglai d96c8863ff fix: Address all govet printf issues across the codebase
Signed-off-by: wei-chenglai <qazwsx0939059006@gmail.com>
2025-06-25 23:30:46 -04:00
mszacillo 3751a16058 Add JSON logging support for Karmada descheduler component
Signed-off-by: mszacillo <mszacillo@bloomberg.net>
2025-06-25 17:41:26 -04:00
changzhen 6093332de5 apply NoExecuteTaintEvictionPurgeMode value to the taint-manager when handle with clusterResourceBinding
Signed-off-by: changzhen <changzhen5@huawei.com>
2025-06-25 17:50:18 +08:00
karmada-bot 0b75a70173
Merge pull request #6474 from zhzhuang-zju/binding
Fix the issue that workload scale-up bypasses the FederatedResourceQuota check
2025-06-25 10:19:01 +08:00
zhzhuang-zju 7b9bf2fea1 fix the issue where federatedresourcequota becomes invalid when the workload scales up
Signed-off-by: zhzhuang-zju <m17799853869@163.com>
2025-06-24 20:34:25 +08:00
RainbowMango 702de1da1e Link Kubernetes compatibility to website
Signed-off-by: RainbowMango <qdurenhongcai@gmail.com>
2025-06-24 20:05:02 +08:00
RainbowMango 97a3d51d83 improve pull request template
Signed-off-by: RainbowMango <qdurenhongcai@gmail.com>
2025-06-24 15:01:19 +08:00
karmada-bot b20715eb4e
Merge pull request #6478 from karmada-io/dependabot/github_actions/sigstore/cosign-installer-3.9.0
Bump sigstore/cosign-installer from 3.8.2 to 3.9.0
2025-06-23 15:53:00 +08:00
dependabot[bot] 5b8e346eb0
Bump sigstore/cosign-installer from 3.8.2 to 3.9.0
Bumps [sigstore/cosign-installer](https://github.com/sigstore/cosign-installer) from 3.8.2 to 3.9.0.
- [Release notes](https://github.com/sigstore/cosign-installer/releases)
- [Commits](https://github.com/sigstore/cosign-installer/compare/v3.8.2...v3.9.0)

---
updated-dependencies:
- dependency-name: sigstore/cosign-installer
  dependency-version: 3.9.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-06-23 05:57:15 +00:00
karmada-bot e16a8e6f53
Merge pull request #6439 from nihar4276/metrics-adaptor-json-logging
JSON logging support for Karmada Metrics Adapter component
2025-06-23 09:28:00 +08:00
nrao65 eb3fed6aeb JSON logging support for Karmada Metrics Adapter component
Signed-off-by: nrao65 <nrao65@bloomberg.net>
2025-06-22 18:13:39 -04:00
zhzhuang-zju 3ed8f15934 use rbSpec.clusters and rbSpec.ReplicaRequirements to calculate rb's resource usage
Signed-off-by: zhzhuang-zju <m17799853869@163.com>
2025-06-20 17:43:31 +08:00
karmada-bot 2af742b2d3
Merge pull request #6466 from liwang0513/lwang/karmada_search_json_logging
JSON logging support for Karmada Search component
2025-06-20 11:31:57 +08:00
karmada-bot 22adecf2ce
Merge pull request #6473 from zzklachlan/karmada-scheduler-json-logging-support
Add json logging support for karmada scheduler
2025-06-20 11:29:57 +08:00
karmada-bot f5cbcbb4b2
Merge pull request #6457 from linyao22/scheduler-estimator-json-logging
JSON logging support for Karmada scheduler-estimator
2025-06-20 11:24:57 +08:00
Chelly_EGOIST 33d46e5919
added json logging support in karmada-agent (#6442)
* added json logging support in karmada-agent

Signed-off-by: Yifan <zyfinori@gmail.com>

* flush logs at the end of the main function of karmada-agent

Signed-off-by: Yifan <zyfinori@gmail.com>

---------

Signed-off-by: Yifan <zyfinori@gmail.com>
2025-06-20 11:21:57 +08:00
karmada-bot 312fbd088b
Merge pull request #6435 from jabellard/cluster-ctrl-logs
JSON logging for cluster controllers
2025-06-20 09:30:57 +08:00
karmada-bot a0178b7511
Merge pull request #6471 from jabellard/cm-json-log2
`karmada-controller-manager`: Add controller-runtime logs back
2025-06-19 20:42:57 +08:00
Joe Nathan Abellard 80c4221546 Update cluster controller logs
Signed-off-by: Joe Nathan Abellard <contact@jabellard.com>
2025-06-19 07:54:42 -04:00
Joe Nathan Abellard f804e7323b Set controller runtime logger on startup
Signed-off-by: Joe Nathan Abellard <contact@jabellard.com>
2025-06-19 07:36:48 -04:00
Kai Zhang 69b123f032 Add json logging support for karmada scheduler
Signed-off-by: Kai Zhang <zzhang905@bloomberg.net>
2025-06-18 12:04:36 -04:00
karmada-bot 571801fc39
Merge pull request #6430 from seanlaii/webhook-sl
JSON logging support for Karmada Webhook component
2025-06-18 17:05:56 +08:00
karmada-bot b85f29890d
Merge pull request #6450 from XiShanYongYe-Chang/remove-failover-eviction-timeout-flag
Remove failover-eviction-timeout flag in the karmada-controller-manager
2025-06-17 12:14:56 +08:00
karmada-bot afc4b4fc22
Merge pull request #6452 from zhzhuang-zju/helm
add helm index v1.14
2025-06-17 11:47:56 +08:00
Yao Lin 6eb2e71a58 JSON logging support for Karmada scheduler-estimator
Signed-off-by: Yao Lin <ylin298@bloomberg.net>
2025-06-16 20:34:55 -04:00
Li Wang 2f9eb82a15 JSON logging support for Karmada Search component
Signed-off-by: Li Wang <liwang0513@gmail.com>
2025-06-16 17:09:35 -07:00
karmada-bot c7d47050d9
Merge pull request #6447 from ryanwuer/make-expiry-configurable-for-root-ca-cert
feat: make expiry configurable for root ca cert
2025-06-16 14:40:55 +08:00
Ryan Wu b04b9a5000 feat: make expiry configurable for root ca cert
Signed-off-by: Ryan Wu <rongjun0821@gmail.com>
2025-06-16 13:47:10 +08:00
karmada-bot 74f615ad75
Merge pull request #6455 from abhinav-1305/update-ci
chore: update maintained versions to `1.14` and bump imageTag to `v1.14.0`
2025-06-16 11:14:54 +08:00
Abhinav Kumar 8f1e955154 chore: update dependabot target branches and image tags
Signed-off-by: Abhinav Kumar <abhinavkumar130503@gmail.com>
2025-06-16 07:51:33 +05:30
karmada-bot cf1a15c9ff
Merge pull request #6444 from XiShanYongYe-Chang/remove-toleration-flags-karmada-webhook
Remove default-not-ready-toleration-seconds and default-unreachable-toleration-seconds flags
2025-06-14 18:26:53 +08:00
karmada-bot 23848b3c0e
Merge pull request #6453 from zhzhuang-zju/flakey
flaky test: Retry on failed deployment update
2025-06-13 18:58:52 +08:00
zhzhuang-zju 7b54c00133 flaky test: Retry on failed deployment update
Signed-off-by: zhzhuang-zju <m17799853869@163.com>
2025-06-13 17:50:04 +08:00
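Retrying a failed deployment update in e2e usually means re-reading the object and re-applying the change on ResourceVersion conflicts. A client-go sketch of that pattern, with hypothetical namespace/name:

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/util/retry"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// Re-read and re-apply the mutation whenever the update hits a
	// ResourceVersion conflict, instead of failing the test on first try.
	err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
		deploy, err := client.AppsV1().Deployments("default").Get(context.TODO(), "demo", metav1.GetOptions{})
		if err != nil {
			return err
		}
		replicas := int32(3)
		deploy.Spec.Replicas = &replicas
		_, err = client.AppsV1().Deployments("default").Update(context.TODO(), deploy, metav1.UpdateOptions{})
		return err
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("deployment updated")
}
```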
zhzhuang-zju fe0975788a add helm index v1.14
Signed-off-by: zhzhuang-zju <m17799853869@163.com>
2025-06-13 16:26:21 +08:00
changzhen 52f445985a remove failover-eviction-timeout flag in the karmada-controller-manager
Signed-off-by: changzhen <changzhen5@huawei.com>
2025-06-13 11:45:12 +08:00
karmada-bot 0714abb806
Merge pull request #6446 from XiShanYongYe-Chang/fix-6438
Fix the issue that unjoining a cluster fails when Failover and taint-manager are disabled
2025-06-13 11:28:52 +08:00
changzhen c92b0d8edd register ResourceBindingIndexByFieldCluster without the control of Failover FeatureGate and taint-manager-controller
Signed-off-by: changzhen <changzhen5@huawei.com>
2025-06-13 09:40:25 +08:00
changzhen cd1ae695f5 remove default-not-ready-toleration-seconds and default-unreachable-toleration-seconds flags for karmada-webhook
Signed-off-by: changzhen <changzhen5@huawei.com>
2025-06-12 17:36:03 +08:00
karmada-bot bd5692f08a
Merge pull request #6432 from karmada-io/dependabot/github_actions/aquasecurity/trivy-action-0.31.0
Bump aquasecurity/trivy-action from 0.30.0 to 0.31.0
2025-06-09 14:40:49 +08:00
dependabot[bot] ead9434383
Bump aquasecurity/trivy-action from 0.30.0 to 0.31.0
Bumps [aquasecurity/trivy-action](https://github.com/aquasecurity/trivy-action) from 0.30.0 to 0.31.0.
- [Release notes](https://github.com/aquasecurity/trivy-action/releases)
- [Commits](https://github.com/aquasecurity/trivy-action/compare/0.30.0...0.31.0)

---
updated-dependencies:
- dependency-name: aquasecurity/trivy-action
  dependency-version: 0.31.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-06-09 05:49:19 +00:00
wei-chenglai 58a576332b JSON logging support for Karmada Webhook component
Signed-off-by: wei-chenglai <qazwsx0939059006@gmail.com>
2025-06-06 00:06:11 -04:00
deefreak bc4348d4a0
fixing hook issue with pre-install-job.yaml (#6395)
added pre-upgrade job

pre-upgrade-job review changes

pre-upgrade-job review changes

add pre-upgrade hook to the left cm

Signed-off-by: deefreak <deepakgts2@gmail.com>
2025-06-06 09:40:48 +08:00
Joe Nathan Abellard 343ffdaaf8
Enable JSON logging for Karmada controller manager (#6409)
Signed-off-by: Joe Nathan Abellard <contact@jabellard.com>
2025-06-06 09:03:46 +08:00
karmada-bot ff813d91f1
Merge pull request #6425 from XiShanYongYe-Chang/e2e-for-crb-reschedule
Add e2e for cluster-scope resource reschedule testing
2025-06-05 21:37:46 +08:00
changzhen ad438dc5b2 add e2e for cluster-scope resource reschedule testing
Signed-off-by: changzhen <changzhen5@huawei.com>
2025-06-05 20:07:57 +08:00
karmada-bot 721b4726af
Merge pull request #6412 from XiShanYongYe-Chang/e2e-clustertaintpolicy
Add E2E for ClusterTaintPolicy controller
2025-06-04 14:55:45 +08:00
karmada-bot c97336c458
Merge pull request #6426 from zhzhuang-zju/releasenote-new
update swagger json
2025-06-04 14:17:44 +08:00
changzhen cc39b22fb9 add e2e for ClusterTaintPolicy controller
Signed-off-by: changzhen <changzhen5@huawei.com>
2025-06-04 11:11:02 +08:00
zhzhuang-zju 0dc8cb9a31 update swagger json
Signed-off-by: zhzhuang-zju <m17799853869@163.com>
2025-06-04 10:24:19 +08:00
karmada-bot 994b068366
Merge pull request #6419 from karmada-io/dependabot/docker/cluster/images/alpine-3.22.0
Bump alpine from 3.21.3 to 3.22.0 in /cluster/images
2025-06-03 09:32:44 +08:00
dependabot[bot] 96b33e3a42
Bump alpine from 3.21.3 to 3.22.0 in /cluster/images
Bumps alpine from 3.21.3 to 3.22.0.

---
updated-dependencies:
- dependency-name: alpine
  dependency-version: 3.22.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-06-02 05:59:46 +00:00
karmada-bot 42dfade3fe
Merge pull request #6370 from XiShanYongYe-Chang/add-clustertaintpolicy-validation
Add the ClusterTaintPolicy validation
2025-05-30 20:16:41 +08:00
karmada-bot 453e5a1505
Merge pull request #6418 from zhzhuang-zju/releasenote-new
publish release v1.14.0&v1.13.3
2025-05-30 20:09:41 +08:00
karmada-bot 06e2cc6eaf
Merge pull request #6404 from XiShanYongYe-Chang/add-controller-flags-to-control-noexecute
Add failover flags to control the behavior of NoExecute
2025-05-30 20:02:42 +08:00
changzhen f969605411 add clustertaintpolicy validation
Signed-off-by: changzhen <changzhen5@huawei.com>
2025-05-30 19:24:22 +08:00
zhzhuang-zju e1d9afaa11 publish release v1.14.0&v1.13.3
Signed-off-by: zhzhuang-zju <m17799853869@163.com>
2025-05-30 19:17:29 +08:00
changzhen a12ebddea8 add failover flags support-no-execute and no-execute-purge-mode to control taint-manager
Signed-off-by: changzhen <changzhen5@huawei.com>
2025-05-30 19:10:34 +08:00
karmada-bot 5bf846b965
Merge pull request #6403 from XiShanYongYe-Chang/no-longer-add-terminting-NoExecute-taint
No longer add terminating NoExecute taint for the Deleting Cluster
2025-05-30 11:50:40 +08:00
changzhen 8f954fb96d no longer add terminating NoExecute taint for the Deleting Cluster
Signed-off-by: changzhen <changzhen5@huawei.com>
2025-05-29 22:21:16 +08:00
karmada-bot 34e17a61bd
Merge pull request #6417 from zhzhuang-zju/frqe2e
E2E: FederatedResourceQuota enforcement testing uses a unique namespace
2025-05-29 22:19:40 +08:00
karmada-bot fac95ad9ab
Merge pull request #6407 from zhzhuang-zju/period_quota
Periodically reconcile FederatedResourceQuota to update the usage state
2025-05-29 21:48:39 +08:00
zhzhuang-zju 8b03a530d3 E2E: FederatedResourceQuota enforcement testing uses a unique namespace
Signed-off-by: zhzhuang-zju <m17799853869@163.com>
2025-05-29 21:27:04 +08:00
zhzhuang-zju 4478ade8ca periodically reconcile FederatedResourceQuota to update the usage state
Signed-off-by: zhzhuang-zju <m17799853869@163.com>
2025-05-29 20:11:04 +08:00
karmada-bot 472822e2b7
Merge pull request #6408 from zhzhuang-zju/frqe2e
add e2e test to test FederatedResourceQuotaEnforcement
2025-05-29 18:18:39 +08:00
zhzhuang-zju bd71f13ba8 add e2e test to test FederatedResourceQuotaEnforcement
Signed-off-by: zhzhuang-zju <m17799853869@163.com>
2025-05-29 15:40:00 +08:00
karmada-bot 678b794c8f
Merge pull request #6413 from zhzhuang-zju/detector_optimization
detector: always use latest resourcetemplate to build bindings
2025-05-29 10:10:39 +08:00
karmada-bot 9baf285c00
Merge pull request #6411 from RainbowMango/pr_skip_patch_rb_before_scheduled
Skip reconciling ResourceBindings that have not been scheduled
2025-05-29 09:41:39 +08:00
karmada-bot 21ea6f6eec
Merge pull request #6397 from RainbowMango/pr_nominate_controller_owners
Nominate code owners of webhook and controllers
2025-05-29 09:37:39 +08:00
RainbowMango 12ad34ccfb Skip reconciling ResourceBindings that have not been scheduled
Signed-off-by: RainbowMango <qdurenhongcai@gmail.com>
2025-05-28 18:52:42 +08:00
zhzhuang-zju 5964cc0ecb detector: always use latest resourcetemplate to build bindings
Signed-off-by: zhzhuang-zju <m17799853869@163.com>
2025-05-28 15:30:36 +08:00
karmada-bot 307163dabc
Merge pull request #6400 from mszacillo/graceful-eviction-cleanup
Increase scope of Failover feature-gate to cover TaintManager
2025-05-26 17:53:37 +08:00
karmada-bot d75b38af78
Merge pull request #6405 from RainbowMango/pr_deprecate_flag-failover-eviction-timeout
Deprecate `--failover-eviction-timeout` as it becomes obsolete
2025-05-26 14:15:36 +08:00
RainbowMango c680adc609 The flag --failover-eviction-timeout in karmada-controller-manager has now been deprecated.
Signed-off-by: RainbowMango <qdurenhongcai@gmail.com>
2025-05-26 11:06:36 +08:00
karmada-bot 92954bd082
Merge pull request #6398 from RainbowMango/pr_link_overall_roadmap
Link roadmap to the one in community repo
2025-05-24 19:52:35 +08:00
RainbowMango 251fb32845 Link roadmap to community-wide roadmap
Signed-off-by: RainbowMango <qdurenhongcai@gmail.com>
2025-05-24 19:01:33 +08:00
karmada-bot 4eaed73480
Merge pull request #6389 from tiansuo114/taints
cluster-controller no longer taints clusters based on conditions
2025-05-24 16:18:35 +08:00
changzhen 2496cf3c08
cluster-controller no longer auto-adds the not-ready and unreachable NoExecute taints for clusters
Signed-off-by: changzhen <changzhen5@huawei.com>

Limit the removal of automatically added taints to those of type "NoExecute" only.

Signed-off-by: tiansuo <zhaoyi_114@outlook.com>
2025-05-24 11:16:59 +08:00
karmada-bot 9d68815af0
Merge pull request #6373 from XiShanYongYe-Chang/remove-noexecute-toleration
Deprecated default-not-ready-toleration-seconds and default-unreachable-toleration-seconds flags for karmada-webhook
2025-05-23 16:08:34 +08:00
mszacillo cd6b9a4474 Increase scope of Failover feature-gate to cover TaintManager
Signed-off-by: mszacillo <mszacillo@bloomberg.net>
2025-05-22 22:39:15 -04:00
changzhen a5ffc84e2a deprecated default-not-ready-toleration-seconds and default-unreachable-toleration-seconds flags for karmada-webhook
Signed-off-by: changzhen <changzhen5@huawei.com>
2025-05-23 10:33:10 +08:00
karmada-bot e8ddbd7e77
Merge pull request #6399 from XiShanYongYe-Chang/sort-policies-in-clustertaintpolicy-controller
Sort policies in cluster-taint-policy-controller to ensure idempotency
2025-05-23 09:17:34 +08:00
changzhen 8ce34ba61f sort policies in cluster-taint-policy-controller to ensure idempotency
Signed-off-by: changzhen <changzhen5@huawei.com>
2025-05-22 20:41:16 +08:00
RainbowMango 4779e7bce9 Nominate code owners of webhook and controllers
Signed-off-by: RainbowMango <qdurenhongcai@gmail.com>
2025-05-22 11:33:58 +08:00
karmada-bot ca072092bb
Merge pull request #6390 from XiShanYongYe-Chang/update-clustertaintpolicy-api
Add comments for field update and policy deletion for the ClusterTaintPolicy API
2025-05-21 15:13:32 +08:00
changzhen eaf2973188 adjust the definition of ClusterTaintPolicy API
Signed-off-by: changzhen <changzhen5@huawei.com>
2025-05-21 10:59:21 +08:00
karmada-bot 793b22106d
Merge pull request #6372 from ctripcloud/index
unify index registration
2025-05-20 16:33:31 +08:00
zach593 d1e12dfb7b unify index registration
Signed-off-by: zach593 <zach_li@outlook.com>
2025-05-20 15:31:12 +08:00
karmada-bot 2ac7306bfb
Merge pull request #6380 from baiyutang/feat/validate-lbClass
feat(controller.karmada): validateKarmadaAPIServer validates lbClass
2025-05-20 08:49:31 +08:00
karmada-bot 75cecbb87c
Merge pull request #6386 from XiShanYongYe-Chang/refactor-GenerateTaintsMessage-method
cleanup: refactor GenerateTaintsMessage method
2025-05-19 11:17:30 +08:00
karmada-bot a004c9488b
Merge pull request #6385 from seanlaii/operator-feature
Add feature gates support to karmada-webhook config (Operator)
2025-05-18 09:29:29 +08:00
Joe Nathan Abellard 4ae3e675ca
Support Suspension of Reconciliation for Karmada Control Planes (#6359)
* Implement suspension of reconciliation of Karmada instance

Signed-off-by: Joe Nathan Abellard <contact@jabellard.com>

* Implement suspension of reconciliation of Karmada instance

Signed-off-by: Joe Nathan Abellard <contact@jabellard.com>

* Address comments

Signed-off-by: Joe Nathan Abellard <contact@jabellard.com>

---------

Signed-off-by: Joe Nathan Abellard <contact@jabellard.com>
2025-05-17 18:41:29 +08:00
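The feature merged above toggles reconciliation on the operator's Karmada resource. A minimal sketch, assuming the toggle is a boolean spec.suspend field as described in the accompanying proposal (#6358); all other values are illustrative:

apiVersion: operator.karmada.io/v1alpha1
kind: Karmada
metadata:
  name: karmada-demo
  namespace: test
spec:
  # Assumed field from this PR: while true, the operator stops
  # reconciling this control plane (useful during maintenance).
  suspend: true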
changzhen 5e9501ad17 refactor GenerateTaintsMessage method
Signed-off-by: changzhen <changzhen5@huawei.com>
2025-05-17 18:24:17 +08:00
karmada-bot 595c5b4e1f
Merge pull request #6358 from jabellard/operator-pause
Proposal: Support Suspension of Reconciliation for Karmada Control Planes
2025-05-17 17:50:29 +08:00
karmada-bot 4d3554ba80
Merge pull request #6378 from zhzhuang-zju/releasenote-new
publish release note for v1.14.0-rc.0
2025-05-17 12:00:30 +08:00
karmada-bot 1801dca105
Merge pull request #6384 from seanlaii/chart-feature
Add feature gates support to karmada-webhook config (Helm Chart)
2025-05-17 11:18:30 +08:00
wei-chenglai a9de52fe9a Add feature gates support to karmada-webhook config (Operator)
Signed-off-by: wei-chenglai <qazwsx0939059006@gmail.com>
2025-05-16 22:38:41 -04:00
karmada-bot ab9e018835
Merge pull request #6368 from XiShanYongYe-Chang/add-clustertaintpolicy-controller
Add the ClusterTaintPolicy controller to handle taints on the clusters
2025-05-17 10:27:29 +08:00
wei-chenglai 86837297dc Add feature gates support to karmada-webhook config (Helm Chart)
Signed-off-by: wei-chenglai <qazwsx0939059006@gmail.com>
2025-05-16 20:43:47 -04:00
changzhen 3a72ee9d68 add clustertaintpolicy controller
Signed-off-by: changzhen <changzhen5@huawei.com>
2025-05-16 18:03:15 +08:00
zhzhuang-zju c0430749e1 publish release note for v1.14.0-rc.0
Signed-off-by: zhzhuang-zju <m17799853869@163.com>
2025-05-16 17:06:45 +08:00
karmada-bot 850c4fb9ef
Merge pull request #6377 from seanlaii/webhook-federatedRQ
Create RB ValidatingWebhook for FederatedResourceQuota check
2025-05-16 16:45:28 +08:00
wei-chenglai 4492764fe2 Add RB ValidatingWebhook for FederatedResourceQuota check
Signed-off-by: wei-chenglai <qazwsx0939059006@gmail.com>
2025-05-16 03:16:50 -04:00
karmada-bot 5730c74159
Merge pull request #6382 from liwang0513/lwang/update_staticAssignments_behavior
staticAssignment only updates AggregatedStatus
2025-05-16 12:41:28 +08:00
Li Wang cd4063e5f6 staticAssignment only updates AggregatedStatus
Signed-off-by: Li Wang <liwang0513@gmail.com>
2025-05-15 20:36:00 -07:00
karmada-bot cb26864e00
Merge pull request #6367 from mszacillo/frq-controller
Create federated-resource-quota-enforcement-controller
2025-05-16 10:13:28 +08:00
mszacillo 713b6b7c2d controller only reconciles quota, adding new events, fixing lint issues
Signed-off-by: mszacillo <mszacillo@bloomberg.net>
2025-05-15 12:52:56 -04:00
karmada-bot ade8678a04
Merge pull request #6375 from ctripcloud/work
move functions from pkg/util/helper/work.go to pkg/util/work.go
2025-05-15 21:25:27 +08:00
baiyutang 100b9b7add feat(controller.karmada): validateKarmadaAPIServer validates the LoadBalancerClass field
Signed-off-by: baiyutang <irichardwang@gmail.com>
2025-05-15 17:18:49 +08:00
karmada-bot 94a9f8bdbb
Merge pull request #6319 from XiShanYongYe-Chang/add-clustertaintpolicy-api
Add the ClusterTaintPolicy API
2025-05-15 12:02:27 +08:00
karmada-bot afd0ac414e
Merge pull request #6348 from rajsinghtech/rajsinghtech/loadbalancerclass
feat(operator): support loadBalancerClass for karmadaAPIServer
2025-05-15 10:57:27 +08:00
Raj Singh a29335eb0c feat(operator): support loadBalancerClass for karmadaAPIServer
This commit introduces support for the `loadBalancerClass` field in the Karmada operator CRD for the `karmadaAPIServer` component.

When the `serviceType` for `karmadaAPIServer` is set to `LoadBalancer`, users can now specify a `loadBalancerClass` to select a specific load balancer implementation, aligning with Kubernetes Service behavior.

Changes include:
- Added `loadBalancerClass` field to the `karmadas.operator.karmada.io` CRD.
- Added `LoadBalancerClass` field to the `KarmadaAPIServer` struct in Go types.
- Updated the `createKarmadaAPIServerService` function to set the
  `spec.loadBalancerClass` on the Service object when applicable.

Signed-off-by: Raj Singh <raj@tailscale.com>
Signed-off-by: Raj Singh <rajsinghcpre@gmail.com>
2025-05-15 09:53:42 +08:00
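A minimal sketch of the new field in use; serviceType and loadBalancerClass are named in the commit itself, while the surrounding spec.components.karmadaAPIServer layout and the class value are illustrative:

apiVersion: operator.karmada.io/v1alpha1
kind: Karmada
metadata:
  name: karmada-demo
  namespace: test
spec:
  components:
    karmadaAPIServer:
      serviceType: LoadBalancer
      # Selects a specific load balancer implementation, mirroring
      # the Kubernetes Service field of the same name.
      loadBalancerClass: example.com/internal-lb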
zach593 0e25236c54 move functions from pkg/util/helper/work.go to pkg/util/work.go
Signed-off-by: zach593 <zach_li@outlook.com>
2025-05-14 17:35:42 +08:00
karmada-bot 12d6522e80
Merge pull request #6369 from zhzhuang-zju/unschedulableBindings
[priority] fix the issue where bindings that fail occasionally will be treated as unschedulableBindings
2025-05-14 15:12:26 +08:00
karmada-bot a34c412b50
Merge pull request #6269 from tiansuo114/server_cert
design: Add component certificate identification for karmada components
2025-05-14 09:19:25 +08:00
Joe Nathan Abellard fc2593b9aa Address comments
Signed-off-by: Joe Nathan Abellard <contact@jabellard.com>
2025-05-13 16:21:58 -04:00
mszacillo 20c2a182f0 Create federated-resource-quota-enforcement-controller
Signed-off-by: mszacillo <mszacillo@bloomberg.net>
2025-05-13 10:32:04 -04:00
tiansuo e67ae000e1 design: Add component certificate identification for karmada hack script deploy
Signed-off-by: tiansuo <zhaoyi_114@outlook.com>
2025-05-13 19:57:01 +08:00
changzhen c5279a45b3 add clustertaintpolicy api
Signed-off-by: changzhen <changzhen5@huawei.com>
2025-05-13 16:20:29 +08:00
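The ClusterTaintPolicy added across these commits declares taints to add to or remove from member clusters based on their status conditions. A hedged sketch of such a policy; the add/remove-on-conditions model comes from the commits above, but the exact field names here are assumptions, not confirmed by this log:

apiVersion: policy.karmada.io/v1alpha1
kind: ClusterTaintPolicy
metadata:
  name: taint-not-ready-clusters
spec:
  targetClusters:
    clusterNames:
    - member1
  # Add the taint when the cluster's Ready condition is not True.
  addOnConditions:
  - conditionType: Ready
    operator: NotIn
    statusValues:
    - "True"
  # Remove it again once the cluster reports Ready=True.
  removeOnConditions:
  - conditionType: Ready
    operator: In
    statusValues:
    - "True"
  taints:
  - key: not-ready
    effect: NoSchedule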
karmada-bot e2f0e6e9f7
Merge pull request #6363 from zhzhuang-zju/staticAssignments
FederatedResourceQuota: skip creating redundant ResourceQuotas to member clusters
2025-05-13 16:15:26 +08:00
zhzhuang-zju 7ad3d481c9 [priority] fix the issue where bindings that fail occasionally will be treated as unschedulableBindings
Signed-off-by: zhzhuang-zju <m17799853869@163.com>
2025-05-13 15:37:58 +08:00
zhzhuang-zju f6eff100ef FederatedResourceQuota: skip creating redundant ResourceQuotas to member clusters
Signed-off-by: zhzhuang-zju <m17799853869@163.com>
2025-05-13 15:14:05 +08:00
karmada-bot d4cf986b48
Merge pull request #2168 from likakuli/feature_quotanamevalidate
feature: add name length check for fedresourcequota
2025-05-13 14:15:25 +08:00
likakuli 9dde2b04dd fix: add name length check for fedresourcequota
Signed-off-by: likakuli <1154584512@qq.com>

fix: add name length check for fedresourcequota

Signed-off-by: likakuli <1154584512@qq.com>

fix: add name length check for fedresourcequota

Signed-off-by: likakuli <1154584512@qq.com>
2025-05-13 10:54:51 +08:00
karmada-bot 96d4577581
Merge pull request #6366 from liwang0513/lwang/add_fqe_feature_flag
add `FederatedQuotaEnforcement` feature flag
2025-05-13 09:24:26 +08:00
karmada-bot fa7e6adbd3
Merge pull request #6365 from zhzhuang-zju/cleanupnamespace
e2e: cleanup all namespaces created both in control plane and member clusters
2025-05-13 09:16:25 +08:00
liwang0513 2a8fe27add add FederatedQuotaEnforcement feature flag
Signed-off-by: liwang0513 <liwang0513@gmail.com>
2025-05-12 15:23:13 -07:00
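Feature flags like the `FederatedQuotaEnforcement` gate added here are typically switched on through a component's --feature-gates flag. A minimal, illustrative fragment of a karmada-controller-manager container spec (the manifest layout is an assumption; only the gate name comes from this commit):

containers:
- name: karmada-controller-manager
  image: docker.io/karmada/karmada-controller-manager:latest
  command:
  - /bin/karmada-controller-manager
  # Enables the quota-enforcement logic guarded by the new gate.
  - --feature-gates=FederatedQuotaEnforcement=true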
zhzhuang-zju c08663deb2 e2e: cleanup all namespaces created both in control plane and member clusters
Signed-off-by: zhzhuang-zju <m17799853869@163.com>
2025-05-12 17:10:07 +08:00
karmada-bot fc44746412
Merge pull request #6364 from zhzhuang-zju/printcolumn
additional print columns for CRDs FederatedResourceQuota
2025-05-10 16:12:23 +08:00
zhzhuang-zju f42dcb79ec additional print columns for CRDs FederatedResourceQuota
Signed-off-by: zhzhuang-zju <m17799853869@163.com>
2025-05-10 11:35:47 +08:00
karmada-bot c5365b2468
Merge pull request #6315 from mohamedawnallah/updateGoDocCommentsForOperatorNamingsConstants
operator/pkg/util/naming: update go doc comments for more clarity and readability
2025-05-09 14:49:21 +08:00
karmada-bot 932ceb3ac9
Merge pull request #6356 from zhzhuang-zju/resourcesuspension
fixed the issue where resource scheduling suspension may become ineffective
2025-05-09 11:35:21 +08:00
zhzhuang-zju 3588f6b3ee fixed the issue where resource scheduling suspension may become ineffective
Signed-off-by: zhzhuang-zju <m17799853869@163.com>
2025-05-09 10:36:36 +08:00
karmada-bot 13bbcaa9b7
Merge pull request #6351 from RainbowMango/pr_twist_staticassignments_behavior
Twist the behavior of staticAssignments in FederatedResourceQuota
2025-05-09 09:40:21 +08:00
Joe Nathan Abellard f41e152f55 Add design doc
Signed-off-by: Joe Nathan Abellard <contact@jabellard.com>
2025-05-08 16:48:46 -04:00
RainbowMango 958e20dd6b Twist the behavior of staticAssignments in FederatedResourceQuota so that clusters not listed will have no ResourceQuota created.
Signed-off-by: RainbowMango <qdurenhongcai@gmail.com>
2025-05-07 17:40:43 +08:00
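To make the behavior change above concrete, a hedged FederatedResourceQuota sketch (cluster names and amounts are illustrative; the field layout follows the FederatedResourceQuota API as commonly documented). After this change, only clusters listed under staticAssignments get a ResourceQuota:

apiVersion: policy.karmada.io/v1alpha1
kind: FederatedResourceQuota
metadata:
  name: team-quota
  namespace: default
spec:
  overall:
    cpu: "10"
    memory: 20Gi
  staticAssignments:
  - clusterName: member1   # a ResourceQuota is created here
    hard:
      cpu: "4"
      memory: 8Gi
  # Any member cluster not listed here (e.g. member2) now has no
  # ResourceQuota created for it at all.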
karmada-bot cad6ad5dfa
Merge pull request #6189 from tiansuo114/self-signed_cert_proposal
docs: add proposal of Karmada Self-Signed Certificate Content Standardization
2025-05-07 14:48:21 +08:00
karmada-bot 4cf881aa3e
Merge pull request #6345 from seanlaii/upgrade-metrics
Bump metrics-server version to include k8s dependencies 0.32
2025-05-06 10:00:19 +08:00
karmada-bot 669dac4a58
Merge pull request #6344 from seanlaii/upgrade-custom-metrics
Bump custom-metrics-apiserver version to v1.32.0
2025-05-06 09:52:19 +08:00
wei-chenglai 78aa46a36e Bump metrics-server version to include k8s dependencies 0.32
Signed-off-by: wei-chenglai <qazwsx0939059006@gmail.com>
2025-05-03 18:58:33 -04:00
wei-chenglai d14e17c68b Bump custom-metrics-apiserver version to v1.32.0
Signed-off-by: wei-chenglai <qazwsx0939059006@gmail.com>
2025-05-03 15:14:57 -04:00
karmada-bot c5b2d3c65a
Merge pull request #6343 from seanlaii/upgrade-x-net
Bump golang.org/x/net version to 0.39.0
2025-05-03 21:35:17 +08:00
karmada-bot cdccd62c32
Merge pull request #6192 from ctripcloud/dynamic-ratelimiter
use singleton rate limiter for dynamically created member cluster clients
2025-05-03 21:16:16 +08:00
wei-chenglai 6f66e8bae7 Bump golang.org/x/net version to 0.39.0
Signed-off-by: wei-chenglai <qazwsx0939059006@gmail.com>
2025-04-30 21:30:04 -04:00
zach593 2b7ad8e673 use singleton rate limiter for dynamically created member cluster clients
Signed-off-by: zach593 <zach_li@outlook.com>
2025-04-30 21:26:12 +08:00
karmada-bot 8d1041bbce
Merge pull request #6331 from zhzhuang-zju/releasenote-new
publish release note for v1.14.0-beta.0, v1.13.2, v1.12.6, v1.11.9
2025-04-30 16:41:14 +08:00
zhzhuang-zju 486e3add02 publish release note for v1.14.0-beta.0, v1.13.2, v1.12.6, v1.11.9
Signed-off-by: zhzhuang-zju <m17799853869@163.com>
2025-04-30 14:48:00 +08:00
zhzhuang-zju 94a9764ddc publish release note for v1.14.0-beta.0, v1.13.2, v1.12.6, v1.11.9
Signed-off-by: zhzhuang-zju <m17799853869@163.com>
2025-04-30 14:47:31 +08:00
karmada-bot e89a092a68
Merge pull request #6284 from LivingCcj/fix_cluster-status-update-frequency
Fix the issue that the cluster status update interval does not honor `--cluster-status-update-frequency`
2025-04-30 09:41:13 +08:00
karmada-bot 8cbefd8c87
Merge pull request #6181 from mszacillo/frq-proposal
Proposal to enhance FederatedResourceQuota to enforce resource limits directly at the Karmada level
2025-04-30 05:21:14 +08:00
tiansuo114 7993912668
docs: add proposal of Karmada Self-Signed Certificate Content Standardization
Signed-off-by: tiansuo114 <zhaoyi_114@outlook.com>
2025-04-29 15:47:52 +08:00
karmada-bot def5021e9d
Merge pull request #4562 from everpeace/use-bootstrap-endpoint-instead-of-cluster-info
'karmadactl register' should use bootstrap endpoint for karmada-agents' kubeconfig instead of discovered cluster-info
2025-04-29 14:37:13 +08:00
Shingo Omura b6e0b45ca1 Use bootstrap endpoint for generating karmada-agents' kubeconfig in "karmadactl register" instead of discovered cluster-info
Signed-off-by: Shingo Omura <everpeace@gmail.com>
2025-04-29 11:15:45 +08:00
karmada-bot 73313d72e5
Merge pull request #6321 from seanlaii/operator-pod-ip
Update bind addresses to use POD_IP for karmada-operator
2025-04-29 10:44:14 +08:00
karmada-bot 5b7110468d
Merge pull request #6311 from RainbowMango/pr_bump_kubernetes_v1323
Bump Kubernetes dependencies from v1.31.3 to v1.32.3
2025-04-28 19:22:13 +08:00
karmada-bot d2959507b4
Merge pull request #6323 from seanlaii/karmadactl-pod-ip
Update bind addresses to use POD_IP for karmadactl
2025-04-28 15:39:13 +08:00
karmada-bot d8f8dff520
Merge pull request #6328 from karmada-io/dependabot/github_actions/sigstore/cosign-installer-3.8.2
build(deps): bump sigstore/cosign-installer from 3.8.1 to 3.8.2
2025-04-28 14:46:13 +08:00
dependabot[bot] 1656b3b818
build(deps): bump sigstore/cosign-installer from 3.8.1 to 3.8.2
Bumps [sigstore/cosign-installer](https://github.com/sigstore/cosign-installer) from 3.8.1 to 3.8.2.
- [Release notes](https://github.com/sigstore/cosign-installer/releases)
- [Commits](https://github.com/sigstore/cosign-installer/compare/v3.8.1...v3.8.2)

---
updated-dependencies:
- dependency-name: sigstore/cosign-installer
  dependency-version: 3.8.2
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-04-28 05:47:18 +00:00
karmada-bot 1381a58126
Merge pull request #6324 from seanlaii/helm-pod-ip
Update bind addresses to use POD_IP for helm
2025-04-28 10:57:12 +08:00
RainbowMango 70314fe38b adopt Kubernetes breaking change: 128196, moved version package from apiserver to component-base
Adopt Kubernetes 128196 breaking change
Adopt Kubernetes breaking change 127777, add delete options for Delete method in storage interface
Fix failing unit tests due to the WatchList feature gate now being enabled by default
Fix failing unit tests due to fake client changes

Signed-off-by: RainbowMango <qdurenhongcai@gmail.com>
2025-04-28 09:40:16 +08:00
RainbowMango 6a8b90bb69 Bump Kubernetes dependencies from v1.31.3 to v1.32.3
Bump custom-metrics-apiserver with a pseudo version
update go mod tidy
Update vendor
Update code gen
Update crdgen
Update swagger docs
Bump controller-runtime to v0.20.4

Signed-off-by: RainbowMango <qdurenhongcai@gmail.com>
2025-04-28 09:39:39 +08:00
karmada-bot f3e95de6f9
Merge pull request #6326 from ikaven1024/fix-proxy-pb
fix proxy request error with pb content type
2025-04-27 14:51:11 +08:00
LivingCcj 6257c985c3 update to 'predicate.GenerationChangedPredicate' in cluster_status_controller
Signed-off-by: LivingCcj <livingccj@163.com>
2025-04-27 14:30:36 +08:00
karmada-bot 4e651a0a58
Merge pull request #6150 from ctripcloud/deepequal-objectWatcher
Add deepequal check for objectWatcher.Update()
2025-04-27 11:56:12 +08:00
yingjinhui 7835614378 fix proxy request error with pb content type
Signed-off-by: yingjinhui <yingjinhui@didiglobal.com>
2025-04-26 20:37:00 +08:00
wei-chenglai 71cedc7f5f Update bind addresses to use POD_IP for helm
Signed-off-by: wei-chenglai <qazwsx0939059006@gmail.com>
2025-04-25 17:18:07 -04:00
wei-chenglai c7deed92c2 Update bind addresses to use POD_IP for karmadactl
Signed-off-by: wei-chenglai <qazwsx0939059006@gmail.com>
2025-04-25 16:01:07 -04:00
wei-chenglai 0d35e6d00e Update bind addresses to use POD_IP for karmada-operator
Signed-off-by: wei-chenglai <qazwsx0939059006@gmail.com>
2025-04-25 12:55:37 -04:00
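These commits apply the same change across packaging paths (helm, karmadactl, operator): bind to the pod's own IP rather than a wildcard address. A generic sketch of the downward-API wiring this implies; the component name and exact flag spelling are illustrative, not taken from this log:

containers:
- name: karmada-operator
  command:
  - /bin/karmada-operator
  # Bind to the pod IP exposed via the downward API.
  - --health-probe-bind-address=$(POD_IP):8081
  env:
  - name: POD_IP
    valueFrom:
      fieldRef:
        fieldPath: status.podIP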
Mohamed Awnallah b45612808e operator/pkg/util/naming: update go doc comments
In this commit, we update go doc comments and parameter names
for more clarity.

Signed-off-by: Mohamed Awnallah <mohamedmohey2352@gmail.com>
Co-authored-by: zhzhuang-zju <m17799853869@163.com>
2025-04-25 02:07:22 +00:00
karmada-bot c31c8e4a58
Merge pull request #5638 from mohamedawnallah/unitTestUploadInitTaskOperatorPackage
operator/pkg/tasks/init: unit test upload
2025-04-25 09:41:09 +08:00
mszacillo 4cbca9ca99 Proposal for federated resource quota enhancement
Signed-off-by: mszacillo <mszacillo@bloomberg.net>
2025-04-24 15:21:54 -04:00
Mohamed Awnallah 98b6bbb77a operator/pkg: address zhzhuang-zju feedback
Signed-off-by: Mohamed Awnallah <mohamedmohey2352@gmail.com>
2025-04-24 04:53:11 +00:00
Mohamed Awnallah 36c75b73ed operator/pkg/tasks/init: test upload init task
In this commit, we unit test the upload init task in the operator
package, covering creation of a new upload task and making sure the
upload `kubeconfig` secret works as expected.

Signed-off-by: Mohamed Awnallah <mohamedmohey2352@gmail.com>
2025-04-24 04:53:11 +00:00
karmada-bot c160708fa1
Merge pull request #6318 from mohamedawnallah/testCI
.github/workflows/installation-charts.yaml: update Helm and Kustomize to latest versions
2025-04-23 15:56:08 +08:00
Mohamed Awnallah e19e32428e .github/workflows: update helm and kustomize env
Signed-off-by: Mohamed Awnallah <mohamedmohey2352@gmail.com>
2025-04-23 03:35:58 +00:00
karmada-bot 2f6ff56c41
Merge pull request #5539 from mohamedawnallah/refactorMultiClusterServiceTest
pkg/webhook: refactor Multi-Cluster Service test
2025-04-22 15:29:08 +08:00
karmada-bot 9a8cbfd4ff
Merge pull request #5785 from mohamedawnallah/unitTestApplyKarmadactlCommand
pkg/karmadactl: unit test apply command
2025-04-22 10:22:07 +08:00
Mohamed Awnallah 1467ceaa21 pkg/webhook: refactor multi-cluster service
In this commit, we refactor the multi-cluster service object names
and check the number of patches after the mutation happens.

Signed-off-by: Mohamed Awnallah <mohamedmohey2352@gmail.com>
2025-04-22 01:45:57 +00:00
karmada-bot 37f2af6459
Merge pull request #5592 from mohamedawnallah/removeEtcdPeerServiceNameRedundantField
operator/pkg/controlplane/etcd: remove redundant `EtcdPeerServiceName`
2025-04-22 09:34:08 +08:00
Mohamed Awnallah 5d48d3c89b operator/pkg: rm redundant `EtcdPeerServiceName`
Signed-off-by: Mohamed Awnallah <mohamedmohey2352@gmail.com>
2025-04-22 00:36:17 +00:00
Mohamed Awnallah e8ee7db2df pkg/karmadactl: make copyright up-to-date
Signed-off-by: Mohamed Awnallah <mohamedmohey2352@gmail.com>
2025-04-21 23:41:38 +00:00
Mohamed Awnallah d1bf5a0ac9 pkg/karmadactl: unit test apply command
In this commit, we unit test the apply command, covering validation
and generation of propagation objects.

Signed-off-by: Mohamed Awnallah <mohamedmohey2352@gmail.com>
2025-04-21 23:41:37 +00:00
karmada-bot 87ee756ae7
Merge pull request #5813 from CharlesQQ/disdabledeepcopy
disable deepcopy for list action
2025-04-21 21:31:07 +08:00
chang.qiangqiang d58cbe1ea4 feat(binding): disable deepcopy for list action
Signed-off-by: chang.qiangqiang <chang.qiangqiang@immomo.com>
2025-04-19 23:53:47 +08:00
karmada-bot b3f93ecfa9
Merge pull request #6306 from seanlaii/ps-e2e
feat: Add SchedulePriority e2e tests
2025-04-18 14:39:05 +08:00
wei-chenglai d1d6a993fe add SchedulePriority e2e tests
Signed-off-by: wei-chenglai <qazwsx0939059006@gmail.com>
2025-04-17 21:49:13 -04:00
karmada-bot 18c48313ee
Merge pull request #6309 from liangyuanpeng/ci_1.32.0
chore: add kubernetes 1.32.0 to ci
2025-04-18 09:15:04 +08:00
Lan 5c3b177d6e chore: add kubernetes 1.32.0 to ci
Signed-off-by: Lan <gcslyp@gmail.com>
2025-04-17 22:24:37 +08:00
karmada-bot 8e09bf628e
Merge pull request #6271 from ctripcloud/context
replace stopCh with context
2025-04-17 17:21:03 +08:00
karmada-bot 707360fd34
Merge pull request #6303 from ClamRen/master
fix fhpa metrics issue introduced by #6300
2025-04-17 15:33:03 +08:00
zach593 273e95998d replace stopCh with context
Signed-off-by: zach593 <zach_li@outlook.com>
2025-04-17 14:12:13 +08:00
tangzr1 a428a17109 fix fhpa metrics issue introduced by #6300
Signed-off-by: tangzr1 <tangzr1@chinatelecom.cn>
2025-04-17 14:03:09 +08:00
Joe Nathan Abellard 6aa4f151e8
Proposal: Add PodDisruptionBudget (PDB) Support in the Karmada Operator (#6283)
* Add design doc

Signed-off-by: Joe Nathan Abellard <contact@jabellard.com>

* Address comments

Signed-off-by: Joe Nathan Abellard <contact@jabellard.com>

---------

Signed-off-by: Joe Nathan Abellard <contact@jabellard.com>
2025-04-17 09:27:04 +08:00
karmada-bot 31e3e53129
Merge pull request #6301 from zhzhuang-zju/releasenote-new
publish release note for v1.14.0-alpha.2
2025-04-17 09:16:03 +08:00
zhzhuang-zju 7e309cf535 publish release note for v1.14.0-alpha.2
Signed-off-by: zhzhuang-zju <m17799853869@163.com>
2025-04-16 15:29:03 +08:00
karmada-bot 1417bef111
Merge pull request #6299 from seanlaii/disable-ST1000-p-comm
Add comment for disabling ST1000 and package-comments
2025-04-15 10:41:01 +08:00
wei-chenglai 48bab1473c Add comment for disabling ST1000 and package-comments
Signed-off-by: wei-chenglai <qazwsx0939059006@gmail.com>
2025-04-14 21:41:30 -04:00
karmada-bot 45cf955fd0
Merge pull request #6287 from liwang0513/lwang/fix_flink_interpreter
update Flink interpreter to check error when job state is not published
2025-04-15 09:20:02 +08:00
karmada-bot 25138677a3
Merge pull request #6296 from seanlaii/fix-ST1011
Fix lint issue ST1011
2025-04-15 09:11:02 +08:00
liwang0513 6d5cf4326d update Flink interpreter to check error when job state is not published
Signed-off-by: liwang0513 <liwang0513@gmail.com>
2025-04-14 10:14:50 -07:00
wei-chenglai 4984d59858 Fix lint issue ST1011
Signed-off-by: wei-chenglai <qazwsx0939059006@gmail.com>
2025-04-14 12:34:47 -04:00
karmada-bot a5cd153fa0
Merge pull request #6286 from mojojoji/add-scheduler-estimator-enable-option-to-helm
add support to enable scheduler estimator in helm chart
2025-04-14 20:49:02 +08:00
karmada-bot 2d8213fefb
Merge pull request #6294 from RainbowMango/pr_refactor_work_index
Move index register to dedicated package
2025-04-14 20:42:01 +08:00
Joji Augustine 305b3b671e add support to enable scheduler estimator in helm chart
Signed-off-by: Joji Augustine <jojimail@gmail.com>

set the scheduler estimator enabled arg to true when enabled in values

Co-authored-by: zhzhuang-zju <m17799853869@163.com>
Signed-off-by: Joji Augustine <jojimail@gmail.com>

Signed-off-by: Joji Augustine <jojimail@gmail.com>
2025-04-14 13:26:48 +02:00
zach593 a4aa49fd00 add deepequal check in objectWatcher
Signed-off-by: zach593 <zach_li@outlook.com>
2025-04-14 18:35:33 +08:00
karmada-bot 8e18f30d6a
Merge pull request #5802 from CharlesQQ/resource-detector-optimization
feat(detector): remove type conversion in detector controller via controller-runtime
2025-04-14 18:03:01 +08:00
karmada-bot 4eb2d273ab
Merge pull request #6297 from seanlaii/fix-QF1001
Fix lint issue QF1001
2025-04-14 14:12:01 +08:00
karmada-bot 6d83324b41
Merge pull request #6295 from seanlaii/comment-QF1008
Add comment for disabling QF1008
2025-04-14 14:09:00 +08:00
chang.qiangqiang 6ac0f2cbed feat(detector): remove type conversion in detector controller
Signed-off-by: chang.qiangqiang <chang.qiangqiang@immomo.com>
2025-04-14 10:50:03 +08:00
wei-chenglai d4614ed69c Fix lint issue QF1001
Signed-off-by: wei-chenglai <qazwsx0939059006@gmail.com>
2025-04-13 22:15:30 -04:00
wei-chenglai 596172923b Add comment for disabling QF1008
Signed-off-by: wei-chenglai <qazwsx0939059006@gmail.com>
2025-04-13 21:42:03 -04:00
karmada-bot 1d2515901b
Merge pull request #6285 from seanlaii/kind-pod-ip
Update bind addresses to use POD_IP for local-up script
2025-04-14 09:17:00 +08:00
karmada-bot 83a4bb853a
Merge pull request #6290 from seanlaii/fix-ST1005
Fix lint issue ST1005
2025-04-14 09:12:00 +08:00
wei-chenglai c12b040d8d Fix lint issue ST1005
Signed-off-by: wei-chenglai <qazwsx0939059006@gmail.com>
2025-04-12 12:57:07 -04:00
RainbowMango 5865ee6ce2 Refactor Work index WorkIndexByResourceBindingID and WorkIndexByClusterResourceBindingID
Signed-off-by: RainbowMango <qdurenhongcai@gmail.com>
2025-04-12 19:07:06 +08:00
karmada-bot 73cce5999c
Merge pull request #6292 from seanlaii/fix-QF1003
Fix lint issue QF1003
2025-04-12 19:02:59 +08:00
karmada-bot a05c7dc006
Merge pull request #6291 from seanlaii/fix-QF1004
Fix lint issue QF1004
2025-04-12 16:32:59 +08:00
karmada-bot 8659599d97
Merge pull request #6289 from seanlaii/fix-ST1019
Fix lint issue ST1019
2025-04-12 14:16:59 +08:00
wei-chenglai 51aec83d12 Fix lint issue QF1003
Signed-off-by: wei-chenglai <qazwsx0939059006@gmail.com>
2025-04-12 00:33:48 -04:00
wei-chenglai 7fb59a8b62 Fix lint issue QF1004
Signed-off-by: wei-chenglai <qazwsx0939059006@gmail.com>
2025-04-12 00:29:11 -04:00
wei-chenglai 02598f609b Fix lint issue ST1019
Signed-off-by: wei-chenglai <qazwsx0939059006@gmail.com>
2025-04-12 00:14:23 -04:00
karmada-bot 71e103fb4e
Merge pull request #6237 from seanlaii/golangci-lint-v2
Upgrade golangci-lint from v1.64.8 to v2.0.2
2025-04-12 10:21:59 +08:00
wei-chenglai 38da3abc9b Update bind addresses to use POD_IP for local-up script
Signed-off-by: wei-chenglai <qazwsx0939059006@gmail.com>
2025-04-11 22:00:20 -04:00
wei-chenglai 2100504161 Upgrade golangci-lint from v1.64.8 to v2.0.2
Signed-off-by: wei-chenglai <qazwsx0939059006@gmail.com>
2025-04-11 09:26:32 -04:00
karmada-bot 5ce16455bb
Merge pull request #6252 from ctripcloud/svc-aggregate-status-dedup
Deduplicate and sort status for Service and Ingress when aggregating status
2025-04-11 10:51:58 +08:00
karmada-bot 787fd3a64a
Merge pull request #6215 from kubeservice-stack/add-karmada-metrics
feat: add build_info metrics and go runtime metrics for karmada
2025-04-10 10:45:58 +08:00
karmada-bot 0064b9ac63
Merge pull request #5334 from mohamedawnallah/testKarmadactlRegisterCommand
test/e2e/karmadactl_test.go: test register command
2025-04-09 10:34:56 +08:00
Mohamed Awnallah 74522cd394 test/e2e/suites: `FindString` to match the address
Signed-off-by: Mohamed Awnallah <mohamedmohey2352@gmail.com>
Co-authored-by: zhzhuang-zju <m17799853869@163.com>
2025-04-08 16:19:48 +00:00
karmada-bot ba5ffbac4f
Merge pull request #6270 from zhzhuang-zju/timeout
fix the issue where discovery-timeout fails to work properly
2025-04-08 12:06:56 +08:00
zhzhuang-zju 49a37a78de fix the issue where discovery-timeout fails to work properly
Signed-off-by: zhzhuang-zju <m17799853869@163.com>
2025-04-08 11:11:21 +08:00
karmada-bot dbe0d22a1e
Merge pull request #6272 from seanlaii/bump-go-1-23-8
Bump go version to 1.23.8
2025-04-08 10:10:55 +08:00
wei-chenglai 31e7fdc0b5 Bump go version to 1.23.8
Signed-off-by: wei-chenglai <qazwsx0939059006@gmail.com>
2025-04-07 18:14:47 -04:00
karmada-bot f4773821b7
Merge pull request #6258 from XiShanYongYe-Chang/fix-6255
don't clean up MultiClusterService and EndpointSlice work when the cluster is not-ready
2025-04-07 15:33:55 +08:00
karmada-bot b8639a7f89
Merge pull request #6238 from zhzhuang-zju/releasenote-new
publish release notes for v1.11.8,v1.12.5,v1.13.1,v1.14.0-alpha.1
2025-04-07 15:09:55 +08:00
karmada-bot eda3685732
Merge pull request #6268 from my-git9/compatibility-1.13
Update Kubernetes compatibility
2025-04-07 15:07:56 +08:00
zhzhuang-zju 4eaf8ea6e9 publish release notes for v1.11.8,v1.12.5,v1.13.1,v1.14.0-alpha.1
Signed-off-by: zhzhuang-zju <m17799853869@163.com>
2025-04-07 09:46:06 +08:00
xin.li f543f4ce92 Update Kubernetes compatibility
Signed-off-by: xin.li <xin.li@daocloud.io>
2025-04-05 23:06:53 +08:00
Mohamed Awnallah e37b2dcf05 test/e2e/karmadactl_test.go: test register command
Signed-off-by: Mohamed Awnallah <mohamedmohey2352@gmail.com>
Co-authored-by: zhzhuang-zju <m17799853869@163.com>
2025-04-03 10:28:02 +00:00
karmada-bot 8412c2069b
Merge pull request #6265 from zhzhuang-zju/uniquecluster
cleanup unused err check
2025-04-03 16:58:52 +08:00
zhzhuang-zju 664d27fbcd cleanup unused err check
Signed-off-by: zhzhuang-zju <m17799853869@163.com>
2025-04-03 16:02:47 +08:00
zach593 6c7a70f4e5 service aggregate status deduplication
Signed-off-by: zach593 <zach_li@outlook.com>
2025-04-03 11:09:44 +08:00
karmada-bot 4c7c15ae5b
Merge pull request #5830 from mohamedawnallah/unitTestDeInitKarmadactl
pkg/karmadactl: unit test deinit
2025-04-03 09:35:51 +08:00
Mohamed Awnallah 85061f5e26 pkg/karmadactl: unit test deinit
In this commit, we unit test deinit in karmadactl, covering deletion of
all components related to karmada after it was initialized using init.

Signed-off-by: Mohamed Awnallah <mohamedmohey2352@gmail.com>
2025-04-02 09:39:09 +00:00
karmada-bot 9b7151020d
Merge pull request #6253 from zhzhuang-zju/uniquecluster
fix: A new pull-mode cluster may overwrite the existing member clusters
2025-04-02 16:54:51 +08:00
changzhen 2667cef349 don't clean up MultiClusterService and EndpointSlice work when the cluster is not-ready
Signed-off-by: changzhen <changzhen5@huawei.com>
2025-04-02 16:04:41 +08:00
karmada-bot 5f258dd8b3
Merge pull request #6199 from CharlesQQ/dependencies-reduce-trigger
fix(dependenciesdistributor): reduce update trigger
2025-04-02 09:51:50 +08:00
chang.qiangqiang bfd88d2507 fix(dependenciesdistributor): reduce update trigger
Signed-off-by: chang.qiangqiang <chang.qiangqiang@immomo.com>
2025-04-02 00:36:53 +08:00
karmada-bot 138537bdb2
Merge pull request #6250 from zhzhuang-zju/documentation
Update alpine version used by karmadactl init
2025-04-01 15:32:50 +08:00
karmada-bot afe172d6a4
Merge pull request #6249 from ctripcloud/svc-aggregate-status
don't add cluster name to hostname field when aggregating service status
2025-04-01 15:21:50 +08:00
zhzhuang-zju d9318779fd update alpine version used by karmadactl init to 3.21.3
Signed-off-by: zhzhuang-zju <m17799853869@163.com>
2025-04-01 10:07:11 +08:00
zhzhuang-zju f43597eaa0 operator: update the crdtarball url in sample file
Signed-off-by: zhzhuang-zju <m17799853869@163.com>
2025-04-01 10:04:22 +08:00
zhzhuang-zju 1964f6dc78 correct the charts readme
Signed-off-by: zhzhuang-zju <m17799853869@163.com>
2025-04-01 10:01:57 +08:00
dongjiang e07b0937ef
add build_info for karmada binary
Signed-off-by: dongjiang <dongjiang1989@126.com>
2025-03-31 23:40:53 +08:00
karmada-bot 9ea0382293
Merge pull request #6221 from Pratham-B-Parlecha/fix/prathamci
Update Karmada CI and Documentation
2025-03-31 09:17:48 +08:00
zach593 1e3795e3b8 don't add cluster name to hostname field when aggregating service status
Signed-off-by: zach593 <zach_li@outlook.com>
2025-03-30 21:43:45 +08:00
zhzhuang-zju 6f6762579b fix: A new pull-mode cluster may overwrite the existing member clusters
Signed-off-by: zhzhuang-zju <m17799853869@163.com>
2025-03-28 15:45:29 +08:00
karmada-bot 65e7f74c99
Merge pull request #6188 from seanlaii/fix-helm
Fix missing ClusterRole, ClusterRoleBinding, and ServiceAccount for Custom Cert Mode in Helm Chart
2025-03-27 20:14:47 +08:00
karmada-bot fcb0be73c1
Merge pull request #6208 from zhzhuang-zju/e2e
e2e: add sidecar configuration e2e test
2025-03-27 11:24:45 +08:00
karmada-bot 4137c49af4
Merge pull request #6095 from ctripcloud/ratelimiter
unify http ratelimiter for control plane client
2025-03-27 09:14:46 +08:00
Pratham c0657e289e corrected the intention of the commented code
Signed-off-by: Pratham <prathamparlecha09@gmail.com>
2025-03-27 01:30:10 +05:30
Pratham 38619e9ffe documentation and ci update
Signed-off-by: Pratham <prathamparlecha09@gmail.com>
2025-03-27 01:30:10 +05:30
Pratham a846783aaa karmada version update
Signed-off-by: Pratham <prathamparlecha09@gmail.com>
2025-03-27 01:30:10 +05:30
Pratham dfe1089e57 ci issue
Signed-off-by: Pratham <prathamparlecha09@gmail.com>
2025-03-27 01:30:10 +05:30
zhzhuang-zju c2f7618a23 e2e: add sidecar configuration e2e test
Signed-off-by: zhzhuang-zju <m17799853869@163.com>
2025-03-26 16:40:58 +08:00
karmada-bot fc1bc17061
Merge pull request #6209 from zhzhuang-zju/operator-docs
add docs for configuring API Server sidecar in karmada operator
2025-03-26 11:16:44 +08:00
karmada-bot ba782c85c2
Merge pull request #6227 from XiShanYongYe-Chang/fix-3747
fix the issue where the binding gracefulEvictionTask can never be cleared
2025-03-26 10:07:44 +08:00
karmada-bot f2ff5d0abb
Merge pull request #6228 from XiShanYongYe-Chang/update-mismatch-log-info
update mismatch log info in the taint-manager
2025-03-25 09:04:43 +08:00
changzhen 412952ec45 update mismatch log info in the taint-manager
Signed-off-by: changzhen <changzhen5@huawei.com>
2025-03-24 20:54:42 +08:00
karmada-bot 15474e8b03
Merge pull request #6212 from seanlaii/bump-go-mockery
Bump mockery version to v2.53.3
2025-03-24 19:34:43 +08:00
changzhen cbfc085283 to prevent the generation judgment from always being false and blocking gracefulEvictionTask clearance, move the generation judgment logic into the reconcile loop
Signed-off-by: changzhen <changzhen5@huawei.com>
2025-03-24 15:48:48 +08:00
wei-chenglai 6d0221bd26 Bump mockery to 2.53.3
Signed-off-by: wei-chenglai <qazwsx0939059006@gmail.com>
2025-03-23 23:03:04 -04:00
karmada-bot dc3a3305c7
Merge pull request #6225 from seanlaii/bump-go-net
Bump golang.org/x/net to 0.37.0
2025-03-24 10:50:43 +08:00
wei-chenglai e44362a51c Bump golang.org/x/net to 0.37.0
Signed-off-by: wei-chenglai <qazwsx0939059006@gmail.com>
2025-03-23 21:52:46 -04:00
karmada-bot 0235dc67c9
Merge pull request #6218 from seanlaii/bump-go-ci-lint
Bump golang to 1.23.7 and golangci-lint to 1.64.8
2025-03-24 09:25:42 +08:00
wei-chenglai d231a87aed Bump golang to 1.23.7 and golangci-lint to 1.64.8
Signed-off-by: wei-chenglai <qazwsx0939059006@gmail.com>
2025-03-22 13:03:28 -04:00
karmada-bot 85b745b38a
Merge pull request #6214 from jabellard/agent-fix
Fix segmentation violation error in `agent` component
2025-03-22 15:13:41 +08:00
karmada-bot 56b72f1a80
Merge pull request #6220 from warjiang/fix/typo-error
fix: typo error in karmadactl register
2025-03-21 11:46:40 +08:00
warjiang 8e4603af66 fix: typo error in karmadactl register
Signed-off-by: dingwenjiang <dingwenjiang@bytedance.com>
2025-03-21 10:51:25 +08:00
karmada-bot 6810e88643
Merge pull request #6043 from vie-serendipity/feat/suspend
feat: replace instruction annotation with work suspendDispatching field
2025-03-20 17:21:40 +08:00
karmada-bot 9a92463c8b
Merge pull request #6211 from RainbowMango/pr_orgnize_indexer
Update Works index with a unique name
2025-03-20 16:56:39 +08:00
karmada-bot 0713e4b9be
Merge pull request #6044 from kubeservice-stack/add-operator-metrics
feat: add operator build_info metrics and go runtime metrics
2025-03-20 16:31:41 +08:00
karmada-bot 2a25f5b555
Merge pull request #6145 from CharlesQQ/rate-limit-fix
add the ratelimiter options parameter for controllers that are missing it
2025-03-20 15:57:40 +08:00
dongjiang 276cb0d3d7
add build info and go runtime metrics
Signed-off-by: dongjiang <dongjiang1989@126.com>
Co-Authored-By: Copilot <175728472+Copilot@users.noreply.github.com>
2025-03-20 15:31:38 +08:00
chang.qiangqiang 2b109eb9df fix(detector): add ratelimiteroptions for controllers
Signed-off-by: chang.qiangqiang <chang.qiangqiang@immomo.com>
2025-03-20 15:00:42 +08:00
Joe Nathan Abellard c0525e8715 Fix segmentation violation error in karmada-agent component
Signed-off-by: Joe Nathan Abellard <contact@jabellard.com>
2025-03-19 22:03:18 -04:00
RainbowMango e3244462d7 Update Works index with a unique name
Signed-off-by: RainbowMango <qdurenhongcai@gmail.com>
2025-03-19 16:09:05 +08:00
karmada-bot b939368e08
Merge pull request #6206 from seanlaii/fix-local-up-cert
Fix the naming of the server certificate of karmada-scheduler-estimator
2025-03-19 15:07:39 +08:00
vie-serendipity 4b437e40f4 feat: replace instruction annotation with work suspendDispatching field
feat: rename workIndex and mv GetWorkSuspendDispatching to helper

feat: modify the style of Deprecated
Signed-off-by: vie-serendipity <2733147505@qq.com>

feat: label and annotate workloads even though work is suspended

Signed-off-by: vie-serendipity <2733147505@qq.com>

test: fix ut for CreateOrUpdateWork

Signed-off-by: vie-serendipity <2733147505@qq.com>
2025-03-19 15:04:55 +08:00
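A minimal sketch of the field this commit introduces in place of the old instruction annotation; the metadata values are illustrative, and the empty manifests list merely keeps the example short:

apiVersion: work.karmada.io/v1alpha1
kind: Work
metadata:
  name: nginx-work
  namespace: karmada-es-member1
spec:
  # Replaces the former instruction annotation: while true, the
  # controller does not dispatch this Work to the member cluster.
  suspendDispatching: true
  workload:
    manifests: []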
karmada-bot bc305c6563
Merge pull request #6184 from zhzhuang-zju/releasenote-new
gencomponentdocs supports module `all` to generate docs for all karmada components
2025-03-19 14:55:39 +08:00
zhzhuang-zju 3b6ac2eec0 add docs for configuring API Server sidecar in karmada operator
Signed-off-by: zhzhuang-zju <m17799853869@163.com>
2025-03-18 16:38:20 +08:00
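The docs added here cover running a sidecar next to the Karmada API server via the operator. A speculative sketch; the sidecarContainers field name is my guess from the commit title and is not confirmed by this log:

apiVersion: operator.karmada.io/v1alpha1
kind: Karmada
metadata:
  name: karmada-demo
spec:
  components:
    karmadaAPIServer:
      # Assumed field name; containers listed here would run
      # alongside the karmada-apiserver container.
      sidecarContainers:
      - name: kms-plugin
        image: example.com/kms-plugin:latest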
karmada-bot 678362ba9a
Merge pull request #6207 from karmada-io/dependabot/github_actions/aquasecurity/trivy-action-0.30.0
build(deps): bump aquasecurity/trivy-action from 0.29.0 to 0.30.0
2025-03-17 14:16:37 +08:00
dependabot[bot] ad76f52c61
build(deps): bump aquasecurity/trivy-action from 0.29.0 to 0.30.0
Bumps [aquasecurity/trivy-action](https://github.com/aquasecurity/trivy-action) from 0.29.0 to 0.30.0.
- [Release notes](https://github.com/aquasecurity/trivy-action/releases)
- [Commits](https://github.com/aquasecurity/trivy-action/compare/0.29.0...0.30.0)

---
updated-dependencies:
- dependency-name: aquasecurity/trivy-action
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-03-17 05:13:53 +00:00
wei-chenglai 9db68191c1 Fix the naming of the server certificate of karmada-scheduler-estimator
Signed-off-by: wei-chenglai <qazwsx0939059006@gmail.com>
2025-03-15 17:15:40 -04:00
karmada-bot 6dfe381395
Merge pull request #6193 from jabellard/leaf-cert-validatity-imp
Add Support to Configure Leaf Certificate Validity Period in Karmada Operator
2025-03-13 16:48:34 +08:00
Joe Nathan Abellard b192c9c0d3 Support for custom leaf cert validity period
Signed-off-by: Joe Nathan Abellard <contact@jabellard.com>
2025-03-11 22:55:20 -04:00
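A minimal sketch of the new knob, assuming it lands as a leafCertValidityDays field under the Karmada resource's certificate configuration (the field name and placement are assumptions drawn from the PR title):

apiVersion: operator.karmada.io/v1alpha1
kind: Karmada
metadata:
  name: karmada-demo
spec:
  customCertificate:
    # Assumed field: validity period, in days, for leaf
    # certificates generated by the operator.
    leafCertValidityDays: 365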
karmada-bot 63ad83dfdd
Merge pull request #6173 from RainbowMango/pr_nominate_code_owners
Nominate code owners
2025-03-12 09:31:33 +08:00
karmada-bot 2b88a74123
Merge pull request #6198 from XiShanYongYe-Chang/remove-extra-symbol
remove extra symbol '/' in the karmada-aggregated-apiserver.yaml
2025-03-11 15:21:32 +08:00
changzhen 03c10992fb remove extra symbol '/' in the karmada-aggregated-apiserver.yaml
Signed-off-by: changzhen <changzhen5@huawei.com>
2025-03-11 14:22:07 +08:00
karmada-bot d6cc0c22db
Merge pull request #6196 from zhzhuang-zju/documentation
add helm index v1.13.0
2025-03-10 14:22:32 +08:00
karmada-bot 178f193ec1
Merge pull request #6148 from ctripcloud/object-watcher-metrics
update object watcher metrics and simplify cluster object version recording
2025-03-10 14:12:31 +08:00
karmada-bot 2dab8e4013
Merge pull request #6176 from jabellard/leaf-cert-validatity
Proposal: Add Support to Configure Leaf Certificate Validity Period in Karmada Operator
2025-03-10 11:33:31 +08:00
karmada-bot d3cf9b90c1
Merge pull request #6180 from jabellard/operator-cert-issue
Fix issues with running multiple Karmada instances in the same namespace
2025-03-10 11:30:31 +08:00
zhzhuang-zju 2cc457e211 add helm index v1.13.0
Signed-off-by: zhzhuang-zju <m17799853869@163.com>
2025-03-10 11:28:29 +08:00
karmada-bot 47b469a7df
Merge pull request #6191 from husnialhamdani/remove-deprecated-ca-cert-path
chore: remove deprecated --ca-cert-path from karmadactl
2025-03-10 09:36:31 +08:00
zach593 d226ce11f3 update object watcher metrics and simplify cluster object version recording
Signed-off-by: zach593 <zach_li@outlook.com>
2025-03-09 00:52:48 +08:00
Husni Alhamdani abe724ad57 chore: remove deprecated --ca-cert-path from karmadactl
Signed-off-by: Husni Alhamdani <dhanielluis@gmail.com>
2025-03-08 23:06:32 +07:00
karmada-bot 8d43fe2365
Merge pull request #6190 from zhzhuang-zju/documentation
move Maintainers to community repo
2025-03-07 17:08:29 +08:00
zhzhuang-zju 3d19825af9 harmonize maintainers to the primary one
Signed-off-by: zhzhuang-zju <m17799853869@163.com>
2025-03-07 15:28:35 +08:00
wei-chenglai b9256e85f1 Fix Missing Role, RoleBinding, and ServiceAccount for Custom Cert Mode in Helm Chart
Signed-off-by: wei-chenglai <qazwsx0939059006@gmail.com>
2025-03-06 23:07:05 -05:00
Joe Nathan Abellard 5652025362 Address comments
Signed-off-by: Joe Nathan Abellard <contact@jabellard.com>
2025-03-06 15:28:05 -05:00
zhzhuang-zju 6654fee4af gencomponentdocs supports module `all` to generate docs for all karmada components
Signed-off-by: zhzhuang-zju <m17799853869@163.com>
2025-03-04 17:32:59 +08:00
karmada-bot 824bd8d3e0
Merge pull request #6183 from karmada-io/dependabot/github_actions/slsa-framework/slsa-github-generator-2.1.0
build(deps): bump slsa-framework/slsa-github-generator from 2.0.0 to 2.1.0
2025-03-03 17:35:25 +08:00
karmada-bot 925d6c5732
Merge pull request #6182 from karmada-io/dependabot/github_actions/nick-fields/retry-3.0.2
build(deps): bump nick-fields/retry from 3.0.1 to 3.0.2
2025-03-03 15:41:24 +08:00
dependabot[bot] 8ac876ae93
build(deps): bump slsa-framework/slsa-github-generator
Bumps [slsa-framework/slsa-github-generator](https://github.com/slsa-framework/slsa-github-generator) from 2.0.0 to 2.1.0.
- [Release notes](https://github.com/slsa-framework/slsa-github-generator/releases)
- [Changelog](https://github.com/slsa-framework/slsa-github-generator/blob/main/CHANGELOG.md)
- [Commits](https://github.com/slsa-framework/slsa-github-generator/compare/v2.0.0...v2.1.0)

---
updated-dependencies:
- dependency-name: slsa-framework/slsa-github-generator
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-03-03 05:21:23 +00:00
dependabot[bot] 27d6436edf
build(deps): bump nick-fields/retry from 3.0.1 to 3.0.2
Bumps [nick-fields/retry](https://github.com/nick-fields/retry) from 3.0.1 to 3.0.2.
- [Release notes](https://github.com/nick-fields/retry/releases)
- [Changelog](https://github.com/nick-fields/retry/blob/master/.releaserc.js)
- [Commits](https://github.com/nick-fields/retry/compare/v3.0.1...v3.0.2)

---
updated-dependencies:
- dependency-name: nick-fields/retry
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-03-03 05:21:20 +00:00
Joe Nathan Abellard a6e4a4de24 Fix issues with running multiple Karmada instances in the same namespace
Signed-off-by: Joe Nathan Abellard <contact@jabellard.com>
2025-03-01 17:22:38 -05:00
Joe Nathan Abellard a80be33e4a Fix issues with running multiple Karmada instances in the same namespace
Signed-off-by: Joe Nathan Abellard <contact@jabellard.com>
2025-03-01 17:10:17 -05:00
Joe Nathan Abellard 83fbe65bb3 Add design doc
Signed-off-by: Joe Nathan Abellard <contact@jabellard.com>
2025-02-28 14:27:01 -05:00
zach593 696e52ef7e unify http ratelimiter for control plane client
Signed-off-by: zach593 <zach_li@outlook.com>
2025-02-28 22:37:12 +08:00
Joe Nathan Abellard f121f39e4c Add design doc
Signed-off-by: Joe Nathan Abellard <contact@jabellard.com>
2025-02-28 08:22:12 -05:00
karmada-bot 3b6c0e0fa2
Merge pull request #6159 from tw-mnewman/6144
Allow configuring karmada-apiserver OIDC via Helm
2025-02-28 20:32:22 +08:00
RainbowMango 0c8b8f5c70 Nominate code owners
Signed-off-by: RainbowMango <qdurenhongcai@gmail.com>
2025-02-28 16:37:46 +08:00
Matt Newman 98aa22ce29
Allow configuring karmada-apiserver OIDC via Helm
karmada-io/karmada#6144

Signed-off-by: Matt Newman <mnewman@thoughtworks.com>
2025-02-24 14:05:14 -06:00
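This change exposes karmada-apiserver OIDC settings through the chart. A speculative values-file sketch; the values keys are hypothetical, while the underlying kube-apiserver flags they would map to (--oidc-issuer-url, --oidc-client-id) are standard:

apiServer:
  # Hypothetical chart keys; rendered into karmada-apiserver flags.
  oidc:
    issuerURL: https://issuer.example.com
    clientID: karmada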
mszacillo c026200b23 Proposal document for improvement to accurate estimator for CRD scheduling
Signed-off-by: mszacillo <mszacillo@bloomberg.net>
2024-07-12 18:27:59 -04:00
4331 changed files with 242537 additions and 177050 deletions

.gemini/config.yaml Normal file
View File

@@ -0,0 +1,36 @@
# Config for the Gemini Pull Request Review Bot.
# https://github.com/marketplace/gemini-code-assist

# Enables fun features such as a poem in the initial pull request summary.
# Type: boolean, default: false.
have_fun: false

code_review:
  # Disables Gemini from acting on PRs.
  # Type: boolean, default: false.
  disable: false
  # Minimum severity of comments to post (LOW, MEDIUM, HIGH, CRITICAL).
  # Type: string, default: MEDIUM.
  comment_severity_threshold: MEDIUM
  # Max number of review comments (-1 for unlimited).
  # Type: integer, default: -1.
  max_review_comments: -1
  pull_request_opened:
    # Post helpful instructions when PR is opened.
    # Type: boolean, default: false.
    help: true
    # Post PR summary when opened.
    # Type: boolean, default: true.
    summary: true
    # Post code review on PR open.
    # Type: boolean, default: true.
    code_review: true

# List of glob patterns to ignore (files and directories).
# Type: array of string, default: [].
ignore_patterns: []

View File

@@ -2,30 +2,48 @@
<!--
Add one of the following kinds:
/kind api-change
/kind bug
/kind cleanup
/kind deprecation
/kind design
/kind documentation
/kind failing-test
/kind feature
/kind documentation
/kind cleanup
Optionally add one or more of the following kinds if applicable:
/kind api-change
/kind deprecation
/kind failing-test
/kind flake
/kind regression
-->
**What this PR does / why we need it**:
**Which issue(s) this PR fixes**:
<!--
*Automatically closes linked issue when PR is merged.
Usage: `Fixes #<issue number>`, or `Fixes (paste link of issue)`.*
-->
Fixes #
<!--
*Optionally link to the umbrella issue if this PR resolves part of it.
Usage: `Part of #<issue number>`, or `Part of (paste link of issue)`.*
Part of #
-->
**Special notes for your reviewer**:
<!--
Such as a test report of this PR.
-->
**Does this PR introduce a user-facing change?**:
<!--
If no, just write "NONE" in the release-note block below.
If yes, a release note is required.
Some brief examples of release notes:
1. `karmada-controller-manager`: Fixed the issue that xxx
2. `karmada-scheduler`: The deprecated flag `--xxx` now has been removed. Users of this flag should xxx.
3. `API Change`: Introduced `spec.<field>` to the PropagationPolicy API for xxx.
-->
```release-note

View File

@@ -19,18 +19,18 @@ updates:
- package-ecosystem: docker
directory: /cluster/images/
target-branch: "release-1.12"
target-branch: "release-1.15"
schedule:
interval: weekly
- package-ecosystem: docker
directory: /cluster/images/
target-branch: "release-1.11"
target-branch: "release-1.14"
schedule:
interval: weekly
- package-ecosystem: docker
directory: /cluster/images/
target-branch: "release-1.10"
target-branch: "release-1.13"
schedule:
interval: weekly

View File

@@ -27,10 +27,10 @@ jobs:
- karmada-search
- karmada-operator
- karmada-metrics-adapter
karmada-version: [ release-1.12, release-1.11, release-1.10 ]
karmada-version: [ release-1.15, release-1.14, release-1.13 ]
steps:
- name: checkout code
uses: actions/checkout@v4
uses: actions/checkout@v5
with:
ref: ${{ matrix.karmada-version }}
- name: install Go
@@ -47,7 +47,7 @@ jobs:
export REGISTRY="docker.io/karmada"
make image-${{ matrix.target }}
- name: Run Trivy vulnerability scanner
uses: aquasecurity/trivy-action@0.29.0
uses: aquasecurity/trivy-action@0.32.0
env:
ACTIONS_RUNTIME_TOKEN: ${{ secrets.GITHUB_TOKEN }}
TRIVY_DB_REPOSITORY: ghcr.io/aquasecurity/trivy-db,public.ecr.aws/aquasecurity/trivy-db
@@ -56,15 +56,17 @@ jobs:
format: 'sarif'
ignore-unfixed: true
vuln-type: 'os,library'
cache: false
output: '${{ matrix.target }}:${{ matrix.karmada-version }}.trivy-results.sarif'
- name: display scan results
uses: aquasecurity/trivy-action@0.29.0
uses: aquasecurity/trivy-action@0.32.0
env:
TRIVY_SKIP_DB_UPDATE: true # Avoid updating the vulnerability db as it was cached in the previous step.
with:
image-ref: 'docker.io/karmada/${{ matrix.target }}:${{ matrix.karmada-version }}'
format: 'table'
ignore-unfixed: true
cache: false
vuln-type: 'os,library'
- name: Upload Trivy scan results to GitHub Security tab
uses: github/codeql-action/upload-sarif@v3

View File

@@ -31,7 +31,11 @@ jobs:
- karmada-metrics-adapter
steps:
- name: checkout code
uses: actions/checkout@v4
uses: actions/checkout@v5
with:
# fetch-depth:
# 0 indicates all history for all branches and tags.
fetch-depth: 0
- name: install Go
uses: actions/setup-go@v5
with:
@@ -42,7 +46,7 @@ jobs:
export REGISTRY="docker.io/karmada"
make image-${{ matrix.target }}
- name: Run Trivy vulnerability scanner
uses: aquasecurity/trivy-action@0.29.0
uses: aquasecurity/trivy-action@0.32.0
env:
ACTIONS_RUNTIME_TOKEN: ${{ secrets.GITHUB_TOKEN }}
TRIVY_DB_REPOSITORY: ghcr.io/aquasecurity/trivy-db,public.ecr.aws/aquasecurity/trivy-db
@@ -52,8 +56,9 @@ jobs:
ignore-unfixed: true
vuln-type: 'os,library'
output: 'trivy-results.sarif'
cache: false
- name: display scan results
uses: aquasecurity/trivy-action@0.29.0
uses: aquasecurity/trivy-action@0.32.0
env:
TRIVY_SKIP_DB_UPDATE: true # Avoid updating the vulnerability db as it was cached in the previous step.
with:
@@ -61,6 +66,7 @@ jobs:
format: 'table'
ignore-unfixed: true
vuln-type: 'os,library'
cache: false
- name: Upload Trivy scan results to GitHub Security tab
uses: github/codeql-action/upload-sarif@v3
with:

View File

@@ -19,8 +19,8 @@ jobs:
max-parallel: 5
fail-fast: false
matrix:
kubeapiserver-version: [ v1.23.4, v1.24.2, v1.25.0, v1.26.0, v1.27.3, v1.28.0, v1.29.0, v1.30.0, v1.31.0 ]
karmada-version: [ master, release-1.12, release-1.11, release-1.10 ]
kubeapiserver-version: [ v1.24.2, v1.25.0, v1.26.0, v1.27.3, v1.28.0, v1.29.0, v1.30.0, v1.31.0, v1.32.0, v1.33.0 ]
karmada-version: [ master, release-1.15, release-1.14, release-1.13 ]
env:
KARMADA_APISERVER_VERSION: ${{ matrix.kubeapiserver-version }}
steps:
@@ -38,7 +38,7 @@ jobs:
docker-images: false
swap-storage: false
- name: checkout code
uses: actions/checkout@v4
uses: actions/checkout@v5
with:
# Number of commits to fetch. 0 indicates all history for all branches and tags.
# We need to guess version via git tags.
@@ -49,7 +49,7 @@ jobs:
with:
go-version-file: go.mod
- name: setup e2e test environment
uses: nick-fields/retry@v3.0.1
uses: nick-fields/retry@v3.0.2
with:
max_attempts: 3
timeout_minutes: 20

View File

@@ -19,7 +19,7 @@ jobs:
max-parallel: 5
fail-fast: false
matrix:
k8s: [ v1.23.4, v1.24.2, v1.25.0, v1.26.0, v1.27.3, v1.28.0, v1.29.0, v1.30.0, v1.31.0 ]
k8s: [ v1.24.2, v1.25.0, v1.26.0, v1.27.3, v1.28.0, v1.29.0, v1.30.0, v1.31.0, v1.32.0, v1.33.0 ]
steps:
# Free up disk space on Ubuntu
- name: Free Disk Space (Ubuntu)
@@ -35,7 +35,7 @@ jobs:
docker-images: false
swap-storage: false
- name: checkout code
uses: actions/checkout@v4
uses: actions/checkout@v5
with:
# Number of commits to fetch. 0 indicates all history for all branches and tags.
# We need to guess version via git tags.
@@ -45,7 +45,7 @@ jobs:
with:
go-version-file: go.mod
- name: setup e2e test environment
uses: nick-fields/retry@v3.0.1
uses: nick-fields/retry@v3.0.2
with:
max_attempts: 3
timeout_minutes: 20

View File

@@ -20,7 +20,7 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: checkout code
uses: actions/checkout@v4
uses: actions/checkout@v5
- name: install Go
uses: actions/setup-go@v5
with:
@@ -38,7 +38,7 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: checkout code
uses: actions/checkout@v4
uses: actions/checkout@v5
- name: install Go
uses: actions/setup-go@v5
with:
@@ -66,7 +66,7 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: checkout code
uses: actions/checkout@v4
uses: actions/checkout@v5
with:
# Number of commits to fetch. 0 indicates all history for all branches and tags.
# We need to guess version via git tags.
@@ -85,7 +85,7 @@ jobs:
GOTESTSUM_ENABLED: enabled
steps:
- name: checkout code
uses: actions/checkout@v4
uses: actions/checkout@v5
- name: install Go
uses: actions/setup-go@v5
with:
@@ -116,7 +116,7 @@ jobs:
# Here support the latest three minor releases of Kubernetes, this can be considered to be roughly
# the same as the End of Life of the Kubernetes release: https://kubernetes.io/releases/
# Please remember to update the CI Schedule Workflow when we add a new version.
k8s: [ v1.29.0, v1.30.0, v1.31.0 ]
k8s: [ v1.31.0, v1.32.0, v1.33.0 ]
steps:
# Free up disk space on Ubuntu
- name: Free Disk Space (Ubuntu)
@@ -132,7 +132,7 @@ jobs:
docker-images: false
swap-storage: false
- name: checkout code
uses: actions/checkout@v4
uses: actions/checkout@v5
with:
# Number of commits to fetch. 0 indicates all history for all branches and tags.
# We need to guess version via git tags.
@@ -144,6 +144,7 @@ jobs:
- name: setup e2e test environment
run: |
export CLUSTER_VERSION=kindest/node:${{ matrix.k8s }}
export KUBE_CACHE_MUTATION_DETECTOR=true
hack/local-up-karmada.sh
- name: run e2e
run: |
@@ -172,7 +173,7 @@ jobs:
# Here support the latest three minor releases of Kubernetes, this can be considered to be roughly
# the same as the End of Life of the Kubernetes release: https://kubernetes.io/releases/
# Please remember to update the CI Schedule Workflow when we add a new version.
k8s: [ v1.29.0, v1.30.0, v1.31.0 ]
k8s: [ v1.31.0, v1.32.0, v1.33.0 ]
steps:
# Free up disk space on Ubuntu
- name: Free Disk Space (Ubuntu)
@@ -188,7 +189,7 @@ jobs:
docker-images: false
swap-storage: false
- name: checkout code
uses: actions/checkout@v4
uses: actions/checkout@v5
with:
# Number of commits to fetch. 0 indicates all history for all branches and tags.
# We need to guess version via git tags.

View File

@@ -16,7 +16,7 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: checkout code
uses: actions/checkout@v4
uses: actions/checkout@v5
with:
# fetch-depth:
# 0 indicates all history for all branches and tags.

View File

@@ -31,7 +31,7 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: checkout code
uses: actions/checkout@v4
uses: actions/checkout@v5
with:
# fetch-depth:
# 0 indicates all history for all branches and tags.
@@ -42,7 +42,7 @@ jobs:
with:
go-version-file: go.mod
- name: Install Cosign
uses: sigstore/cosign-installer@v3.8.1
uses: sigstore/cosign-installer@v3.9.2
with:
cosign-release: 'v2.2.3'
- name: install QEMU

View File

@@ -15,7 +15,7 @@ jobs:
if: ${{ github.repository == 'karmada-io/karmada' }}
steps:
- name: checkout code
uses: actions/checkout@v4
uses: actions/checkout@v5
with:
# fetch-depth:
# 0 indicates all history for all branches and tags.

View File

@@ -31,7 +31,7 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: checkout code
uses: actions/checkout@v4
uses: actions/checkout@v5
with:
# fetch-depth:
# 0 indicates all history for all branches and tags.
@ -42,7 +42,7 @@ jobs:
with:
go-version-file: go.mod
- name: Install Cosign
uses: sigstore/cosign-installer@v3.8.1
uses: sigstore/cosign-installer@v3.9.2
with:
cosign-release: 'v2.2.3'
- name: install QEMU

View File

@ -7,7 +7,7 @@ on:
- 'dependabot/**'
permissions:
contents: read # Required by actions/checkout@v4 to fetch the repository contents.
contents: read # Required by actions/checkout@v5 to fetch the repository contents.
jobs:
fossa:
@ -19,7 +19,7 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: checkout code
uses: actions/checkout@v4
uses: actions/checkout@v5
- name: Run FOSSA scan and upload build data
uses: fossas/fossa-action@v1
with:

View File

@ -1,8 +1,8 @@
# validate any chart changes under the charts directory
name: Chart
env:
HELM_VERSION: v3.11.2
KUSTOMIZE_VERSION: 5.4.3
HELM_VERSION: v3.17.3
KUSTOMIZE_VERSION: 5.6.0
on:
push:
# Exclude branches created by Dependabot to avoid triggering the current workflow
@ -26,10 +26,10 @@ jobs:
# Here we support the latest three minor releases of Kubernetes, which roughly matches
# the End of Life of the Kubernetes releases: https://kubernetes.io/releases/
# Please remember to update the CI Schedule Workflow when we add a new version.
k8s: [ v1.29.0, v1.30.0, v1.31.0 ]
k8s: [ v1.31.0, v1.32.0, v1.33.0 ]
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v5
with:
fetch-depth: 0
@ -47,6 +47,8 @@ jobs:
uses: syntaqx/setup-kustomize@v1
with:
kustomize-version: ${{ env.KUSTOMIZE_VERSION }}
env:
GITHUB_TOKEN: ${{ github.token }}
- name: Run chart-testing (template)
run: |

View File

@ -24,10 +24,10 @@ jobs:
# Here we support the latest three minor releases of Kubernetes, which roughly matches
# the End of Life of the Kubernetes releases: https://kubernetes.io/releases/
# Please remember to update the CI Schedule Workflow when we add a new version.
k8s: [ v1.29.0, v1.30.0, v1.31.0 ]
k8s: [ v1.31.0, v1.32.0, v1.33.0 ]
steps:
- name: checkout code
uses: actions/checkout@v4
uses: actions/checkout@v5
with:
# Number of commits to fetch. 0 indicates all history for all branches and tags.
# We need to guess the version via git tags.
@ -70,10 +70,10 @@ jobs:
fail-fast: false
matrix:
# Latest three minor releases of Kubernetes
k8s: [ v1.29.0, v1.30.0, v1.31.0 ]
k8s: [ v1.31.0, v1.32.0, v1.33.0 ]
steps:
- name: checkout code
uses: actions/checkout@v4
uses: actions/checkout@v5
with:
fetch-depth: 0
- name: install Go

View File

@ -24,7 +24,7 @@ jobs:
# Here we support the latest three minor releases of Kubernetes, which roughly matches
# the End of Life of the Kubernetes releases: https://kubernetes.io/releases/
# Please remember to update the CI Schedule Workflow when we add a new version.
k8s: [ v1.29.0, v1.30.0, v1.31.0 ]
k8s: [ v1.31.0, v1.32.0, v1.33.0 ]
steps:
# Free up disk space on Ubuntu
- name: Free Disk Space (Ubuntu)
@ -40,7 +40,7 @@ jobs:
docker-images: false
swap-storage: false
- name: checkout code
uses: actions/checkout@v4
uses: actions/checkout@v5
with:
# Number of commits to fetch. 0 indicates all history for all branches and tags.
# We need to guess the version via git tags.

View File

@ -23,7 +23,7 @@ jobs:
- amd64
- arm64
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
- name: Set up Go
uses: actions/setup-go@v5
with:
@ -52,7 +52,7 @@ jobs:
hashes: ${{ steps.hash.outputs.hashes }}
steps:
- name: download cli
uses: actions/download-artifact@v4
uses: actions/download-artifact@v5
with:
path: _output/release
pattern: cli-*
@ -71,7 +71,7 @@ jobs:
id-token: write # Needed for provenance signing and ID
contents: write # Needed for release uploads
# Must be referenced by a tag. https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#referencing-the-slsa-generator
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.0.0
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.1.0
with:
base64-subjects: "${{ needs.generate-subject-for-cli-provenance.outputs.hashes }}"
provenance-name: "karmada-cli.intoto.jsonl"
@ -84,7 +84,7 @@ jobs:
hashes: ${{ steps.hash.outputs.hashes }}
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
- name: Rename the crds directory
run: |
mv ./charts/karmada/_crds ./charts/karmada/crds
@ -113,7 +113,7 @@ jobs:
id-token: write # Needed for provenance signing and ID
contents: write # Needed for release uploads
# Must be referenced by a tag. https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#referencing-the-slsa-generator
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.0.0
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.1.0
with:
base64-subjects: "${{ needs.release-crds-assests.outputs.hashes }}"
provenance-name: "karmada-crds.intoto.jsonl"
@ -126,7 +126,7 @@ jobs:
hashes: ${{ steps.hash.outputs.hashes }}
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
- name: Making helm charts
env:
VERSION: ${{ github.ref_name }}
@ -152,7 +152,7 @@ jobs:
id-token: write # Needed for provenance signing and ID
contents: write # Needed for release uploads
# Must be referenced by a tag. https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#referencing-the-slsa-generator
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.0.0
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.1.0
with:
base64-subjects: "${{ needs.release-charts.outputs.hashes }}"
provenance-name: "karmada-charts.intoto.jsonl"
@ -165,9 +165,9 @@ jobs:
hashes: ${{ steps.sbom-hash.outputs.hashes}}
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
- name: Generate sbom for karmada file system
uses: aquasecurity/trivy-action@0.29.0
uses: aquasecurity/trivy-action@0.32.0
with:
scan-type: 'fs'
format: 'spdx'
@ -195,7 +195,7 @@ jobs:
id-token: write # Needed for provenance signing and ID
contents: write # Needed for release uploads
# Must be referenced by a tag. https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#referencing-the-slsa-generator
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.0.0
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.1.0
with:
base64-subjects: "${{ needs.sbom-assests.outputs.hashes }}"
provenance-name: "karmada-sbom.intoto.jsonl"
@ -220,7 +220,7 @@ jobs:
echo "Got the latest tag:$LATEST_TAG"
echo "event.tag:"${{ github.event.release.tag_name }}
echo "latestTag=$LATEST_TAG" >> "$GITHUB_OUTPUT"
- uses: actions/checkout@v4
- uses: actions/checkout@v5
if: steps.get-latest-tag.outputs.latestTag == github.event.release.tag_name
- name: Update new version in krew-index
if: steps.get-latest-tag.outputs.latestTag == github.event.release.tag_name

View File

@ -15,7 +15,7 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: checkout code
uses: actions/checkout@v4
uses: actions/checkout@v5
with:
# fetch-depth:
# 0 indicates all history for all branches and tags.

View File

@ -15,7 +15,7 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: checkout code
uses: actions/checkout@v4
uses: actions/checkout@v5
with:
fetch-depth: 0
- name: install Go

View File

@ -1 +1 @@
1.22.12
1.24.6

View File

@ -1,10 +1,11 @@
# This file contains all configuration options for running analysis.
# For more details, please refer to: https://golangci-lint.run/usage/configuration/
version: "2"
run:
# timeout for analysis, e.g. 30s, 5m, default is 1m
# timeout for analysis, e.g. 30s, 5m, default timeout is disabled
timeout: 10m
# One of 'readonly' and 'vendor'.
# - readonly: the go command is disallowed from the implicit automatic updating of go.mod described above.
# Instead, it fails when any changes to go.mod are needed. This setting is most useful to check
@ -14,95 +15,95 @@ run:
modules-download-mode: readonly
linters:
enable:
# linters maintained by golang.org
- gofmt
- goimports
- govet
# linters default enabled by golangci-lint .
- errcheck
- gosimple
- ineffassign
- staticcheck
- typecheck
- unused
# other linters supported by golangci-lint.
- gci
- gocyclo
- gosec
- misspell
- whitespace
- revive
- depguard
linters-settings:
depguard:
rules:
main:
deny:
- pkg: "io/ioutil"
desc: "replaced by io and os packages since Go 1.16: https://tip.golang.org/doc/go1.16#ioutil"
goimports:
local-prefixes: github.com/karmada-io/karmada
gocyclo:
# minimal cyclomatic complexity to report
min-complexity: 15
gci:
sections:
- Standard
- Default
- Prefix(github.com/karmada-io/karmada)
revive:
rules:
# Disable if-return as it is too strict and not always useful.
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#if-return
- name: if-return
disabled: true
- name: package-comments
- name: superfluous-else
arguments:
- preserveScope
- name: error-strings
- name: error-return
- name: receiver-naming
- name: increment-decrement
- name: range
- name: error-naming
- name: dot-imports
- name: errorf
- name: exported
- name: var-declaration
- name: blank-imports
- name: indent-error-flow
- name: unreachable-code
- name: var-naming
- name: redefines-builtin-id
- name: unused-parameter
- name: context-as-argument
- name: context-keys-type
- name: unexported-return
- name: time-naming
- name: empty-block
issues:
# The list of ids of default excludes to include or disable. By default it's empty.
include:
# disable excluding of issues about comments from revive
# see https://golangci-lint.run/usage/configuration/#command-line-options for more info
- EXC0012
- EXC0013
- EXC0014
# Which dirs to exclude: issues from them won't be reported.
# Can use regexp here: `generated.*`, regexp is applied on full path,
# including the path prefix if one is set.
# Default dirs are skipped independently of this option's value (see exclude-dirs-use-default).
# "/" will be replaced by current OS file path separator to properly work on Windows.
# Default: []
exclude-dirs:
- hack/tools/preferredimports # This code is directly lifted from the Kubernetes codebase, skip checking
- (^|/)vendor($|/)
- (^|/)third_party($|/)
- pkg/util/lifted # This code is lifted from other projects(Kubernetes, Kubefed, and so on), skip checking.
# Enables exclude of directories:
# - vendor$, third_party$, testdata$, examples$, Godeps$, builtin$
# Default: true
exclude-dirs-use-default: false
- depguard
- gocyclo
- gosec
- misspell
- revive
- whitespace
settings:
depguard:
rules:
main:
deny:
- pkg: io/ioutil
desc: 'replaced by io and os packages since Go 1.16: https://tip.golang.org/doc/go1.16#ioutil'
gocyclo:
# minimal cyclomatic complexity to report
min-complexity: 15
revive:
rules:
# Disable if-return as it is too strict and not always useful.
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#if-return
- name: if-return
disabled: true
# Disable package-comments for now since most packages in this project are primarily for internal use.
# If we decide to provide public packages in the future, we can move them to a separate
# repository and revisit adding package-level comments at that time.
- name: package-comments
disabled: true
- name: superfluous-else
arguments:
- preserveScope
- name: error-strings
- name: error-return
- name: receiver-naming
- name: increment-decrement
- name: range
- name: error-naming
- name: dot-imports
- name: errorf
- name: exported
- name: var-declaration
- name: blank-imports
- name: indent-error-flow
- name: unreachable-code
- name: var-naming
- name: redefines-builtin-id
- name: unused-parameter
- name: context-as-argument
- name: context-keys-type
- name: unexported-return
- name: time-naming
- name: empty-block
staticcheck:
checks:
- all
# Disable QF1008 to retain embedded fields for better readability.
- "-QF1008"
# Disable ST1000 (staticcheck) for now since most packages in this project are primarily for internal use.
# If we decide to provide public packages in the future, we can move them to a separate
# repository and revisit adding package-level comments at that time.
- "-ST1000"
exclusions:
generated: lax
presets:
- common-false-positives
- legacy
- std-error-handling
paths:
- hack/tools/preferredimports
- (^|/)vendor($|/)
- (^|/)third_party($|/)
- pkg/util/lifted
formatters:
enable:
- gci
- gofmt
- goimports
settings:
gci:
sections:
- Standard
- Default
- Prefix(github.com/karmada-io/karmada)
goimports:
local-prefixes:
- github.com/karmada-io/karmada
exclusions:
generated: lax
paths:
- hack/tools/preferredimports
- (^|/)vendor($|/)
- (^|/)third_party($|/)
- pkg/util/lifted

View File

@ -1,18 +1,3 @@
# Karmada Maintainers
Official list of Karmada Maintainers.
Please keep the below list sorted in ascending order.
## Maintainers
| Maintainer | GitHub ID | Affiliation | Email |
|-------------|-------------------|---------------|--------------------------|
| Hanbo Li | @mrlihanbo | ByteDance | <mrlihanbo@gmail.com> |
| Hongcai Ren | @RainbowMango | Huawei | <renhongcai@huawei.com> |
| Kevin Wang | @kevin-wangzefeng | Huawei | <wangzefeng@huawei.com> |
| Lei Xue | @carmark | Moore Threads | <vfs@live.com> |
| Shiyi Xie | @GitHubxsy | Huawei | <xieshiyi1@huawei.com> |
| Xiao Zhang | @wawa0210 | DaoCloud | <xiao.zhang@daocloud.io> |
| Yifan Shen | @zoroyouxi | ICBC | <shenyf@sdc.icbc.com.cn> |
| Yiheng Ci | @lfbear | VIPKID | <ciyiheng@vipkid.com.cn> |
See [MAINTAINERS in community repo](https://github.com/karmada-io/community/blob/main/MAINTAINERS.md)

OWNERS
View File

@ -11,4 +11,5 @@ approvers:
- Garrybest
- kevin-wangzefeng
- RainbowMango
- whitewindmills
- XiShanYongYe-Chang

View File

@ -186,12 +186,17 @@ nginx 2/2 2 2 20s
## Kubernetes compatibility
| | Kubernetes 1.16 | Kubernetes 1.17 | Kubernetes 1.18 | Kubernetes 1.19 | Kubernetes 1.20 | Kubernetes 1.21 | Kubernetes 1.22 | Kubernetes 1.23 | Kubernetes 1.24 | Kubernetes 1.25 | Kubernetes 1.26 | Kubernetes 1.27 | Kubernetes 1.28 | Kubernetes 1.29 |
|-----------------------|-----------------|-----------------|-----------------|-----------------|-----------------|-----------------|-----------------|-----------------|-----------------|-----------------|-----------------|-----------------|-----------------|-----------------|
| Karmada v1.7 | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
| Karmada v1.8 | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
| Karmada v1.9 | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
| Karmada HEAD (master) | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
Karmada is compatible with a wide range of Kubernetes versions. For detailed compatibility information,
please refer to the [Kubernetes Compatibility](https://karmada.io/docs/administrator/compatibility/) page.
The following table shows the compatibility test results against the latest 11 Kubernetes versions:
| | Kubernetes 1.33 | Kubernetes 1.32 | Kubernetes 1.31 | Kubernetes 1.30 | Kubernetes 1.29 | Kubernetes 1.28 | Kubernetes 1.27 | Kubernetes 1.26 | Kubernetes 1.25 | Kubernetes 1.24 | Kubernetes 1.23 |
|-----------------------|-----------------|-----------------|-----------------|-----------------|-----------------|-----------------|-----------------|-----------------|-----------------|-----------------|-----------------|
| Karmada v1.12 | | | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
| Karmada v1.13 | | | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
| Karmada v1.14 | | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
| Karmada HEAD (master) | | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
Key:
* `✓` Karmada and the Kubernetes version are exactly compatible.

View File

@ -1,29 +1,4 @@
# Karmada Roadmap
This document defines a high level roadmap for Karmada development and upcoming releases.
Community and contributor involvement is vital for successfully implementing all desired items for each release.
We hope that the items listed below will inspire further engagement from the community to keep Karmada progressing and shipping exciting and valuable features.
## 2024 H1
- Lazy mode of PropagationPolicy
- Cluster Problem Detector(CPD) - Part one: Cluster condition-based remedy system
- Scheduler Enhancement - enable scheduler estimator supports resource quota
- Scheduler Enhancement - Provide a mechanism of re-balance workloads
## 2024 H2
- AI training and batch job support (Including PyTorch, Spark, Flink and so on)
- Karmada Dashboard - alpha release
- Multi-cluster workflow
- Scheduler Enhancement - Optimize scheduling with GPU resources
## Pending
- Cluster addon management
- Multi-cluster Application
- Multi-cluster monitoring
- Multi-cluster logging
- Multi-cluster storage
- Multi-cluster RBAC
- Multi-cluster networking
- Data migration across clusters
- Image registry across clouds
- Multi-cluster Service Mesh solutions
This document has been moved to [karmada-io/community](https://github.com/karmada-io/community/blob/main/ROADMAP.md)
to include all efforts for this repository and subprojects.

File diff suppressed because it is too large

View File

@ -24,6 +24,11 @@ spec:
- name: karmada-agent
image: docker.io/karmada/karmada-agent:latest
imagePullPolicy: {{image_pull_policy}}
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
command:
- /bin/karmada-agent
- --karmada-kubeconfig=/etc/karmada/config/karmada.config
@ -31,9 +36,10 @@ spec:
- --cluster-name={{member_cluster_name}}
- --cluster-api-endpoint={{member_cluster_api_endpoint}}
- --cluster-status-update-frequency=10s
- --health-probe-bind-address=0.0.0.0:10357
- --metrics-bind-address=:8080
- --metrics-bind-address=$(POD_IP):8080
- --health-probe-bind-address=$(POD_IP):10357
- --feature-gates=CustomizedClusterResourceModeling=true,MultiClusterService=true
- --logging-format=json
- --v=4
livenessProbe:
httpGet:

View File

@ -27,6 +27,11 @@ spec:
securityContext:
allowPrivilegeEscalation: false
privileged: false
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
command:
- /bin/karmada-aggregated-apiserver
- --kubeconfig=/etc/karmada/config/karmada.config
@ -37,11 +42,13 @@ spec:
- --etcd-certfile=/etc/karmada/pki/etcd-client/tls.crt
- --etcd-keyfile=/etc/karmada/pki/etcd-client/tls.key
- --tls-cert-file=/etc/karmada/pki/server/tls.crt
- --tls-private-key-file=/etc/karmada/pki//server/tls.key
- --tls-private-key-file=/etc/karmada/pki/server/tls.key
- --audit-log-path=-
- --audit-log-maxage=0
- --audit-log-maxbackup=0
- --tls-min-version=VersionTLS13
- --logging-format=json
- --bind-address=$(POD_IP)
resources:
requests:
cpu: 100m

View File

@ -26,15 +26,23 @@ spec:
privileged: false
image: docker.io/karmada/karmada-controller-manager:latest
imagePullPolicy: IfNotPresent
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: KUBE_CACHE_MUTATION_DETECTOR
value: "{{KUBE_CACHE_MUTATION_DETECTOR}}"
command:
- /bin/karmada-controller-manager
- --kubeconfig=/etc/karmada/config/karmada.config
- --metrics-bind-address=:8080
- --cluster-status-update-frequency=10s
- --failover-eviction-timeout=30s
- --controllers=*,hpaScaleTargetMarker,deploymentReplicasSyncer
- --feature-gates=AllAlpha=true,AllBeta=true
- --health-probe-bind-address=0.0.0.0:10357
- --metrics-bind-address=$(POD_IP):8080
- --health-probe-bind-address=$(POD_IP):10357
- --enable-no-execute-taint-eviction=true
- --logging-format=json
- --v=4
livenessProbe:
httpGet:

View File

@ -26,14 +26,22 @@ spec:
privileged: false
image: docker.io/karmada/karmada-descheduler:latest
imagePullPolicy: IfNotPresent
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: KUBE_CACHE_MUTATION_DETECTOR
value: "{{KUBE_CACHE_MUTATION_DETECTOR}}"
command:
- /bin/karmada-descheduler
- --kubeconfig=/etc/karmada/config/karmada.config
- --metrics-bind-address=0.0.0.0:8080
- --health-probe-bind-address=0.0.0.0:10358
- --metrics-bind-address=$(POD_IP):8080
- --health-probe-bind-address=$(POD_IP):10358
- --scheduler-estimator-ca-file=/etc/karmada/pki/scheduler-estimator-client/ca.crt
- --scheduler-estimator-cert-file=/etc/karmada/pki/scheduler-estimator-client/tls.crt
- --scheduler-estimator-key-file=/etc/karmada/pki/scheduler-estimator-client/tls.key
- --logging-format=json
- --v=4
livenessProbe:
httpGet:

View File

@ -27,10 +27,17 @@ spec:
privileged: false
image: docker.io/karmada/karmada-metrics-adapter:latest
imagePullPolicy: IfNotPresent
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: KUBE_CACHE_MUTATION_DETECTOR
value: "{{KUBE_CACHE_MUTATION_DETECTOR}}"
command:
- /bin/karmada-metrics-adapter
- --kubeconfig=/etc/karmada/config/karmada.config
- --metrics-bind-address=:8080
- --metrics-bind-address=$(POD_IP):8080
- --authentication-kubeconfig=/etc/karmada/config/karmada.config
- --authorization-kubeconfig=/etc/karmada/config/karmada.config
- --client-ca-file=/etc/karmada/pki/server/ca.crt
@ -40,6 +47,8 @@ spec:
- --audit-log-maxage=0
- --audit-log-maxbackup=0
- --tls-min-version=VersionTLS13
- --bind-address=$(POD_IP)
- --logging-format=json
readinessProbe:
httpGet:
path: /readyz

View File

@ -26,6 +26,13 @@ spec:
privileged: false
image: docker.io/karmada/karmada-scheduler-estimator:latest
imagePullPolicy: IfNotPresent
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: KUBE_CACHE_MUTATION_DETECTOR
value: "{{KUBE_CACHE_MUTATION_DETECTOR}}"
command:
- /bin/karmada-scheduler-estimator
- --kubeconfig=/etc/{{member_cluster_name}}-kubeconfig
@ -33,8 +40,9 @@ spec:
- --grpc-auth-cert-file=/etc/karmada/pki/server/tls.crt
- --grpc-auth-key-file=/etc/karmada/pki/server/tls.key
- --grpc-client-ca-file=/etc/karmada/pki/server/ca.crt
- --metrics-bind-address=0.0.0.0:8080
- --health-probe-bind-address=0.0.0.0:10351
- --metrics-bind-address=$(POD_IP):8080
- --health-probe-bind-address=$(POD_IP):10351
- --logging-format=json
livenessProbe:
httpGet:
path: /healthz
@ -58,7 +66,7 @@ spec:
volumes:
- name: server-cert
secret:
secretName: karmada-metrics-adapter-cert
secretName: karmada-scheduler-estimator-cert
- name: member-kubeconfig
secret:
secretName: {{member_cluster_name}}-kubeconfig

View File

@ -39,16 +39,24 @@ spec:
- containerPort: 8080
name: metrics
protocol: TCP
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: KUBE_CACHE_MUTATION_DETECTOR
value: "{{KUBE_CACHE_MUTATION_DETECTOR}}"
command:
- /bin/karmada-scheduler
- --kubeconfig=/etc/karmada/config/karmada.config
- --metrics-bind-address=0.0.0.0:8080
- --health-probe-bind-address=0.0.0.0:10351
- --metrics-bind-address=$(POD_IP):8080
- --health-probe-bind-address=$(POD_IP):10351
- --enable-scheduler-estimator=true
- --scheduler-estimator-ca-file=/etc/karmada/pki/scheduler-estimator-client/ca.crt
- --scheduler-estimator-cert-file=/etc/karmada/pki/scheduler-estimator-client/tls.crt
- --scheduler-estimator-key-file=/etc/karmada/pki/scheduler-estimator-client/tls.key
- --feature-gates=AllAlpha=true,AllBeta=true
- --logging-format=json
- --v=4
volumeMounts:
- name: karmada-config

View File

@ -27,6 +27,13 @@ spec:
privileged: false
image: docker.io/karmada/karmada-search:latest
imagePullPolicy: IfNotPresent
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: KUBE_CACHE_MUTATION_DETECTOR
value: "{{KUBE_CACHE_MUTATION_DETECTOR}}"
command:
- /bin/karmada-search
- --kubeconfig=/etc/karmada/config/karmada.config
@ -42,6 +49,8 @@ spec:
- --audit-log-maxage=0
- --audit-log-maxbackup=0
- --tls-min-version=VersionTLS13
- --bind-address=$(POD_IP)
- --logging-format=json
livenessProbe:
httpGet:
path: /livez

View File

@ -26,15 +26,22 @@ spec:
privileged: false
image: docker.io/karmada/karmada-webhook:latest
imagePullPolicy: IfNotPresent
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
command:
- /bin/karmada-webhook
- --kubeconfig=/etc/karmada/config/karmada.config
- --bind-address=0.0.0.0
- --metrics-bind-address=:8080
- --default-not-ready-toleration-seconds=30
- --default-unreachable-toleration-seconds=30
- --bind-address=$(POD_IP)
- --metrics-bind-address=$(POD_IP):8080
- --health-probe-bind-address=$(POD_IP):8000
- --secure-port=8443
- --cert-dir=/etc/karmada/pki/server
- --feature-gates=AllAlpha=true,AllBeta=true
- --allow-no-execute-taint-policy=true
- --logging-format=json
- --v=4
ports:
- containerPort: 8443

View File

@ -296,3 +296,31 @@ webhooks:
sideEffects: None
admissionReviewVersions: [ "v1" ]
timeoutSeconds: 3
- name: resourcebinding.karmada.io
rules:
- operations: ["CREATE", "UPDATE"]
apiGroups: ["work.karmada.io"]
apiVersions: ["*"]
resources: ["resourcebindings"]
scope: "Namespaced"
clientConfig:
url: https://karmada-webhook.karmada-system.svc:443/validate-resourcebinding
caBundle: {{caBundle}}
failurePolicy: Fail
sideEffects: NoneOnDryRun
admissionReviewVersions: ["v1"]
timeoutSeconds: 3
- name: clustertaintpolicy.karmada.io
rules:
- operations: ["CREATE", "UPDATE"]
apiGroups: ["policy.karmada.io"]
apiVersions: ["*"]
resources: ["clustertaintpolicies"]
scope: "Cluster"
clientConfig:
url: https://karmada-webhook.karmada-system.svc:443/validate-clustertaintpolicy
caBundle: {{caBundle}}
failurePolicy: Fail
sideEffects: None
admissionReviewVersions: [ "v1" ]
timeoutSeconds: 3

View File

@ -5,6 +5,7 @@ reviewers:
- jrkeen
- pidb
- Poor12
- zhzhuang-zju
approvers:
- a7i
- chaosi-zju

View File

@ -1,6 +1,46 @@
apiVersion: v1
entries:
karmada:
- apiVersion: v2
appVersion: v1.1.0
created: "2025-06-13T16:23:17.081220385+08:00"
dependencies:
- name: common
repository: https://charts.bitnami.com/bitnami
version: 2.x.x
description: A Helm chart for karmada
digest: cd93e64198f364ff2330d718d80b8f321530ab8147521ef2b6263198a35bc7e0
kubeVersion: '>= 1.16.0-0'
maintainers:
- email: chaosi@zju.edu.cn
name: chaosi-zju
- email: amiralavi7@gmail.com
name: a7i
name: karmada
type: application
urls:
- https://github.com/karmada-io/karmada/releases/download/v1.14.0/karmada-chart-v1.14.0.tgz
version: v1.14.0
- apiVersion: v2
appVersion: v1.1.0
created: "2025-03-10T11:24:11.714162019+08:00"
dependencies:
- name: common
repository: https://charts.bitnami.com/bitnami
version: 2.x.x
description: A Helm chart for karmada
digest: f1fa71eda8d924258c1b1aff58f14110f1b2c0935accf7b1b98f6dede5495b94
kubeVersion: '>= 1.16.0-0'
maintainers:
- email: chaosi@zju.edu.cn
name: chaosi-zju
- email: amiralavi7@gmail.com
name: a7i
name: karmada
type: application
urls:
- https://github.com/karmada-io/karmada/releases/download/v1.13.0/karmada-chart-v1.13.0.tgz
version: v1.13.0
- apiVersion: v2
appVersion: v1.1.0
created: "2024-12-09T12:16:02.111955134+08:00"
@ -404,4 +444,4 @@ entries:
urls:
- https://github.com/karmada-io/karmada/releases/download/v1.8.0/karmada-operator-chart-v1.8.0.tgz
version: v1.8.0
generated: "2024-12-09T12:16:03.776417885+08:00"
generated: "2025-06-13T16:23:17.069242033+08:00"

View File

@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.16.5
controller-gen.kubebuilder.io/version: v0.18.0
name: karmadas.operator.karmada.io
spec:
group: operator.karmada.io
@ -653,6 +653,8 @@ spec:
description: |-
awsElasticBlockStore represents an AWS Disk resource that is attached to a
kubelet's host machine and then exposed to the pod.
Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree
awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver.
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
properties:
fsType:
@ -684,8 +686,10 @@ spec:
- volumeID
type: object
azureDisk:
description: azureDisk represents an Azure Data Disk
mount on the host and bind mount to the pod.
description: |-
azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type
are redirected to the disk.csi.azure.com CSI driver.
properties:
cachingMode:
description: 'cachingMode is the Host Caching mode:
@ -724,8 +728,10 @@ spec:
- diskURI
type: object
azureFile:
description: azureFile represents an Azure File Service
mount on the host and bind mount to the pod.
description: |-
azureFile represents an Azure File Service mount on the host and bind mount to the pod.
Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type
are redirected to the file.csi.azure.com CSI driver.
properties:
readOnly:
description: |-
@ -744,8 +750,9 @@ spec:
- shareName
type: object
cephfs:
description: cephFS represents a Ceph FS mount on the
host that shares a pod's lifetime
description: |-
cephFS represents a Ceph FS mount on the host that shares a pod's lifetime.
Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported.
properties:
monitors:
description: |-
@ -798,6 +805,8 @@ spec:
cinder:
description: |-
cinder represents a cinder volume attached and mounted on kubelets host machine.
Deprecated: Cinder is deprecated. All operations for the in-tree cinder type
are redirected to the cinder.csi.openstack.org CSI driver.
More info: https://examples.k8s.io/mysql-cinder-pd/README.md
properties:
fsType:
@ -909,7 +918,7 @@ spec:
csi:
description: csi (Container Storage Interface) represents
ephemeral storage that is handled by certain external
CSI drivers (Beta feature).
CSI drivers.
properties:
driver:
description: |-
@ -1397,6 +1406,7 @@ spec:
description: |-
flexVolume represents a generic volume resource that is
provisioned/attached using an exec based plugin.
Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead.
properties:
driver:
description: driver is the name of the driver to
@ -1442,9 +1452,9 @@ spec:
- driver
type: object
flocker:
description: flocker represents a Flocker volume attached
to a kubelet's host machine. This depends on the Flocker
control service being running
description: |-
flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running.
Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported.
properties:
datasetName:
description: |-
@ -1460,6 +1470,8 @@ spec:
description: |-
gcePersistentDisk represents a GCE Disk resource that is attached to a
kubelet's host machine and then exposed to the pod.
Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree
gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver.
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
properties:
fsType:
@ -1495,7 +1507,7 @@ spec:
gitRepo:
description: |-
gitRepo represents a git repository at a particular revision.
DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an
Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an
EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir
into the Pod's container.
properties:
@ -1519,6 +1531,7 @@ spec:
glusterfs:
description: |-
glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.
More info: https://examples.k8s.io/volumes/glusterfs/README.md
properties:
endpoints:
@ -1578,7 +1591,7 @@ spec:
The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field.
The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images.
The volume will be mounted read-only (ro) and non-executable files (noexec).
Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath).
Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33.
The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.
properties:
pullPolicy:
@ -1728,9 +1741,9 @@ spec:
- claimName
type: object
photonPersistentDisk:
description: photonPersistentDisk represents a PhotonController
persistent disk attached and mounted on kubelets host
machine
description: |-
photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine.
Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported.
properties:
fsType:
description: |-
@ -1746,8 +1759,11 @@ spec:
- pdID
type: object
portworxVolume:
description: portworxVolume represents a portworx volume
attached and mounted on kubelets host machine
description: |-
portworxVolume represents a portworx volume attached and mounted on kubelets host machine.
Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type
are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate
is on.
properties:
fsType:
description: |-
@ -2118,8 +2134,9 @@ spec:
x-kubernetes-list-type: atomic
type: object
quobyte:
description: quobyte represents a Quobyte mount on the
host that shares a pod's lifetime
description: |-
quobyte represents a Quobyte mount on the host that shares a pod's lifetime.
Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported.
properties:
group:
description: |-
@ -2158,6 +2175,7 @@ spec:
rbd:
description: |-
rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.
More info: https://examples.k8s.io/volumes/rbd/README.md
properties:
fsType:
@ -2230,8 +2248,9 @@ spec:
- monitors
type: object
scaleIO:
description: scaleIO represents a ScaleIO persistent
volume attached and mounted on Kubernetes nodes.
description: |-
scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported.
properties:
fsType:
default: xfs
@ -2364,8 +2383,9 @@ spec:
type: string
type: object
storageos:
description: storageOS represents a StorageOS volume
attached and mounted on Kubernetes nodes.
description: |-
storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.
Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported.
properties:
fsType:
description: |-
@ -2410,8 +2430,10 @@ spec:
type: string
type: object
vsphereVolume:
description: vsphereVolume represents a vSphere volume
attached and mounted on kubelets host machine
description: |-
vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine.
Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type
are redirected to the csi.vsphere.vmware.com CSI driver.
properties:
fsType:
description: |-
@ -2471,6 +2493,17 @@ spec:
and services.
More info: http://kubernetes.io/docs/user-guide/labels
type: object
loadBalancerClass:
description: |-
LoadBalancerClass specifies the load balancer implementation class for the Karmada API server.
This field is applicable only when ServiceType is set to LoadBalancer.
If specified, the service will be processed by the load balancer implementation that matches the specified class.
By default, this is not set and the LoadBalancer type of Service uses the cloud provider's default load balancer
implementation.
Once set, it cannot be changed. The value must be a label-style identifier, with an optional prefix such as
"internal-vip" or "example.com/internal-vip".
More info: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-class
type: string
priorityClassName:
default: system-node-critical
description: |-
@ -2741,7 +2774,7 @@ spec:
Cannot be updated.
items:
description: EnvFromSource represents the source of
a set of ConfigMaps
a set of ConfigMaps or Secrets
properties:
configMapRef:
description: The ConfigMap to select from
@ -2762,8 +2795,8 @@ spec:
type: object
x-kubernetes-map-type: atomic
prefix:
description: An optional identifier to prepend
to each key in the ConfigMap. Must be a C_IDENTIFIER.
description: Optional text to prepend to the name
of each environment variable. Must be a C_IDENTIFIER.
type: string
secretRef:
description: The Secret to select from
@ -2814,7 +2847,8 @@ spec:
More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
properties:
exec:
description: Exec specifies the action to take.
description: Exec specifies a command to execute
in the container.
properties:
command:
description: |-
@ -2829,7 +2863,7 @@ spec:
x-kubernetes-list-type: atomic
type: object
httpGet:
description: HTTPGet specifies the http request
description: HTTPGet specifies an HTTP GET request
to perform.
properties:
host:
@ -2880,8 +2914,8 @@ spec:
- port
type: object
sleep:
description: Sleep represents the duration that
the container should sleep before being terminated.
description: Sleep represents a duration that
the container should sleep.
properties:
seconds:
description: Seconds is the number of seconds
@ -2894,8 +2928,8 @@ spec:
tcpSocket:
description: |-
Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept
for the backward compatibility. There are no validation of this field and
lifecycle hooks will fail in runtime when tcp handler is specified.
for backward compatibility. There is no validation of this field and
lifecycle hooks will fail at runtime when it is specified.
properties:
host:
description: 'Optional: Host name to connect
@ -2927,7 +2961,8 @@ spec:
More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
properties:
exec:
description: Exec specifies the action to take.
description: Exec specifies a command to execute
in the container.
properties:
command:
description: |-
@ -2942,7 +2977,7 @@ spec:
x-kubernetes-list-type: atomic
type: object
httpGet:
description: HTTPGet specifies the http request
description: HTTPGet specifies an HTTP GET request
to perform.
properties:
host:
@ -2993,8 +3028,8 @@ spec:
- port
type: object
sleep:
description: Sleep represents the duration that
the container should sleep before being terminated.
description: Sleep represents a duration that
the container should sleep.
properties:
seconds:
description: Seconds is the number of seconds
@ -3007,8 +3042,8 @@ spec:
tcpSocket:
description: |-
Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept
for the backward compatibility. There are no validation of this field and
lifecycle hooks will fail in runtime when tcp handler is specified.
for backward compatibility. There is no validation of this field and
lifecycle hooks will fail at runtime when it is specified.
properties:
host:
description: 'Optional: Host name to connect
@ -3027,6 +3062,12 @@ spec:
- port
type: object
type: object
stopSignal:
description: |-
StopSignal defines which signal will be sent to a container when it is being stopped.
If not specified, the default is defined by the container runtime in use.
StopSignal can only be set for Pods with a non-empty .spec.os.name
type: string
type: object
livenessProbe:
description: |-
@ -3036,7 +3077,8 @@ spec:
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
properties:
exec:
description: Exec specifies the action to take.
description: Exec specifies a command to execute
in the container.
properties:
command:
description: |-
@ -3057,8 +3099,7 @@ spec:
format: int32
type: integer
grpc:
description: GRPC specifies an action involving
a GRPC port.
description: GRPC specifies a GRPC HealthCheckRequest.
properties:
port:
description: Port number of the gRPC service.
@ -3077,7 +3118,7 @@ spec:
- port
type: object
httpGet:
description: HTTPGet specifies the http request
description: HTTPGet specifies an HTTP GET request
to perform.
properties:
host:
@ -3145,7 +3186,7 @@ spec:
format: int32
type: integer
tcpSocket:
description: TCPSocket specifies an action involving
description: TCPSocket specifies a connection to
a TCP port.
properties:
host:
@ -3251,7 +3292,8 @@ spec:
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
properties:
exec:
description: Exec specifies the action to take.
description: Exec specifies a command to execute
in the container.
properties:
command:
description: |-
@ -3272,8 +3314,7 @@ spec:
format: int32
type: integer
grpc:
description: GRPC specifies an action involving
a GRPC port.
description: GRPC specifies a GRPC HealthCheckRequest.
properties:
port:
description: Port number of the gRPC service.
@ -3292,7 +3333,7 @@ spec:
- port
type: object
httpGet:
description: HTTPGet specifies the http request
description: HTTPGet specifies an HTTP GET request
to perform.
properties:
host:
@ -3360,7 +3401,7 @@ spec:
format: int32
type: integer
tcpSocket:
description: TCPSocket specifies an action involving
description: TCPSocket specifies a connection to
a TCP port.
properties:
host:
@ -3710,7 +3751,8 @@ spec:
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
properties:
exec:
description: Exec specifies the action to take.
description: Exec specifies a command to execute
in the container.
properties:
command:
description: |-
@ -3731,8 +3773,7 @@ spec:
format: int32
type: integer
grpc:
description: GRPC specifies an action involving
a GRPC port.
description: GRPC specifies a GRPC HealthCheckRequest.
properties:
port:
description: Port number of the gRPC service.
@ -3751,7 +3792,7 @@ spec:
- port
type: object
httpGet:
description: HTTPGet specifies the http request
description: HTTPGet specifies an HTTP GET request
to perform.
properties:
host:
@ -3819,7 +3860,7 @@ spec:
format: int32
type: integer
tcpSocket:
description: TCPSocket specifies an action involving
description: TCPSocket specifies a connection to
a TCP port.
properties:
host:
@ -5167,9 +5208,27 @@ spec:
description: HTTPSource specifies how to download the CRD tarball
via either HTTP or HTTPS protocol.
properties:
proxy:
description: |-
Proxy specifies the configuration of a proxy server to use when downloading the CRD tarball.
When set, the operator will use the configuration to determine how to establish a connection to the proxy to fetch the tarball from the URL specified above.
This is useful in environments where direct access to the server hosting the CRD tarball is restricted and a proxy must be used to reach that server.
If a proxy configuration is not set, the operator will attempt to download the tarball directly from the URL specified above without using a proxy.
properties:
proxyURL:
description: |-
ProxyURL specifies the HTTP/HTTPS proxy server URL to use when downloading the CRD tarball.
This is useful in environments where direct access to the server hosting the CRD tarball is restricted and a proxy must be used to reach that server.
The format should be a valid URL, e.g., "http://proxy.example.com:8080".
type: string
required:
- proxyURL
type: object
url:
description: URL specifies the URL of the CRD tarball resource.
type: string
required:
- url
type: object
type: object
customCertificate:
@ -5197,6 +5256,13 @@ spec:
referenced.
type: string
type: object
leafCertValidityDays:
description: |-
LeafCertValidityDays specifies the validity period of leaf certificates (e.g., API Server certificate) in days.
If not specified, the default validity period of 1 year will be used.
format: int32
minimum: 1
type: integer
type: object
featureGates:
additionalProperties:
@ -5263,6 +5329,12 @@ spec:
required:
- registry
type: object
suspend:
description: |-
Suspend indicates that the operator should suspend reconciliation
for this Karmada control plane and all its managed resources.
Karmada instances for which this field is not explicitly set to `true` will continue to be reconciled as usual.
type: boolean
type: object
status:
description: Most recently observed status of the Karmada.

View File
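The operator CRD changes above (an HTTP proxy for the CRD tarball download, configurable leaf certificate validity, and a `suspend` switch) all surface as new fields on the `Karmada` custom resource. Below is a minimal sketch of setting the proxy and suspend fields, assuming they live at `spec.crdTarball.httpSource.proxy` and `spec.suspend` as the schema suggests; the instance name, namespace, tarball URL, and proxy endpoint are illustrative:

```yaml
apiVersion: operator.karmada.io/v1alpha1
kind: Karmada
metadata:
  name: karmada-demo         # hypothetical instance name
  namespace: karmada-system  # hypothetical namespace
spec:
  crdTarball:
    httpSource:
      # assumed release artifact URL; replace with the tarball your environment serves
      url: https://github.com/karmada-io/karmada/releases/download/v1.15.0/crds.tar.gz
      proxy:
        # used when the tarball host is only reachable through a proxy
        proxyURL: http://proxy.example.com:8080
  # set to true to pause reconciliation of this control plane and its managed resources
  suspend: false
```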

@ -44,6 +44,13 @@ spec:
- /bin/karmada-operator
- --leader-elect-resource-namespace={{ .Release.Namespace }}
- --v=2
{{- range .Values.operator.extraArgs }}
- {{ . }}
{{- end }}
{{- with .Values.operator.env }}
env:
{{- toYaml . | nindent 10 }}
{{- end }}
{{- if .Values.operator.resources }}
resources: {{- toYaml .Values.operator.resources | nindent 12 }}
{{- end }}

View File

@ -58,7 +58,31 @@ operator:
## - myRegistryKeySecretName
##
pullSecrets: []
## @param.resources
## @param operator.env List of environment variables to inject
##
## - Each entry must be a valid Kubernetes EnvVar object.
## - Supports both literal values and valueFrom references (ConfigMap, Secret, fieldRef, etc.).
## - If omitted or set to an empty array (`[]`), no env stanza will be included.
##
## A sample stanza is shown below.
##
# env:
# - name: http_proxy
# value: "http://best-awesome-proxy.com:8080"
# - name: https_proxy
# value: "http://best-awesome-proxy.com:8080"
# - name: no_proxy
# value: "localhost,127.0.0.1,*.svc,*.cluster.local"
## @param operator.extraArgs List of extra arguments for the operator binary
##
## A sample stanza is shown below.
##
# extraArgs:
# - --arg1=val1
# - --arg2
resources: {}
# If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.

View File

@ -108,9 +108,9 @@ The command removes all the Kubernetes components associated with the chart and
> **Note**: Some RBAC resources used by the `preJob` cannot be deleted by the `uninstall` command above. You may have to clean them up manually with tools like `kubectl`, for example with the following commands:
```console
kubectl delete sa/karmada-pre-job -nkarmada-system
kubectl delete clusterRole/karmada-pre-job
kubectl delete clusterRoleBinding/karmada-pre-job
kubectl delete sa/karmada-hook-job -nkarmada-system
kubectl delete clusterRole/karmada-hook-job
kubectl delete clusterRoleBinding/karmada-hook-job
kubectl delete ns karmada-system
```
@ -272,6 +272,7 @@ helm install karmada-scheduler-estimator -n karmada-system ./charts/karmada
| `scheduler.affinity` | Affinity of the scheduler | `{}` |
| `scheduler.tolerations` | Tolerations of the scheduler | `[]` |
| `scheduler.strategy` | Strategy of the scheduler | `{"type": "RollingUpdate", "rollingUpdate": {"maxUnavailable": "0", "maxSurge": "50%"} }` |
| `scheduler.enableSchedulerEstimator` | Enable calling cluster scheduler estimator for adjusting replicas | `false` |
| `webhook.labels` | Labels of the webhook deployment | `{"app": "karmada-webhook"}` |
| `webhook.replicaCount` | Target replicas of the webhook | `1` |
| `webhook.podLabels` | Labels of the webhook pods | `{}` |

View File

@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.16.5
controller-gen.kubebuilder.io/version: v0.18.0
name: workloadrebalancers.apps.karmada.io
spec:
group: apps.karmada.io

View File

@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.16.5
controller-gen.kubebuilder.io/version: v0.18.0
name: cronfederatedhpas.autoscaling.karmada.io
spec:
group: autoscaling.karmada.io

View File

@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.16.5
controller-gen.kubebuilder.io/version: v0.18.0
name: federatedhpas.autoscaling.karmada.io
spec:
group: autoscaling.karmada.io
@ -82,7 +82,9 @@ spec:
policies:
description: |-
policies is a list of potential scaling policies which can be used during scaling.
At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid
If not set, use the default values:
- For scale up: allow doubling the number of pods, or an absolute change of 4 pods in a 15s window.
- For scale down: allow all pods to be removed in a 15s window.
items:
description: HPAScalingPolicy is a single policy which must
hold true for a specified past interval.
@ -124,6 +126,24 @@ spec:
- For scale down: 300 (i.e. the stabilization window is 300 seconds long).
format: int32
type: integer
tolerance:
anyOf:
- type: integer
- type: string
description: |-
tolerance is the tolerance on the ratio between the current and desired
metric value under which no updates are made to the desired number of
replicas (e.g. 0.01 for 1%). Must be greater than or equal to zero. If not
set, the default cluster-wide tolerance is applied (by default 10%).
For example, if autoscaling is configured with a memory consumption target of 100Mi,
and scale-down and scale-up tolerances of 5% and 1% respectively, scaling will be
triggered when the actual consumption falls below 95Mi or exceeds 101Mi.
This is an alpha field and requires enabling the HPAConfigurableTolerance
feature gate.
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
type: object
scaleUp:
description: |-
@ -136,7 +156,9 @@ spec:
policies:
description: |-
policies is a list of potential scaling policies which can be used during scaling.
At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid
If not set, use the default values:
- For scale up: allow doubling the number of pods, or an absolute change of 4 pods in a 15s window.
- For scale down: allow all pods to be removed in a 15s window.
items:
description: HPAScalingPolicy is a single policy which must
hold true for a specified past interval.
@ -178,6 +200,24 @@ spec:
- For scale down: 300 (i.e. the stabilization window is 300 seconds long).
format: int32
type: integer
tolerance:
anyOf:
- type: integer
- type: string
description: |-
tolerance is the tolerance on the ratio between the current and desired
metric value under which no updates are made to the desired number of
replicas (e.g. 0.01 for 1%). Must be greater than or equal to zero. If not
set, the default cluster-wide tolerance is applied (by default 10%).
For example, if autoscaling is configured with a memory consumption target of 100Mi,
and scale-down and scale-up tolerances of 5% and 1% respectively, scaling will be
triggered when the actual consumption falls below 95Mi or exceeds 101Mi.
This is an alpha field and requires enabling the HPAConfigurableTolerance
feature gate.
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
type: object
type: object
maxReplicas:
@ -209,7 +249,6 @@ spec:
each pod of the current scale target (e.g. CPU or memory). Such metrics are
built in to Kubernetes, and have special scaling options on top of those
available to normal per-pod metrics using the "pods" source.
This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.
properties:
container:
description: container is the name of the container in the
@ -650,8 +689,6 @@ spec:
description: |-
type is the type of metric source. It should be one of "ContainerResource", "External",
"Object", "Pods" or "Resource", each mapping to a matching field in the object.
Note: "ContainerResource" type is available on when the feature-gate
HPAContainerMetrics is enabled
type: string
required:
- type
@ -1147,8 +1184,6 @@ spec:
description: |-
type is the type of metric source. It will be one of "ContainerResource", "External",
"Object", "Pods" or "Resource", each corresponds to a matching field in the object.
Note: "ContainerResource" type is available on when the feature-gate
HPAContainerMetrics is enabled
type: string
required:
- type

View File
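The new `tolerance` field above mirrors the worked example in its own description (5% scale-down and 1% scale-up tolerances around a 100Mi memory target). Here is a minimal FederatedHPA sketch using those numbers, assuming the alpha `HPAConfigurableTolerance` feature gate is enabled; the object and workload names are illustrative:

```yaml
apiVersion: autoscaling.karmada.io/v1alpha1
kind: FederatedHPA
metadata:
  name: nginx-fhpa   # hypothetical
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: nginx
  minReplicas: 1
  maxReplicas: 10
  behavior:
    scaleDown:
      tolerance: "0.05"   # no scale-down while actual usage stays above 95Mi
    scaleUp:
      tolerance: "0.01"   # no scale-up until actual usage exceeds 101Mi
  metrics:
    - type: Resource
      resource:
        name: memory
        target:
          type: AverageValue
          averageValue: 100Mi
```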

@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.16.5
controller-gen.kubebuilder.io/version: v0.18.0
name: resourceinterpretercustomizations.config.karmada.io
spec:
group: config.karmada.io
@ -60,6 +60,59 @@ spec:
customizations:
description: Customizations describe the interpretation rules.
properties:
componentResource:
description: |-
ComponentResource describes the rules for Karmada to discover the resource requirements
for multiple components from the given object.
This is designed for CRDs with multiple components (e.g., FlinkDeployment), but
can also be used for single-component resources like Deployment.
If implemented, the controller will use this to obtain per-component replica and resource
requirements, and will not call ReplicaResource.
If not implemented, the controller will fall back to ReplicaResource for backward compatibility.
This will only be used when the feature gate 'MultiplePodTemplatesScheduling' is enabled.
properties:
luaScript:
description: |-
LuaScript holds the Lua script that is used to extract the desired replica count and resource
requirements for each component of the resource.
The script should implement a function as follows:
```
luaScript: >
function GetComponents(desiredObj)
local components = {}
local jobManagerComponent = {
name = "jobmanager",
replicas = desiredObj.spec.jobManager.replicas
}
table.insert(components, jobManagerComponent)
local taskManagerComponent = {
name = "taskmanager",
replicas = desiredObj.spec.taskManager.replicas
}
table.insert(components, taskManagerComponent)
return components
end
```
The content of the LuaScript needs to be a whole function including both
declaration and implementation.
The parameters will be supplied by the system:
- desiredObj: the object represents the configuration to be applied
to the member cluster.
The function expects one return value:
- components: the resource requirements for each component.
The returned value will be set into a ResourceBinding or ClusterResourceBinding.
type: string
required:
- luaScript
type: object
dependencyInterpretation:
description: |-
DependencyInterpretation describes the rules for Karmada to analyze the

View File
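For orientation, the new `componentResource` hook above is declared through a `ResourceInterpreterCustomization`. The sketch below simply wraps the FlinkDeployment Lua function from the field description; the object name is illustrative, and per the description the hook only takes effect with the `MultiplePodTemplatesScheduling` feature gate enabled:

```yaml
apiVersion: config.karmada.io/v1alpha1
kind: ResourceInterpreterCustomization
metadata:
  name: declarative-flinkdeployment   # hypothetical
spec:
  target:
    apiVersion: flink.apache.org/v1beta1
    kind: FlinkDeployment
  customizations:
    componentResource:
      luaScript: >
        function GetComponents(desiredObj)
          local components = {}
          table.insert(components, {
            name = "jobmanager",
            replicas = desiredObj.spec.jobManager.replicas
          })
          table.insert(components, {
            name = "taskmanager",
            replicas = desiredObj.spec.taskManager.replicas
          })
          return components
        end
```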

@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.16.5
controller-gen.kubebuilder.io/version: v0.18.0
name: resourceinterpreterwebhookconfigurations.config.karmada.io
spec:
group: config.karmada.io

View File

@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.16.5
controller-gen.kubebuilder.io/version: v0.18.0
name: multiclusteringresses.networking.karmada.io
spec:
group: networking.karmada.io

View File

@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.16.5
controller-gen.kubebuilder.io/version: v0.18.0
name: multiclusterservices.networking.karmada.io
spec:
group: networking.karmada.io
@ -245,6 +245,8 @@ spec:
Ports is a list of records of service ports
If used, every port defined in the service should have an entry in it
items:
description: PortStatus represents the error condition
of a service port
properties:
error:
description: |-

View File

@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.16.5
controller-gen.kubebuilder.io/version: v0.18.0
name: clusteroverridepolicies.policy.karmada.io
spec:
group: policy.karmada.io

View File

@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.16.5
controller-gen.kubebuilder.io/version: v0.18.0
name: clusterpropagationpolicies.policy.karmada.io
spec:
group: policy.karmada.io
@ -153,16 +153,19 @@ spec:
format: int32
type: integer
purgeMode:
default: Graciously
default: Gracefully
description: |-
PurgeMode represents how to deal with the legacy applications on the
cluster from which the application is migrated.
Valid options are "Immediately", "Graciously" and "Never".
Defaults to "Graciously".
Valid options are "Directly", "Gracefully", "Never", "Immediately"(deprecated),
and "Graciously"(deprecated).
Defaults to "Gracefully".
enum:
- Directly
- Gracefully
- Never
- Immediately
- Graciously
- Never
type: string
statePreservation:
description: |-
@ -223,6 +226,83 @@ spec:
required:
- decisionConditions
type: object
cluster:
description: |-
Cluster indicates failover behaviors in case of cluster failure.
If this value is nil, the failover behavior in case of cluster failure
will be controlled by the controller's no-execute-taint-eviction-purge-mode
parameter.
If set, the failover behavior in case of cluster failure will be defined
by this value.
properties:
purgeMode:
default: Gracefully
description: |-
PurgeMode represents how to deal with the legacy applications on the
cluster from which the application is migrated.
Valid options are "Directly", "Gracefully".
Defaults to "Gracefully".
enum:
- Directly
- Gracefully
type: string
statePreservation:
description: |-
StatePreservation defines the policy for preserving and restoring state data
during failover events for stateful applications.
When an application fails over from one cluster to another, this policy enables
the extraction of critical data from the original resource configuration.
Upon successful migration, the extracted data is then re-injected into the new
resource, ensuring that the application can resume operation with its previous
state intact.
This is particularly useful for stateful applications where maintaining data
consistency across failover events is crucial.
If not specified, no state data will be preserved.
Note: This requires the StatefulFailoverInjection feature gate to be enabled,
which is alpha.
properties:
rules:
description: |-
Rules contains a list of StatePreservationRule configurations.
Each rule specifies a JSONPath expression targeting specific pieces of
state data to be preserved during failover events. An AliasLabelName is associated
with each rule, serving as a label key when the preserved data is passed
to the new cluster.
items:
description: |-
StatePreservationRule defines a single rule for state preservation.
It includes a JSONPath expression and an alias name that will be used
as a label key when passing state information to the new cluster.
properties:
aliasLabelName:
description: |-
AliasLabelName is the name that will be used as a label key when the preserved
data is passed to the new cluster. This facilitates the injection of the
preserved state back into the application resources during recovery.
type: string
jsonPath:
description: |-
JSONPath is the JSONPath template used to identify the state data
to be preserved from the original resource configuration.
The JSONPath syntax follows the Kubernetes specification:
https://kubernetes.io/docs/reference/kubectl/jsonpath/
Note: The JSONPath expression will start searching from the "status" field of
the API resource object by default. For example, to extract the "availableReplicas"
from a Deployment, the JSONPath expression should be "{.availableReplicas}", not
"{.status.availableReplicas}".
type: string
required:
- aliasLabelName
- jsonPath
type: object
type: array
required:
- rules
type: object
type: object
type: object
placement:
description: Placement represents the rule for select clusters to
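
As a rough sketch of the new cluster-failover knobs in this hunk, the manifest below sets purgeMode and one state-preservation rule on a ClusterPropagationPolicy; names and the alias label key are placeholders, and statePreservation requires the alpha StatefulFailoverInjection feature gate.

```
apiVersion: policy.karmada.io/v1alpha1
kind: ClusterPropagationPolicy
metadata:
  name: stateful-failover-demo        # placeholder name
spec:
  resourceSelectors:
    - apiVersion: apps/v1
      kind: Deployment
      name: nginx                     # placeholder workload
  failover:
    cluster:
      purgeMode: Directly             # delete from the failed cluster before re-creating
      statePreservation:
        rules:
          - aliasLabelName: demo.karmada.io/available-replicas  # placeholder label key
            jsonPath: "{.availableReplicas}"  # evaluated against .status by default
  placement:
    clusterAffinity:
      clusterNames:
        - member1
        - member2
```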


@ -0,0 +1,257 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.18.0
name: clustertaintpolicies.policy.karmada.io
spec:
group: policy.karmada.io
names:
kind: ClusterTaintPolicy
listKind: ClusterTaintPolicyList
plural: clustertaintpolicies
singular: clustertaintpolicy
scope: Cluster
versions:
- name: v1alpha1
schema:
openAPIV3Schema:
description: |-
ClusterTaintPolicy automates taint management on Cluster objects based
on declarative conditions.
The system evaluates AddOnConditions to determine when to add taints,
and RemoveOnConditions to determine when to remove taints.
AddOnConditions are evaluated before RemoveOnConditions.
Taints are NEVER automatically removed when the ClusterTaintPolicy is deleted.
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
description: Spec represents the desired behavior of ClusterTaintPolicy.
properties:
addOnConditions:
description: |-
AddOnConditions defines the conditions to match for triggering
the controller to add taints on the cluster object.
The match conditions are ANDed.
If AddOnConditions is empty, no taints will be added.
items:
description: |-
MatchCondition represents the details of a condition match that activates the
failover-relevant taints on target clusters.
properties:
conditionType:
description: ConditionType specifies the ClusterStatus condition
type.
type: string
operator:
description: |-
Operator represents a relationship to a set of values.
Valid operators are In, NotIn.
type: string
statusValues:
description: |-
StatusValues is an array of metav1.ConditionStatus values.
The item specifies the ClusterStatus condition status.
items:
type: string
type: array
required:
- conditionType
- operator
- statusValues
type: object
type: array
removeOnConditions:
description: |-
RemoveOnConditions defines the conditions to match for triggering
the controller to remove taints from the cluster object.
The match conditions are ANDed.
If RemoveOnConditions is empty, no taints will be removed.
items:
description: |-
MatchCondition represents the details of a condition match that activates the
failover-relevant taints on target clusters.
properties:
conditionType:
description: ConditionType specifies the ClusterStatus condition
type.
type: string
operator:
description: |-
Operator represents a relationship to a set of values.
Valid operators are In, NotIn.
type: string
statusValues:
description: |-
StatusValues is an array of metav1.ConditionStatus values.
The item specifies the ClusterStatus condition status.
items:
type: string
type: array
required:
- conditionType
- operator
- statusValues
type: object
type: array
taints:
description: |-
Taints specifies the taints that need to be added or removed on
the cluster object which match with TargetClusters.
If the Taints is modified, the system will process the taints based on
the latest value of Taints during the next condition-triggered execution,
regardless of whether the taint has been added or removed.
items:
description: Taint describes the taint that needs to be applied
to the cluster.
properties:
effect:
description: Effect represents the taint effect to be applied
to a cluster.
type: string
key:
description: Key represents the taint key to be applied to a
cluster.
type: string
value:
description: Value represents the taint value corresponding
to the taint key.
type: string
required:
- effect
- key
type: object
minItems: 1
type: array
targetClusters:
description: |-
TargetClusters specifies the clusters that ClusterTaintPolicy needs
to pay attention to.
For clusters that no longer match the TargetClusters, the taints
will be kept unchanged.
If targetClusters is not set, any cluster can be selected.
properties:
clusterNames:
description: ClusterNames is the list of clusters to be selected.
items:
type: string
type: array
exclude:
description: ExcludedClusters is the list of clusters to be ignored.
items:
type: string
type: array
fieldSelector:
description: |-
FieldSelector is a filter to select member clusters by fields.
The key (field) of the match expression should be 'provider', 'region', or 'zone',
and the operator of the match expression should be 'In' or 'NotIn'.
If non-nil and non-empty, only the clusters that match this filter will be selected.
properties:
matchExpressions:
description: A list of field selector requirements.
items:
description: |-
A node selector requirement is a selector that contains values, a key, and an operator
that relates the key and values.
properties:
key:
description: The label key that the selector applies
to.
type: string
operator:
description: |-
Represents a key's relationship to a set of values.
Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
type: string
values:
description: |-
An array of string values. If the operator is In or NotIn,
the values array must be non-empty. If the operator is Exists or DoesNotExist,
the values array must be empty. If the operator is Gt or Lt, the values
array must have a single element, which will be interpreted as an integer.
This array is replaced during a strategic merge patch.
items:
type: string
type: array
x-kubernetes-list-type: atomic
required:
- key
- operator
type: object
type: array
type: object
labelSelector:
description: |-
LabelSelector is a filter to select member clusters by labels.
If non-nil and non-empty, only the clusters that match this filter will be selected.
properties:
matchExpressions:
description: matchExpressions is a list of label selector
requirements. The requirements are ANDed.
items:
description: |-
A label selector requirement is a selector that contains values, a key, and an operator that
relates the key and values.
properties:
key:
description: key is the label key that the selector
applies to.
type: string
operator:
description: |-
operator represents a key's relationship to a set of values.
Valid operators are In, NotIn, Exists and DoesNotExist.
type: string
values:
description: |-
values is an array of string values. If the operator is In or NotIn,
the values array must be non-empty. If the operator is Exists or DoesNotExist,
the values array must be empty. This array is replaced during a strategic
merge patch.
items:
type: string
type: array
x-kubernetes-list-type: atomic
required:
- key
- operator
type: object
type: array
x-kubernetes-list-type: atomic
matchLabels:
additionalProperties:
type: string
description: |-
matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions, whose key field is "key", the
operator is "In", and the values array contains only "value". The requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
type: object
required:
- taints
type: object
required:
- spec
type: object
served: true
storage: true
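
Reading the schema above together, a minimal ClusterTaintPolicy sketch could taint clusters whose Ready condition turns False or Unknown and lift the taint once it returns to True; the policy name and taint key below are placeholders.

```
apiVersion: policy.karmada.io/v1alpha1
kind: ClusterTaintPolicy
metadata:
  name: taint-unready-clusters          # placeholder name
spec:
  targetClusters:
    clusterNames:
      - member1
      - member2
  addOnConditions:                      # ANDed; a single condition here
    - conditionType: Ready
      operator: In
      statusValues: ["False", "Unknown"]
  removeOnConditions:
    - conditionType: Ready
      operator: In
      statusValues: ["True"]
  taints:
    - key: example.karmada.io/not-ready # placeholder taint key
      effect: NoSchedule
```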


@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.16.5
controller-gen.kubebuilder.io/version: v0.18.0
name: federatedresourcequotas.policy.karmada.io
spec:
group: policy.karmada.io
@ -16,7 +16,14 @@ spec:
singular: federatedresourcequota
scope: Namespaced
versions:
- name: v1alpha1
- additionalPrinterColumns:
- jsonPath: .status.overall
name: OVERALL
type: string
- jsonPath: .status.overallUsed
name: OVERALL_USED
type: string
name: v1alpha1
schema:
openAPIV3Schema:
description: FederatedResourceQuota sets aggregate quota restrictions enforced
@ -54,9 +61,15 @@ spec:
type: object
staticAssignments:
description: |-
StaticAssignments represents the subset of desired hard limits for each cluster.
Note: for clusters not present in this list, Karmada will set an empty ResourceQuota to them, which means these
clusters will have no quotas in the referencing namespace.
StaticAssignments specifies ResourceQuota settings for specific clusters.
If non-empty, Karmada will create ResourceQuotas in the corresponding clusters.
Clusters not listed here, or all clusters when StaticAssignments is empty, will have no ResourceQuotas created.
This field addresses multi-cluster configuration management challenges by allowing centralized
control over ResourceQuotas across clusters.
Note: The Karmada scheduler currently does NOT use this configuration for scheduling decisions.
Future updates may integrate it into the scheduling logic.
items:
description: StaticClusterAssignment represents the set of desired
hard limits for a specific cluster.
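
Per the revised StaticAssignments semantics, ResourceQuotas are created only in the clusters listed; a minimal sketch follows (the name and quantities are placeholders, and spec.overall is assumed from the new printer columns).

```
apiVersion: policy.karmada.io/v1alpha1
kind: FederatedResourceQuota
metadata:
  name: team-quota            # placeholder name
  namespace: default
spec:
  overall:                    # surfaced via the new OVERALL printer column
    cpu: "10"
    memory: 20Gi
  staticAssignments:          # a ResourceQuota is created only in member1
    - clusterName: member1
      hard:
        cpu: "4"
        memory: 8Gi
```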


@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.16.5
controller-gen.kubebuilder.io/version: v0.18.0
name: overridepolicies.policy.karmada.io
spec:
group: policy.karmada.io


@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.16.5
controller-gen.kubebuilder.io/version: v0.18.0
name: propagationpolicies.policy.karmada.io
spec:
group: policy.karmada.io
@ -150,16 +150,19 @@ spec:
format: int32
type: integer
purgeMode:
default: Graciously
default: Gracefully
description: |-
PurgeMode represents how to deal with the legacy applications on the
cluster from which the application is migrated.
Valid options are "Immediately", "Graciously" and "Never".
Defaults to "Graciously".
Valid options are "Directly", "Gracefully", "Never", "Immediately"(deprecated),
and "Graciously"(deprecated).
Defaults to "Gracefully".
enum:
- Directly
- Gracefully
- Never
- Immediately
- Graciously
- Never
type: string
statePreservation:
description: |-
@ -220,6 +223,83 @@ spec:
required:
- decisionConditions
type: object
cluster:
description: |-
Cluster indicates failover behaviors in case of cluster failure.
If this value is nil, the failover behavior in case of cluster failure
will be controlled by the controller's no-execute-taint-eviction-purge-mode
parameter.
If set, the failover behavior in case of cluster failure will be defined
by this value.
properties:
purgeMode:
default: Gracefully
description: |-
PurgeMode represents how to deal with the legacy applications on the
cluster from which the application is migrated.
Valid options are "Directly", "Gracefully".
Defaults to "Gracefully".
enum:
- Directly
- Gracefully
type: string
statePreservation:
description: |-
StatePreservation defines the policy for preserving and restoring state data
during failover events for stateful applications.
When an application fails over from one cluster to another, this policy enables
the extraction of critical data from the original resource configuration.
Upon successful migration, the extracted data is then re-injected into the new
resource, ensuring that the application can resume operation with its previous
state intact.
This is particularly useful for stateful applications where maintaining data
consistency across failover events is crucial.
If not specified, no state data will be preserved.
Note: This requires the StatefulFailoverInjection feature gate to be enabled,
which is alpha.
properties:
rules:
description: |-
Rules contains a list of StatePreservationRule configurations.
Each rule specifies a JSONPath expression targeting specific pieces of
state data to be preserved during failover events. An AliasLabelName is associated
with each rule, serving as a label key when the preserved data is passed
to the new cluster.
items:
description: |-
StatePreservationRule defines a single rule for state preservation.
It includes a JSONPath expression and an alias name that will be used
as a label key when passing state information to the new cluster.
properties:
aliasLabelName:
description: |-
AliasLabelName is the name that will be used as a label key when the preserved
data is passed to the new cluster. This facilitates the injection of the
preserved state back into the application resources during recovery.
type: string
jsonPath:
description: |-
JSONPath is the JSONPath template used to identify the state data
to be preserved from the original resource configuration.
The JSONPath syntax follows the Kubernetes specification:
https://kubernetes.io/docs/reference/kubectl/jsonpath/
Note: The JSONPath expression will start searching from the "status" field of
the API resource object by default. For example, to extract the "availableReplicas"
from a Deployment, the JSONPath expression should be "{.availableReplicas}", not
"{.status.availableReplicas}".
type: string
required:
- aliasLabelName
- jsonPath
type: object
type: array
required:
- rules
type: object
type: object
type: object
placement:
description: Placement represents the rule for select clusters to
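
Given the rename above ("Graciously" → "Gracefully", "Immediately" → "Directly"), an application-failover stanza on a PropagationPolicy would now read as in this sketch; decisionConditions is required by the schema, and all names and the toleration value are placeholders.

```
apiVersion: policy.karmada.io/v1alpha1
kind: PropagationPolicy
metadata:
  name: app-failover-demo     # placeholder name
  namespace: default
spec:
  resourceSelectors:
    - apiVersion: apps/v1
      kind: Deployment
      name: web               # placeholder workload
  failover:
    application:
      decisionConditions:
        tolerationSeconds: 300        # placeholder tolerance
      purgeMode: Gracefully           # formerly "Graciously"
  placement:
    clusterAffinity:
      clusterNames: [member1, member2]
```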


@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.16.5
controller-gen.kubebuilder.io/version: v0.18.0
name: remedies.remedy.karmada.io
spec:
group: remedy.karmada.io


@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.16.5
controller-gen.kubebuilder.io/version: v0.18.0
name: clusterresourcebindings.work.karmada.io
spec:
group: work.karmada.io
@ -261,6 +261,199 @@ spec:
- name
type: object
type: array
components:
description: |-
Components represents the requirements of multiple pod templates of the referencing resource.
It is designed to support workloads that consist of multiple pod templates,
such as distributed training jobs (e.g., PyTorch, TensorFlow) and big data workloads (e.g., FlinkDeployment),
where each workload is composed of more than one pod template. It is also capable of representing
single-component workloads, such as Deployment.
Note: This field is intended to replace the legacy ReplicaRequirements and Replicas fields above.
It is only populated when the MultiplePodTemplatesScheduling feature gate is enabled.
items:
description: Component represents the requirements for a specific
component.
properties:
name:
description: |-
Name of this component.
It is required when the resource contains multiple components to ensure proper identification,
and must also be unique within the same resource.
maxLength: 32
type: string
replicaRequirements:
description: ReplicaRequirements represents the requirements
required by each replica for this component.
properties:
nodeClaim:
description: NodeClaim represents the node claim HardNodeAffinity,
NodeSelector and Tolerations required by each replica.
properties:
hardNodeAffinity:
description: |-
A node selector represents the union of the results of one or more label queries over a set of
nodes; that is, it represents the OR of the selectors represented by the node selector terms.
Note that only PodSpec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution
is included here because it has a hard limit on pod scheduling.
properties:
nodeSelectorTerms:
description: Required. A list of node selector terms.
The terms are ORed.
items:
description: |-
A null or empty node selector term matches no objects. The requirements of
them are ANDed.
The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
properties:
matchExpressions:
description: A list of node selector requirements
by node's labels.
items:
description: |-
A node selector requirement is a selector that contains values, a key, and an operator
that relates the key and values.
properties:
key:
description: The label key that the
selector applies to.
type: string
operator:
description: |-
Represents a key's relationship to a set of values.
Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
type: string
values:
description: |-
An array of string values. If the operator is In or NotIn,
the values array must be non-empty. If the operator is Exists or DoesNotExist,
the values array must be empty. If the operator is Gt or Lt, the values
array must have a single element, which will be interpreted as an integer.
This array is replaced during a strategic merge patch.
items:
type: string
type: array
x-kubernetes-list-type: atomic
required:
- key
- operator
type: object
type: array
x-kubernetes-list-type: atomic
matchFields:
description: A list of node selector requirements
by node's fields.
items:
description: |-
A node selector requirement is a selector that contains values, a key, and an operator
that relates the key and values.
properties:
key:
description: The label key that the
selector applies to.
type: string
operator:
description: |-
Represents a key's relationship to a set of values.
Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
type: string
values:
description: |-
An array of string values. If the operator is In or NotIn,
the values array must be non-empty. If the operator is Exists or DoesNotExist,
the values array must be empty. If the operator is Gt or Lt, the values
array must have a single element, which will be interpreted as an integer.
This array is replaced during a strategic merge patch.
items:
type: string
type: array
x-kubernetes-list-type: atomic
required:
- key
- operator
type: object
type: array
x-kubernetes-list-type: atomic
type: object
x-kubernetes-map-type: atomic
type: array
x-kubernetes-list-type: atomic
required:
- nodeSelectorTerms
type: object
x-kubernetes-map-type: atomic
nodeSelector:
additionalProperties:
type: string
description: |-
NodeSelector is a selector which must be true for the pod to fit on a node.
Selector which must match a node's labels for the pod to be scheduled on that node.
type: object
tolerations:
description: If specified, the pod's tolerations.
items:
description: |-
The pod this Toleration is attached to tolerates any taint that matches
the triple <key,value,effect> using the matching operator <operator>.
properties:
effect:
description: |-
Effect indicates the taint effect to match. Empty means match all taint effects.
When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
type: string
key:
description: |-
Key is the taint key that the toleration applies to. Empty means match all taint keys.
If the key is empty, operator must be Exists; this combination means to match all values and all keys.
type: string
operator:
description: |-
Operator represents a key's relationship to the value.
Valid operators are Exists and Equal. Defaults to Equal.
Exists is equivalent to wildcard for value, so that a pod can
tolerate all taints of a particular category.
type: string
tolerationSeconds:
description: |-
TolerationSeconds represents the period of time the toleration (which must be
of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
it is not set, which means tolerate the taint forever (do not evict). Zero and
negative values will be treated as 0 (evict immediately) by the system.
format: int64
type: integer
value:
description: |-
Value is the taint value the toleration matches to.
If the operator is Exists, the value should be empty, otherwise just a regular string.
type: string
type: object
type: array
type: object
priorityClassName:
description: PriorityClassName represents the resource's
priorityClassName
type: string
resourceRequest:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: ResourceRequest represents the resources required
by each replica.
type: object
type: object
replicas:
description: Replicas represents the replica number of the resource's
component.
format: int32
type: integer
required:
- name
- replicas
type: object
type: array
conflictResolution:
default: Abort
description: |-
@ -315,16 +508,19 @@ spec:
format: int32
type: integer
purgeMode:
default: Graciously
default: Gracefully
description: |-
PurgeMode represents how to deal with the legacy applications on the
cluster from which the application is migrated.
Valid options are "Immediately", "Graciously" and "Never".
Defaults to "Graciously".
Valid options are "Directly", "Gracefully", "Never", "Immediately"(deprecated),
and "Graciously"(deprecated).
Defaults to "Gracefully".
enum:
- Directly
- Gracefully
- Never
- Immediately
- Graciously
- Never
type: string
statePreservation:
description: |-
@ -385,6 +581,83 @@ spec:
required:
- decisionConditions
type: object
cluster:
description: |-
Cluster indicates failover behaviors in case of cluster failure.
If this value is nil, the failover behavior in case of cluster failure
will be controlled by the controller's no-execute-taint-eviction-purge-mode
parameter.
If set, the failover behavior in case of cluster failure will be defined
by this value.
properties:
purgeMode:
default: Gracefully
description: |-
PurgeMode represents how to deal with the legacy applications on the
cluster from which the application is migrated.
Valid options are "Directly", "Gracefully".
Defaults to "Gracefully".
enum:
- Directly
- Gracefully
type: string
statePreservation:
description: |-
StatePreservation defines the policy for preserving and restoring state data
during failover events for stateful applications.
When an application fails over from one cluster to another, this policy enables
the extraction of critical data from the original resource configuration.
Upon successful migration, the extracted data is then re-injected into the new
resource, ensuring that the application can resume operation with its previous
state intact.
This is particularly useful for stateful applications where maintaining data
consistency across failover events is crucial.
If not specified, no state data will be preserved.
Note: This requires the StatefulFailoverInjection feature gate to be enabled,
which is alpha.
properties:
rules:
description: |-
Rules contains a list of StatePreservationRule configurations.
Each rule specifies a JSONPath expression targeting specific pieces of
state data to be preserved during failover events. An AliasLabelName is associated
with each rule, serving as a label key when the preserved data is passed
to the new cluster.
items:
description: |-
StatePreservationRule defines a single rule for state preservation.
It includes a JSONPath expression and an alias name that will be used
as a label key when passing state information to the new cluster.
properties:
aliasLabelName:
description: |-
AliasLabelName is the name that will be used as a label key when the preserved
data is passed to the new cluster. This facilitates the injection of the
preserved state back into the application resources during recovery.
type: string
jsonPath:
description: |-
JSONPath is the JSONPath template used to identify the state data
to be preserved from the original resource configuration.
The JSONPath syntax follows the Kubernetes specification:
https://kubernetes.io/docs/reference/kubectl/jsonpath/
Note: The JSONPath expression will start searching from the "status" field of
the API resource object by default. For example, to extract the "availableReplicas"
from a Deployment, the JSONPath expression should be "{.availableReplicas}", not
"{.status.availableReplicas}".
type: string
required:
- aliasLabelName
- jsonPath
type: object
type: array
required:
- rules
type: object
type: object
type: object
gracefulEvictionTasks:
description: |-
@ -454,10 +727,12 @@ spec:
description: |-
PurgeMode represents how to deal with the legacy applications on the
cluster from which the application is migrated.
Valid options are "Immediately", "Graciously" and "Never".
Valid options are "Immediately", "Directly", "Graciously", "Gracefully" and "Never".
enum:
- Immediately
- Directly
- Graciously
- Gracefully
- Never
type: string
reason:
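
For orientation, the new Components field on a (Cluster)ResourceBinding spec carries per-template requirements roughly as sketched below; this assumes the MultiplePodTemplatesScheduling feature gate is enabled, and the quantities are placeholders.

```
# Partial spec sketch; populated by the system when the feature gate is on.
spec:
  components:
    - name: jobmanager
      replicas: 1
      replicaRequirements:
        resourceRequest:    # per-replica request, placeholder quantities
          cpu: "1"
          memory: 2Gi
    - name: taskmanager
      replicas: 2
      replicaRequirements:
        resourceRequest:
          cpu: "2"
          memory: 4Gi
```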


@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.16.5
controller-gen.kubebuilder.io/version: v0.18.0
name: resourcebindings.work.karmada.io
spec:
group: work.karmada.io
@ -261,6 +261,199 @@ spec:
- name
type: object
type: array
components:
description: |-
Components represents the requirements of multiple pod templates of the referencing resource.
It is designed to support workloads that consist of multiple pod templates,
such as distributed training jobs (e.g., PyTorch, TensorFlow) and big data workloads (e.g., FlinkDeployment),
where each workload is composed of more than one pod template. It is also capable of representing
single-component workloads, such as Deployment.
Note: This field is intended to replace the legacy ReplicaRequirements and Replicas fields above.
It is only populated when the MultiplePodTemplatesScheduling feature gate is enabled.
items:
description: Component represents the requirements for a specific
component.
properties:
name:
description: |-
Name of this component.
It is required when the resource contains multiple components to ensure proper identification,
and must also be unique within the same resource.
maxLength: 32
type: string
replicaRequirements:
description: ReplicaRequirements represents the requirements
required by each replica for this component.
properties:
nodeClaim:
description: NodeClaim represents the node claim HardNodeAffinity,
NodeSelector and Tolerations required by each replica.
properties:
hardNodeAffinity:
description: |-
A node selector represents the union of the results of one or more label queries over a set of
nodes; that is, it represents the OR of the selectors represented by the node selector terms.
Note that only PodSpec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution
is included here because it has a hard limit on pod scheduling.
properties:
nodeSelectorTerms:
description: Required. A list of node selector terms.
The terms are ORed.
items:
description: |-
A null or empty node selector term matches no objects. The requirements of
them are ANDed.
The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
properties:
matchExpressions:
description: A list of node selector requirements
by node's labels.
items:
description: |-
A node selector requirement is a selector that contains values, a key, and an operator
that relates the key and values.
properties:
key:
description: The label key that the
selector applies to.
type: string
operator:
description: |-
Represents a key's relationship to a set of values.
Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
type: string
values:
description: |-
An array of string values. If the operator is In or NotIn,
the values array must be non-empty. If the operator is Exists or DoesNotExist,
the values array must be empty. If the operator is Gt or Lt, the values
array must have a single element, which will be interpreted as an integer.
This array is replaced during a strategic merge patch.
items:
type: string
type: array
x-kubernetes-list-type: atomic
required:
- key
- operator
type: object
type: array
x-kubernetes-list-type: atomic
matchFields:
description: A list of node selector requirements
by node's fields.
items:
description: |-
A node selector requirement is a selector that contains values, a key, and an operator
that relates the key and values.
properties:
key:
description: The label key that the
selector applies to.
type: string
operator:
description: |-
Represents a key's relationship to a set of values.
Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
type: string
values:
description: |-
An array of string values. If the operator is In or NotIn,
the values array must be non-empty. If the operator is Exists or DoesNotExist,
the values array must be empty. If the operator is Gt or Lt, the values
array must have a single element, which will be interpreted as an integer.
This array is replaced during a strategic merge patch.
items:
type: string
type: array
x-kubernetes-list-type: atomic
required:
- key
- operator
type: object
type: array
x-kubernetes-list-type: atomic
type: object
x-kubernetes-map-type: atomic
type: array
x-kubernetes-list-type: atomic
required:
- nodeSelectorTerms
type: object
x-kubernetes-map-type: atomic
nodeSelector:
additionalProperties:
type: string
description: |-
NodeSelector is a selector which must be true for the pod to fit on a node.
Selector which must match a node's labels for the pod to be scheduled on that node.
type: object
tolerations:
description: If specified, the pod's tolerations.
items:
description: |-
The pod this Toleration is attached to tolerates any taint that matches
the triple <key,value,effect> using the matching operator <operator>.
properties:
effect:
description: |-
Effect indicates the taint effect to match. Empty means match all taint effects.
When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
type: string
key:
description: |-
Key is the taint key that the toleration applies to. Empty means match all taint keys.
If the key is empty, operator must be Exists; this combination means to match all values and all keys.
type: string
operator:
description: |-
Operator represents a key's relationship to the value.
Valid operators are Exists and Equal. Defaults to Equal.
Exists is equivalent to wildcard for value, so that a pod can
tolerate all taints of a particular category.
type: string
tolerationSeconds:
description: |-
TolerationSeconds represents the period of time the toleration (which must be
of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
it is not set, which means tolerate the taint forever (do not evict). Zero and
negative values will be treated as 0 (evict immediately) by the system.
format: int64
type: integer
value:
description: |-
Value is the taint value the toleration matches to.
If the operator is Exists, the value should be empty, otherwise just a regular string.
type: string
type: object
type: array
type: object
priorityClassName:
description: PriorityClassName represents the resource's
priorityClassName
type: string
resourceRequest:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: ResourceRequest represents the resources required
by each replica.
type: object
type: object
replicas:
description: Replicas represents the replica number of the resource's
component.
format: int32
type: integer
required:
- name
- replicas
type: object
type: array
conflictResolution:
default: Abort
description: |-
@ -315,16 +508,19 @@ spec:
format: int32
type: integer
purgeMode:
default: Graciously
default: Gracefully
description: |-
PurgeMode represents how to deal with the legacy applications on the
cluster from which the application is migrated.
Valid options are "Immediately", "Graciously" and "Never".
Defaults to "Graciously".
Valid options are "Directly", "Gracefully", "Never", "Immediately"(deprecated),
and "Graciously"(deprecated).
Defaults to "Gracefully".
enum:
- Directly
- Gracefully
- Never
- Immediately
- Graciously
- Never
type: string
statePreservation:
description: |-
@ -385,6 +581,83 @@ spec:
required:
- decisionConditions
type: object
cluster:
description: |-
Cluster indicates failover behaviors in case of cluster failure.
If this value is nil, the failover behavior in case of cluster failure
will be controlled by the controller's no-execute-taint-eviction-purge-mode
parameter.
If set, the failover behavior in case of cluster failure will be defined
by this value.
properties:
purgeMode:
default: Gracefully
description: |-
PurgeMode represents how to deal with the legacy applications on the
cluster from which the application is migrated.
Valid options are "Directly", "Gracefully".
Defaults to "Gracefully".
enum:
- Directly
- Gracefully
type: string
statePreservation:
description: |-
StatePreservation defines the policy for preserving and restoring state data
during failover events for stateful applications.
When an application fails over from one cluster to another, this policy enables
the extraction of critical data from the original resource configuration.
Upon successful migration, the extracted data is then re-injected into the new
resource, ensuring that the application can resume operation with its previous
state intact.
This is particularly useful for stateful applications where maintaining data
consistency across failover events is crucial.
If not specified, no state data will be preserved.
Note: This requires the StatefulFailoverInjection feature gate to be enabled,
which is alpha.
properties:
rules:
description: |-
Rules contains a list of StatePreservationRule configurations.
Each rule specifies a JSONPath expression targeting specific pieces of
state data to be preserved during failover events. An AliasLabelName is associated
with each rule, serving as a label key when the preserved data is passed
to the new cluster.
items:
description: |-
StatePreservationRule defines a single rule for state preservation.
It includes a JSONPath expression and an alias name that will be used
as a label key when passing state information to the new cluster.
properties:
aliasLabelName:
description: |-
AliasLabelName is the name that will be used as a label key when the preserved
data is passed to the new cluster. This facilitates the injection of the
preserved state back into the application resources during recovery.
type: string
jsonPath:
description: |-
JSONPath is the JSONPath template used to identify the state data
to be preserved from the original resource configuration.
The JSONPath syntax follows the Kubernetes specification:
https://kubernetes.io/docs/reference/kubectl/jsonpath/
Note: The JSONPath expression will start searching from the "status" field of
the API resource object by default. For example, to extract the "availableReplicas"
from a Deployment, the JSONPath expression should be "{.availableReplicas}", not
"{.status.availableReplicas}".
type: string
required:
- aliasLabelName
- jsonPath
type: object
type: array
required:
- rules
type: object
type: object
type: object
gracefulEvictionTasks:
description: |-
@ -454,10 +727,12 @@ spec:
description: |-
PurgeMode represents how to deal with the legacy applications on the
cluster from which the application is migrated.
Valid options are "Immediately", "Graciously" and "Never".
Valid options are "Immediately", "Directly", "Graciously", "Gracefully" and "Never".
enum:
- Immediately
- Directly
- Graciously
- Gracefully
- Never
type: string
reason:


@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.16.5
controller-gen.kubebuilder.io/version: v0.18.0
name: works.work.karmada.io
spec:
group: work.karmada.io


@ -3,6 +3,7 @@ resources:
- bases/multicluster/multicluster.x-k8s.io_serviceimports.yaml
- bases/policy/policy.karmada.io_clusteroverridepolicies.yaml
- bases/policy/policy.karmada.io_clusterpropagationpolicies.yaml
- bases/policy/policy.karmada.io_clustertaintpolicies.yaml
- bases/policy/policy.karmada.io_federatedresourcequotas.yaml
- bases/policy/policy.karmada.io_overridepolicies.yaml
- bases/policy/policy.karmada.io_propagationpolicies.yaml


@ -152,7 +152,6 @@ app: {{$name}}-controller-manager
{{- end }}
{{- end -}}
{{- define "karmada.scheduler.labels" -}}
{{ $name := include "karmada.name" . }}
{{- if .Values.scheduler.labels -}}
@ -173,7 +172,6 @@ app: {{$name}}-scheduler
{{- end }}
{{- end -}}
{{- define "karmada.descheduler.labels" -}}
{{ $name := include "karmada.name" . }}
{{- if .Values.descheduler.labels -}}
@ -207,7 +205,6 @@ app: {{$name}}
{{- end -}}
{{- end -}}
{{- define "karmada.webhook.labels" -}}
{{ $name := include "karmada.name" .}}
{{- if .Values.webhook.labels }}
@ -306,6 +303,10 @@ app: {{- include "karmada.name" .}}-search
{{- include "karmada.commonLabels" . -}}
{{- end -}}
{{- define "karmada.preUpdateJob.labels" -}}
{{- include "karmada.commonLabels" . -}}
{{- end -}}
{{- define "karmada.staticResourceJob.labels" -}}
{{- include "karmada.commonLabels" . -}}
{{- end -}}
@ -356,6 +357,16 @@ app: {{- include "karmada.name" .}}-search
secretName: {{ $name }}-cert
{{- end -}}
{{/*
Common env for POD_IP
*/}}
{{- define "karmada.env.podIP" -}}
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
{{- end -}}
{{/*
Return the proper karmada internal etcd image name
*/}}
@ -545,35 +556,82 @@ Return the proper Docker Image Registry Secret Names
{{ include "common.images.pullSecrets" (dict "images" (list .Values.cfssl.image .Values.kubectl.image .Values.etcd.internal.image .Values.agent.image .Values.apiServer.image .Values.controllerManager.image .Values.descheduler.image .Values.schedulerEstimator.image .Values.scheduler.image .Values.webhook.image .Values.aggregatedApiServer.image .Values.metricsAdapter.image .Values.search.image .Values.kubeControllerManager.image) "global" .Values.global) }}
{{- end -}}
{{- /*
Generate the --feature-gates command line argument for karmada-controllerManager.
Iterates over .Values.controllerManager.featureGates and constructs a comma-separated key=value list.
If any feature gates are set, outputs: --feature-gates=Foo=true,Bar=false
If none are set, outputs nothing.
*/ -}}
{{- define "karmada.controllerManager.featureGates" -}}
{{- if (not (empty .Values.controllerManager.featureGates)) }}
{{- $featureGatesFlag := "" -}}
{{- if .Values.controllerManager.featureGates -}}
{{- $featureGates := list -}}
{{- range $key, $value := .Values.controllerManager.featureGates -}}
{{- if not (empty (toString $value)) }}
{{- $featureGatesFlag = cat $featureGatesFlag $key "=" $value "," -}}
{{- if not (empty (toString $value)) -}}
{{- $featureGates = append $featureGates (printf "%s=%s" $key (toString $value)) -}}
{{- end -}}
{{- end -}}
{{- if gt (len $featureGatesFlag) 0 }}
{{- $featureGatesFlag := trimSuffix "," $featureGatesFlag | nospace -}}
{{- printf "%s=%s" "--feature-gates" $featureGatesFlag -}}
{{- if $featureGates -}}
{{- printf "--feature-gates=%s" (join "," $featureGates) | nospace -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- define "karmada.schedulerEstimator.featureGates" -}}
{{- $featureGatesArg := index . "featureGatesArg" -}}
{{- if (not (empty $featureGatesArg)) }}
{{- $featureGatesFlag := "" -}}
{{- range $key, $value := $featureGatesArg -}}
{{- if not (empty (toString $value)) }}
{{- $featureGatesFlag = cat $featureGatesFlag $key "=" $value "," -}}
{{- /*
Generate the --feature-gates command line argument for karmada-webhook.
Iterates over .Values.webhook.featureGates and constructs a comma-separated key=value list.
If any feature gates are set, outputs: --feature-gates=Foo=true,Bar=false
If none are set, outputs nothing.
*/ -}}
{{- define "karmada.webhook.featureGates" -}}
{{- if .Values.webhook.featureGates -}}
{{- $featureGates := list -}}
{{- range $key, $value := .Values.webhook.featureGates -}}
{{- if not (empty (toString $value)) -}}
{{- $featureGates = append $featureGates (printf "%s=%s" $key (toString $value)) -}}
{{- end -}}
{{- end -}}
{{- if $featureGates -}}
{{- printf "--feature-gates=%s" (join "," $featureGates) | nospace -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- if gt (len $featureGatesFlag) 0 }}
{{- $featureGatesFlag := trimSuffix "," $featureGatesFlag | nospace -}}
{{- printf "%s=%s" "--feature-gates" $featureGatesFlag -}}
{{- /*
Generate the --feature-gates command line argument for karmada-scheduler.
Iterates over .Values.scheduler.featureGates and constructs a comma-separated key=value list.
If any feature gates are set, outputs: --feature-gates=Foo=true,Bar=false
If none are set, outputs nothing.
*/ -}}
{{- define "karmada.scheduler.featureGates" -}}
{{- if .Values.scheduler.featureGates -}}
{{- $featureGates := list -}}
{{- range $key, $value := .Values.scheduler.featureGates -}}
{{- if not (empty (toString $value)) -}}
{{- $featureGates = append $featureGates (printf "%s=%s" $key (toString $value)) -}}
{{- end -}}
{{- end -}}
{{- if $featureGates -}}
{{- printf "--feature-gates=%s" (join "," $featureGates) | nospace -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- /*
Generate the --feature-gates command line argument for karmada-schedulerEstimator.
Iterates over .Values.schedulerEstimator.featureGates and constructs a comma-separated key=value list.
If any feature gates are set, outputs: --feature-gates=Foo=true,Bar=false
If none are set, outputs nothing.
*/ -}}
{{- define "karmada.schedulerEstimator.featureGates" -}}
{{- if .Values.schedulerEstimator.featureGates -}}
{{- $featureGates := list -}}
{{- range $key, $value := .Values.schedulerEstimator.featureGates -}}
{{- if not (empty (toString $value)) -}}
{{- $featureGates = append $featureGates (printf "%s=%s" $key (toString $value)) -}}
{{- end -}}
{{- end -}}
{{- if $featureGates -}}
{{- printf "--feature-gates=%s" (join "," $featureGates) | nospace -}}
{{- end -}}
{{- end -}}
{{- end -}}
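
All four helpers above render the same flag shape from a featureGates map. As a sketch with placeholder gate names, the values below yield --feature-gates=Bar=false,Foo=true (Helm iterates map keys alphabetically):

```
# values.yaml sketch (placeholder gate names)
controllerManager:
  featureGates:
    Foo: true
    Bar: false
# rendered argument: --feature-gates=Bar=false,Foo=true
```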


@ -274,4 +274,18 @@ webhooks:
sideEffects: None
admissionReviewVersions: [ "v1" ]
timeoutSeconds: 3
- name: resourcebinding.karmada.io
rules:
- operations: ["CREATE", "UPDATE"]
apiGroups: ["work.karmada.io"]
apiVersions: ["*"]
resources: ["resourcebindings"]
scope: "Namespaced"
clientConfig:
url: https://{{ $name }}-webhook.{{ $namespace }}.svc:443/validate-resourcebinding
{{- include "karmada.webhook.caBundle" . | nindent 6 }}
failurePolicy: Fail
sideEffects: NoneOnDryRun
admissionReviewVersions: ["v1"]
timeoutSeconds: 3
{{- end -}}


@ -101,6 +101,8 @@ spec:
- name: {{ $name }}
image: {{ template "karmada.agent.image" . }}
imagePullPolicy: {{ .Values.agent.image.pullPolicy }}
env:
{{- include "karmada.env.podIP" . | nindent 12 }}
command:
- /bin/karmada-agent
- --karmada-kubeconfig=/etc/kubeconfig/kubeconfig
@ -110,7 +112,8 @@ spec:
{{- end }}
- --cluster-status-update-frequency=10s
- --leader-elect-resource-namespace={{ include "karmada.namespace" . }}
- --health-probe-bind-address=0.0.0.0:10357
- --metrics-bind-address=$(POD_IP):8080
- --health-probe-bind-address=$(POD_IP):10357
- --v=4
livenessProbe:
httpGet:


@ -43,6 +43,8 @@ spec:
- name: apiserver-cert
mountPath: /etc/kubernetes/pki
readOnly: true
env:
{{- include "karmada.env.podIP" . | nindent 12 }}
command:
- /bin/karmada-aggregated-apiserver
- --kubeconfig=/etc/kubeconfig
@ -67,6 +69,7 @@ spec:
- --audit-log-maxage=0
- --audit-log-maxbackup=0
- --tls-min-version=VersionTLS13
- --bind-address=$(POD_IP)
resources:
{{- toYaml .Values.aggregatedApiServer.resources | nindent 12 }}
readinessProbe:


@ -73,6 +73,35 @@ spec:
- --max-requests-inflight={{ .Values.apiServer.maxRequestsInflight }}
- --max-mutating-requests-inflight={{ .Values.apiServer.maxMutatingRequestsInflight }}
- --tls-min-version=VersionTLS13
{{- with .Values.apiServer.oidc }}
{{- if .caFile }}
- --oidc-ca-file={{ .caFile }}
{{- end }}
{{- if .clientId }}
- --oidc-client-id={{ .clientId }}
{{- end }}
{{- if .groupsClaim }}
- --oidc-groups-claim={{ .groupsClaim }}
{{- end }}
{{- if .groupsPrefix }}
- --oidc-groups-prefix={{ .groupsPrefix }}
{{- end }}
{{- if .issuerUrl }}
- --oidc-issuer-url={{ .issuerUrl }}
{{- end }}
{{- if .requiredClaim }}
- --oidc-required-claim={{ .requiredClaim }}
{{- end }}
{{- if .signingAlgs }}
- --oidc-signing-algs={{ .signingAlgs }}
{{- end }}
{{- if .usernameClaim }}
- --oidc-username-claim={{ .usernameClaim }}
{{- end }}
{{- if .usernamePrefix }}
- --oidc-username-prefix={{ .usernamePrefix }}
{{- end }}
{{- end }}
ports:
- name: http
containerPort: 5443
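
Each --oidc-* flag above is emitted only when the matching key is set under .Values.apiServer.oidc; a minimal values sketch (issuer URL and client ID are placeholders):

```
apiServer:
  oidc:
    issuerUrl: https://oidc.example.com   # placeholder issuer
    clientId: karmada                     # placeholder client ID
    usernameClaim: email
    groupsClaim: groups
```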


@ -50,13 +50,15 @@ spec:
- name: {{ $name }}-controller-manager
image: {{ template "karmada.controllerManager.image" . }}
imagePullPolicy: {{ .Values.controllerManager.image.pullPolicy }}
env:
{{- include "karmada.env.podIP" . | nindent 12 }}
command:
- /bin/karmada-controller-manager
- --kubeconfig=/etc/kubeconfig
- --cluster-status-update-frequency=10s
- --leader-elect-resource-namespace={{ $systemNamespace }}
- --health-probe-bind-address=0.0.0.0:10357
- --metrics-bind-address=:8080
- --metrics-bind-address=$(POD_IP):8080
- --health-probe-bind-address=$(POD_IP):10357
- --v=2
{{- if .Values.controllerManager.controllers }}
- --controllers={{ .Values.controllerManager.controllers }}


@ -47,11 +47,13 @@ spec:
- name: {{ $name }}-descheduler
image: {{ template "karmada.descheduler.image" . }}
imagePullPolicy: {{ .Values.descheduler.image.pullPolicy }}
env:
{{- include "karmada.env.podIP" . | nindent 12 }}
command:
- /bin/karmada-descheduler
- --kubeconfig=/etc/kubeconfig
- --metrics-bind-address=0.0.0.0:8080
- --health-probe-bind-address=0.0.0.0:10358
- --metrics-bind-address=$(POD_IP):8080
- --health-probe-bind-address=$(POD_IP):10358
- --leader-elect-resource-namespace={{ $systemNamespace }}
- --scheduler-estimator-ca-file=/etc/karmada/pki/server-ca.crt
- --scheduler-estimator-cert-file=/etc/karmada/pki/karmada.crt


@ -41,10 +41,12 @@ spec:
- name: apiserver-cert
mountPath: /etc/kubernetes/pki
readOnly: true
env:
{{- include "karmada.env.podIP" . | nindent 12 }}
command:
- /bin/karmada-metrics-adapter
- --kubeconfig=/etc/kubeconfig
- --metrics-bind-address=:8080
- --metrics-bind-address=$(POD_IP):8080
- --authentication-kubeconfig=/etc/kubeconfig
- --authorization-kubeconfig=/etc/kubeconfig
- --tls-cert-file=/etc/kubernetes/pki/karmada.crt
@ -53,6 +55,7 @@ spec:
- --audit-log-maxage=0
- --audit-log-maxbackup=0
- --tls-min-version=VersionTLS13
- --bind-address=$(POD_IP)
resources:
{{- toYaml .Values.metricsAdapter.resources | nindent 12 }}
readinessProbe:


@ -44,6 +44,8 @@ spec:
- name: karmada-scheduler-estimator
image: {{ template "karmada.schedulerEstimator.image" $ }}
imagePullPolicy: {{ $.Values.schedulerEstimator.image.pullPolicy }}
env:
{{- include "karmada.env.podIP" . | nindent 12 }}
command:
- /bin/karmada-scheduler-estimator
- --kubeconfig=/etc/{{ $clusterName }}-kubeconfig
@ -51,11 +53,16 @@ spec:
- --grpc-auth-cert-file=/etc/karmada/pki/karmada.crt
- --grpc-auth-key-file=/etc/karmada/pki/karmada.key
- --grpc-client-ca-file=/etc/karmada/pki/server-ca.crt
- --metrics-bind-address=0.0.0.0:8080
- --health-probe-bind-address=0.0.0.0:10351
{{- with (include "karmada.schedulerEstimator.featureGates" (dict "featureGatesArg" $.Values.schedulerEstimator.featureGates)) }}
- --metrics-bind-address=$(POD_IP):8080
- --health-probe-bind-address=$(POD_IP):10351
{{- /*
We use '$' to refer to the root context.
Inside this 'range' loop, '.' refers to the current item from '.Values.schedulerEstimator.memberClusters'.
Using '$' ensures that we can access the top-level '.Values.schedulerEstimator.featureGates'.
*/}}
{{- with (include "karmada.schedulerEstimator.featureGates" $) }}
- {{ . }}
{{- end}}
{{- end }}
livenessProbe:
httpGet:
path: /healthz


@ -47,15 +47,23 @@ spec:
- name: {{ $name }}-scheduler
image: {{ template "karmada.scheduler.image" .}}
imagePullPolicy: {{ .Values.scheduler.image.pullPolicy }}
env:
{{- include "karmada.env.podIP" . | nindent 12 }}
command:
- /bin/karmada-scheduler
- --kubeconfig=/etc/kubeconfig
- --metrics-bind-address=0.0.0.0:8080
- --health-probe-bind-address=0.0.0.0:10351
- --metrics-bind-address=$(POD_IP):8080
- --health-probe-bind-address=$(POD_IP):10351
- --leader-elect-resource-namespace={{ $systemNamespace }}
- --scheduler-estimator-ca-file=/etc/karmada/pki/server-ca.crt
- --scheduler-estimator-cert-file=/etc/karmada/pki/karmada.crt
- --scheduler-estimator-key-file=/etc/karmada/pki/karmada.key
{{- if .Values.scheduler.enableSchedulerEstimator }}
- --enable-scheduler-estimator=true
{{- end }}
{{- with (include "karmada.scheduler.featureGates" .) }}
- {{ . }}
{{- end }}
livenessProbe:
httpGet:
path: /healthz


@ -56,6 +56,8 @@ spec:
- name: kubeconfig-secret
subPath: kubeconfig
mountPath: /etc/kubeconfig
env:
{{- include "karmada.env.podIP" . | nindent 12 }}
command:
- /bin/karmada-search
- --kubeconfig=/etc/kubeconfig
@ -80,6 +82,7 @@ spec:
- --audit-log-maxage=0
- --audit-log-maxbackup=0
- --tls-min-version=VersionTLS13
- --bind-address=$(POD_IP)
livenessProbe:
httpGet:
path: /livez
@ -90,7 +93,7 @@ spec:
periodSeconds: 15
timeoutSeconds: 5
resources:
{{- toYaml .Values.apiServer.resources | nindent 12 }}
{{- toYaml .Values.search.resources | nindent 12 }}
priorityClassName: {{ .Values.search.priorityClassName }}
volumes:
{{- include "karmada.search.kubeconfig.volume" . | nindent 8 }}


@ -47,12 +47,19 @@ spec:
- name: {{ $name }}-webhook
image: {{ template "karmada.webhook.image" . }}
imagePullPolicy: {{ .Values.webhook.image.pullPolicy }}
env:
{{- include "karmada.env.podIP" . | nindent 12 }}
command:
- /bin/karmada-webhook
- --kubeconfig=/etc/kubeconfig
- --bind-address=0.0.0.0
- --bind-address=$(POD_IP)
- --metrics-bind-address=$(POD_IP):8080
- --health-probe-bind-address=$(POD_IP):8000
- --secure-port=8443
- --cert-dir=/var/serving-cert
{{- with (include "karmada.webhook.featureGates" .) }}
- {{ . }}
{{- end }}
ports:
- containerPort: 8443
- containerPort: 8080


@ -8,7 +8,7 @@ metadata:
name: {{ $name }}-crds-kustomization
namespace: {{ $namespace }}
annotations:
"helm.sh/hook": pre-install
"helm.sh/hook": pre-install,pre-upgrade
"helm.sh/hook-weight": "2"
{{- if "karmada.preInstallJob.labels" }}
labels:
@ -28,7 +28,7 @@ metadata:
name: {{ $name }}-crds-autoscaling-bases
namespace: {{ $namespace }}
annotations:
"helm.sh/hook": pre-install
"helm.sh/hook": pre-install,pre-upgrade
"helm.sh/hook-weight": "2"
{{- if "karmada.preInstallJob.labels" }}
labels:
@ -48,7 +48,7 @@ metadata:
name: {{ $name }}-crds-config-bases
namespace: {{ $namespace }}
annotations:
"helm.sh/hook": pre-install
"helm.sh/hook": pre-install,pre-upgrade
"helm.sh/hook-weight": "2"
{{- if "karmada.preInstallJob.labels" }}
labels:
@ -68,7 +68,7 @@ metadata:
name: {{ $name }}-crds-multicluster-bases
namespace: {{ $namespace }}
annotations:
"helm.sh/hook": pre-install
"helm.sh/hook": pre-install,pre-upgrade
"helm.sh/hook-weight": "2"
{{- if "karmada.preInstallJob.labels" }}
labels:
@ -88,7 +88,7 @@ metadata:
name: {{ $name }}-crds-networking-bases
namespace: {{ $namespace }}
annotations:
"helm.sh/hook": pre-install
"helm.sh/hook": pre-install,pre-upgrade
"helm.sh/hook-weight": "2"
{{- if "karmada.preInstallJob.labels" }}
labels:
@ -109,7 +109,7 @@ metadata:
name: {{ $name }}-crds-policy-bases
namespace: {{ $namespace }}
annotations:
"helm.sh/hook": pre-install
"helm.sh/hook": pre-install,pre-upgrade
"helm.sh/hook-weight": "2"
{{- if "karmada.preInstallJob.labels" }}
labels:
@ -129,7 +129,7 @@ metadata:
name: {{ $name }}-crds-remedy-bases
namespace: {{ $namespace }}
annotations:
"helm.sh/hook": pre-install
"helm.sh/hook": pre-install,pre-upgrade
"helm.sh/hook-weight": "2"
{{- if "karmada.preInstallJob.labels" }}
labels:
@ -149,7 +149,7 @@ metadata:
name: {{ $name }}-crds-work-bases
namespace: {{ $namespace }}
annotations:
"helm.sh/hook": pre-install
"helm.sh/hook": pre-install,pre-upgrade
"helm.sh/hook-weight": "2"
{{- if "karmada.preInstallJob.labels" }}
labels:
@ -169,7 +169,7 @@ metadata:
name: {{ $name }}-crds-apps-bases
namespace: {{ $namespace }}
annotations:
"helm.sh/hook": pre-install
"helm.sh/hook": pre-install,pre-upgrade
"helm.sh/hook-weight": "2"
{{- if "karmada.preInstallJob.labels" }}
labels:
@ -182,6 +182,58 @@ data:
{{- $.Files.Get $path | nindent 8 }}
{{ end }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ $name }}-hook-job
namespace: {{ $namespace }}
annotations:
"helm.sh/hook": pre-install,pre-upgrade
"helm.sh/hook-weight": "1"
{{- if "karmada.preInstallJob.labels" }}
labels:
{{- include "karmada.preInstallJob.labels" . | nindent 4 }}
{{- end }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ $name }}-hook-job
annotations:
"helm.sh/hook": pre-install,pre-upgrade
"helm.sh/hook-weight": "1"
{{- if "karmada.preInstallJob.labels" }}
labels:
{{- include "karmada.preInstallJob.labels" . | nindent 4 }}
{{- end }}
rules:
- apiGroups: ['*']
resources: ['*']
verbs: ["get", "watch", "list", "create", "update", "patch", "delete"]
- nonResourceURLs: ['*']
verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ $name }}-hook-job
annotations:
"helm.sh/hook": pre-install,pre-upgrade
"helm.sh/hook-weight": "1"
{{- if "karmada.preInstallJob.labels" }}
labels:
{{- include "karmada.preInstallJob.labels" . | nindent 4 }}
{{- end }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ $name }}-hook-job
subjects:
- kind: ServiceAccount
name: {{ $name }}-hook-job
namespace: {{ $namespace }}
{{- if eq .Values.certs.mode "custom" }}
---
apiVersion: v1
@ -190,7 +242,7 @@ metadata:
name: {{ $name }}-static-resources
namespace: {{ $namespace }}
annotations:
"helm.sh/hook": pre-install
"helm.sh/hook": pre-install,pre-upgrade
"helm.sh/hook-weight": "2"
{{- if "karmada.preInstallJob.labels" }}
labels:
@ -216,7 +268,7 @@ metadata:
name: {{ $name }}-crds-patches
namespace: {{ $namespace }}
annotations:
"helm.sh/hook": pre-install
"helm.sh/hook": pre-install,pre-upgrade
"helm.sh/hook-weight": "2"
{{- if "karmada.preInstallJob.labels" }}
labels:
@ -388,8 +440,8 @@ spec:
mkdir -p /opt/configs
mkdir -p /opt/certs
cp -r -L /opt/mount/* /opt/configs/
openssl req -x509 -sha256 -new -nodes -days 365 -newkey rsa:{{ .Values.certs.auto.rsaSize }} -keyout "/opt/certs/server-ca.key" -out "/opt/certs/server-ca.crt" -subj "/C=xx/ST=x/L=x/O=x/OU=x/CN=ca/emailAddress=x/"
openssl req -x509 -sha256 -new -nodes -days 365 -newkey rsa:{{ .Values.certs.auto.rsaSize }} -keyout "/opt/certs/front-proxy-ca.key" -out "/opt/certs/front-proxy-ca.crt" -subj "/C=xx/ST=x/L=x/O=x/OU=x/CN=ca/emailAddress=x/"
openssl req -x509 -sha256 -new -nodes -days {{ .Values.certs.auto.rootCAExpiryDays }} -newkey rsa:{{ .Values.certs.auto.rsaSize }} -keyout "/opt/certs/server-ca.key" -out "/opt/certs/server-ca.crt" -subj "/C=xx/ST=x/L=x/O=x/OU=x/CN=ca/emailAddress=x/"
openssl req -x509 -sha256 -new -nodes -days {{ .Values.certs.auto.rootCAExpiryDays }} -newkey rsa:{{ .Values.certs.auto.rsaSize }} -keyout "/opt/certs/front-proxy-ca.key" -out "/opt/certs/front-proxy-ca.crt" -subj "/C=xx/ST=x/L=x/O=x/OU=x/CN=ca/emailAddress=x/"
echo '{"signing":{"default":{"expiry":{{ printf `"%s"` .Values.certs.auto.expiry }},"usages":["signing","key encipherment","client auth","server auth"]}}}' > "/opt/certs/server-ca-config.json"
echo '{"CN":"system:admin","hosts":{{ tpl (toJson .Values.certs.auto.hosts) . }},"names":[{"O":"system:masters"}],"key":{"algo":"rsa","size":{{ .Values.certs.auto.rsaSize }}}}' | cfssl gencert -ca=/opt/certs/server-ca.crt -ca-key=/opt/certs/server-ca.key -config=/opt/certs/server-ca-config.json - | cfssljson -bare /opt/certs/karmada
echo '{"signing":{"default":{"expiry":{{ printf `"%s"` .Values.certs.auto.expiry }},"usages":["signing","key encipherment","client auth","server auth"]}}}' > "/opt/certs/front-proxy-ca-config.json"
@@ -446,56 +498,5 @@ spec:
- name: configs
emptyDir: {}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ $name }}-hook-job
namespace: {{ $namespace }}
annotations:
"helm.sh/hook": pre-install
"helm.sh/hook-weight": "1"
{{- if "karmada.preInstallJob.labels" }}
labels:
{{- include "karmada.preInstallJob.labels" . | nindent 4 }}
{{- end }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ $name }}-hook-job
annotations:
"helm.sh/hook": pre-install
"helm.sh/hook-weight": "1"
{{- if "karmada.preInstallJob.labels" }}
labels:
{{- include "karmada.preInstallJob.labels" . | nindent 4 }}
{{- end }}
rules:
- apiGroups: ['*']
resources: ['*']
verbs: ["get", "watch", "list", "create", "update", "patch", "delete"]
- nonResourceURLs: ['*']
verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ $name }}-hook-job
annotations:
"helm.sh/hook": pre-install
"helm.sh/hook-weight": "1"
{{- if "karmada.preInstallJob.labels" }}
labels:
{{- include "karmada.preInstallJob.labels" . | nindent 4 }}
{{- end }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ $name }}-hook-job
subjects:
- kind: ServiceAccount
name: {{ $name }}-hook-job
namespace: {{ $namespace }}
---
{{- end }}
{{- end }}

View File

@@ -0,0 +1,98 @@
{{- $name := include "karmada.name" . -}}
{{- $namespace := include "karmada.namespace" . -}}
{{- if eq .Values.installMode "host" }}
{{- if eq .Values.certs.mode "auto" }}
---
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ $name }}-static-resources
namespace: {{ $namespace }}
annotations:
"helm.sh/hook": pre-upgrade
"helm.sh/hook-weight": "2"
data:
{{- print "webhook-configuration.yaml: " | nindent 6 }} |-
{{- include "karmada.webhook.configuration" . | nindent 8 }}
{{- print "system-namespace.yaml: " | nindent 6 }} |-
{{- include "karmada.systemNamespace" . | nindent 8 }}
{{- print "karmada-aggregated-apiserver-apiservice.yaml: " | nindent 6 }} |-
{{- include "karmada.apiservice" . | nindent 8 }}
{{- print "cluster-proxy-admin-rbac.yaml: " | nindent 6 }} |-
{{- include "karmada.proxyRbac" . | nindent 8 }}
{{- print "bootstrap-token-configuration.yaml: " | nindent 6 }} |-
{{- include "karmada.bootstrapToken.configuration" . | nindent 8 }}
{{- print "clusterrole.yaml: " | nindent 6 }} |-
{{- include "karmada.clusterrole" . | nindent 8 }}
---
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ $name }}-crds-patches
namespace: {{ $namespace }}
annotations:
"helm.sh/hook": pre-upgrade
"helm.sh/hook-weight": "2"
data:
{{- print "webhook_in_clusterresourcebindings.yaml: " | nindent 6 }} |-
{{- include "karmada.crd.patch.webhook.clusterresourcebinding" . | nindent 8 }}
{{- print "webhook_in_resourcebindings.yaml: " | nindent 6 }} |-
{{- include "karmada.crd.patch.webhook.resourcebinding" . | nindent 8 }}
---
apiVersion: batch/v1
kind: Job
metadata:
name: "{{ $name }}-pre-upgrade"
namespace: {{ $namespace }}
annotations:
"helm.sh/hook": pre-upgrade
"helm.sh/hook-weight": "3"
"helm.sh/hook-delete-policy": {{ .Values.preUpdateJob.hookDeletePolicy }}
{{- if "karmada.preUpdateJob.labels" }}
labels:
{{- include "karmada.preUpdateJob.labels" . | nindent 4 }}
{{- end }}
spec:
parallelism: 1
completions: 1
template:
metadata:
name: {{ $name }}-pre-upgrade
labels:
app.kubernetes.io/managed-by: {{ .Release.Service | quote }}
app.kubernetes.io/instance: {{ $name | quote }}
helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
spec:
{{- include "karmada.imagePullSecrets" . | nindent 6 }}
{{- with .Values.preUpdateJob.tolerations}}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.preUpdateJob.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ $name }}-hook-job
restartPolicy: Never
containers:
- name: pre-upgrade
image: {{ template "karmada.kubectl.image" . }}
imagePullPolicy: {{ .Values.kubectl.image.pullPolicy }}
command:
- /bin/sh
- -c
- |
set -ex
# Fetch certs from existing secret
karmada_ca=$(kubectl get secret {{ $name }}-cert -n {{ $namespace }} -o jsonpath='{.data.server-ca\.crt}')
kubectl get configmap {{ $name }}-static-resources -n {{ $namespace }} -o yaml | sed -e "s/{{ print "{{ ca_crt }}" }}/${karmada_ca}/g" | kubectl apply -f -
kubectl get configmap {{ $name }}-crds-patches -n {{ $namespace }} -o yaml | sed -e "s/{{ print "{{ ca_crt }}" }}/${karmada_ca}/g" | kubectl apply -f -
resources:
requests:
cpu: 50m
memory: 64Mi
limits:
cpu: 100m
memory: 128Mi
{{- end }}
{{- end }}
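The hook's shell pipeline above is the crux of this new pre-upgrade job: it reads the base64-encoded CA out of the existing cert secret and splices it into the `{{ ca_crt }}` placeholder of the two ConfigMaps. For orientation, a minimal client-go sketch of the same substitution — an illustration, not the chart's code; the secret and namespace names are placeholders (the chart derives them from the release name):

package main

import (
	"context"
	"encoding/base64"
	"fmt"
	"strings"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig() // hook jobs run inside the host cluster
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// Placeholder names, standing in for {{ $name }}-cert / {{ $namespace }}.
	secret, err := client.CoreV1().Secrets("karmada-system").Get(context.TODO(), "karmada-cert", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	// client-go decodes secret data, so re-encode to match the jsonpath output used above.
	caB64 := base64.StdEncoding.EncodeToString(secret.Data["server-ca.crt"])

	manifest := "caBundle: {{ ca_crt }}" // stand-in for the fetched ConfigMap payload
	fmt.Println(strings.ReplaceAll(manifest, "{{ ca_crt }}", caB64))
}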

View File

@@ -98,6 +98,12 @@ preInstallJob:
## Define policies that determine when to delete corresponding hook resources: before-hook-creation,hook-succeeded,hook-failed
hookDeletePolicy: "hook-succeeded"
preUpdateJob:
tolerations: []
nodeSelector: {}
## Define policies that determine when to delete corresponding hook resources: before-hook-creation,hook-succeeded,hook-failed
hookDeletePolicy: "hook-succeeded"
## static-resource job config
staticResourceJob:
tolerations: []
@@ -128,7 +134,10 @@ certs:
mode: auto
auto:
## @param certs.auto.expiry expiry of the certificate
## Note: The expiry value should not exceed the rootCA expiry time (rootCAExpiryDays * 24h)
expiry: 43800h
## @param certs.auto.rootCAExpiryDays expiry of the root CA certificate in days, defaults to 3650 days (10 years)
rootCAExpiryDays: 3650
## @param certs.auto.hosts hosts of the certificate
hosts: [
"kubernetes.default.svc",
@@ -234,6 +243,10 @@ scheduler:
podDisruptionBudget: *podDisruptionBudget
## @param scheduler.priorityClassName the priority class name for the karmada-scheduler
priorityClassName: "system-node-critical"
## @param scheduler.enableSchedulerEstimator enable scheduler estimator
enableSchedulerEstimator: false
## @param scheduler.featureGates A set of key=value pairs that describe feature gates for karmada-scheduler
featureGates: {}
## webhook config
webhook:
@@ -288,10 +301,12 @@ webhook:
rollingUpdate:
maxUnavailable: 0
maxSurge: 50%
## @param apiServer.podDisruptionBudget
## @param webhook.podDisruptionBudget
podDisruptionBudget: *podDisruptionBudget
## @param webhook.priorityClassName the priority class name for the karmada-webhook
priorityClassName: "system-node-critical"
## @param webhook.featureGates A set of key=value pairs that describe feature gates for karmada-webhook
featureGates: {}
## controller manager config
controllerManager:
@@ -443,6 +458,17 @@ apiServer:
podDisruptionBudget: *podDisruptionBudget
## @param apiServer.priorityClassName the priority class name for the karmada-apiserver
priorityClassName: "system-node-critical"
oidc:
caFile: ""
clientId: ""
groupsClaim: ""
groupsPrefix: ""
issuerUrl: ""
## @param apiServer.oidc.requiredClaim comma-separated 'key=value' pairs that describe required claims in the ID token
requiredClaim: ""
signingAlgs: ""
usernameClaim: ""
usernamePrefix: ""
## karmada aggregated apiserver config
aggregatedApiServer:

View File

@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
FROM alpine:3.21.3
FROM alpine:3.22.1
ARG BINARY

View File

@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
FROM alpine:3.21.3
FROM alpine:3.22.1
ARG BINARY
ARG TARGETPLATFORM

View File

@@ -28,14 +28,18 @@ import (
"k8s.io/client-go/informers"
kubeclientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/util/flowcontrol"
cliflag "k8s.io/component-base/cli/flag"
"k8s.io/component-base/logs"
logsv1 "k8s.io/component-base/logs/api/v1"
"k8s.io/component-base/term"
"k8s.io/klog/v2"
"k8s.io/utils/ptr"
controllerruntime "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/config"
"sigs.k8s.io/controller-runtime/pkg/healthz"
crtlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics"
ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics"
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
"github.com/karmada-io/karmada/cmd/agent/app/options"
@@ -61,6 +65,7 @@ import (
"github.com/karmada-io/karmada/pkg/util/fedinformer/typedmanager"
"github.com/karmada-io/karmada/pkg/util/gclient"
"github.com/karmada-io/karmada/pkg/util/helper"
"github.com/karmada-io/karmada/pkg/util/indexregistry"
"github.com/karmada-io/karmada/pkg/util/names"
"github.com/karmada-io/karmada/pkg/util/objectwatcher"
"github.com/karmada-io/karmada/pkg/util/restmapper"
@@ -70,13 +75,32 @@ import (
// NewAgentCommand creates a *cobra.Command object with default parameters
func NewAgentCommand(ctx context.Context) *cobra.Command {
logConfig := logsv1.NewLoggingConfiguration()
fss := cliflag.NamedFlagSets{}
// Set klog flags
logsFlagSet := fss.FlagSet("logs")
logs.AddFlags(logsFlagSet, logs.SkipLoggingConfigurationFlags())
logsv1.AddFlags(logConfig, logsFlagSet)
klogflag.Add(logsFlagSet)
genericFlagSet := fss.FlagSet("generic")
genericFlagSet.AddGoFlagSet(flag.CommandLine)
opts := options.NewOptions()
opts.AddFlags(genericFlagSet, controllers.ControllerNames())
cmd := &cobra.Command{
Use: names.KarmadaAgentComponentName,
Long: `The karmada-agent is the agent of member clusters. It can register a specific cluster to the Karmada control
plane and sync manifests from the Karmada control plane to the member cluster. In addition, it also syncs the status of the member
cluster and its manifests back to the Karmada control plane.`,
PersistentPreRunE: func(_ *cobra.Command, _ []string) error {
if err := logsv1.ValidateAndApply(logConfig, features.FeatureGate); err != nil {
return err
}
logs.InitLogs()
return nil
},
RunE: func(_ *cobra.Command, _ []string) error {
// validate options
if errs := opts.Validate(); len(errs) != 0 {
@@ -97,16 +121,6 @@ cluster and manifests to the Karmada control plane.`,
},
}
fss := cliflag.NamedFlagSets{}
genericFlagSet := fss.FlagSet("generic")
genericFlagSet.AddGoFlagSet(flag.CommandLine)
opts.AddFlags(genericFlagSet, controllers.ControllerNames())
// Set klog flags
logsFlagSet := fss.FlagSet("logs")
klogflag.Add(logsFlagSet)
cmd.AddCommand(sharedcommand.NewCmdVersion(names.KarmadaAgentComponentName))
cmd.Flags().AddFlagSet(genericFlagSet)
cmd.Flags().AddFlagSet(logsFlagSet)
@@ -140,8 +154,7 @@ func run(ctx context.Context, opts *options.Options) error {
if err != nil {
return fmt.Errorf("error building kubeconfig of karmada control plane: %w", err)
}
controlPlaneRestConfig.QPS, controlPlaneRestConfig.Burst = opts.KubeAPIQPS, opts.KubeAPIBurst
controlPlaneRestConfig.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(opts.KubeAPIQPS, opts.KubeAPIBurst)
clusterConfig, err := controllerruntime.GetConfig()
if err != nil {
return fmt.Errorf("error building kubeconfig of member cluster: %w", err)
@@ -164,28 +177,25 @@ func run(ctx context.Context, opts *options.Options) error {
ClusterConfig: clusterConfig,
}
id, err := util.ObtainClusterID(clusterKubeClient)
registerOption.ClusterID, err = util.ObtainClusterID(clusterKubeClient)
if err != nil {
return err
}
ok, name, err := util.IsClusterIdentifyUnique(karmadaClient, id)
if err != nil {
if err = registerOption.Validate(karmadaClient, true); err != nil {
return err
}
if !ok && opts.ClusterName != name {
return fmt.Errorf("the same cluster has been registered with name %s", name)
}
registerOption.ClusterID = id
clusterSecret, impersonatorSecret, err := util.ObtainCredentialsFromMemberCluster(clusterKubeClient, registerOption)
if err != nil {
return err
}
registerOption.Secret = *clusterSecret
registerOption.ImpersonatorSecret = *impersonatorSecret
if clusterSecret != nil {
registerOption.Secret = *clusterSecret
}
if impersonatorSecret != nil {
registerOption.ImpersonatorSecret = *impersonatorSecret
}
err = util.RegisterClusterInControllerPlane(registerOption, controlPlaneKubeClient, generateClusterInControllerPlane)
if err != nil {
return fmt.Errorf("failed to register with karmada control plane: %w", err)
@@ -216,6 +226,7 @@ func run(ctx context.Context, opts *options.Options) error {
clusterv1alpha1.SchemeGroupVersion.WithKind("Cluster").GroupKind().String(): opts.ConcurrentClusterSyncs,
},
CacheSyncTimeout: opts.ClusterCacheSyncTimeout.Duration,
UsePriorityQueue: ptr.To(features.FeatureGate.Enabled(features.ControllerPriorityQueue)),
},
NewCache: func(config *rest.Config, opts cache.Options) (cache.Cache, error) {
opts.DefaultTransform = fedinformer.StripUnusedFields
@@ -231,11 +242,12 @@ func run(ctx context.Context, opts *options.Options) error {
return err
}
crtlmetrics.Registry.MustRegister(metrics.ClusterCollectors()...)
crtlmetrics.Registry.MustRegister(metrics.ResourceCollectorsForAgent()...)
crtlmetrics.Registry.MustRegister(metrics.PoolCollectors()...)
ctrlmetrics.Registry.MustRegister(metrics.ClusterCollectors()...)
ctrlmetrics.Registry.MustRegister(metrics.ResourceCollectorsForAgent()...)
ctrlmetrics.Registry.MustRegister(metrics.PoolCollectors()...)
ctrlmetrics.Registry.MustRegister(metrics.NewBuildInfoCollector())
if err = setupControllers(controllerManager, opts, ctx.Done()); err != nil {
if err = setupControllers(ctx, controllerManager, opts); err != nil {
return err
}
@@ -247,25 +259,27 @@ func run(ctx context.Context, opts *options.Options) error {
return nil
}
func setupControllers(mgr controllerruntime.Manager, opts *options.Options, stopChan <-chan struct{}) error {
func setupControllers(ctx context.Context, mgr controllerruntime.Manager, opts *options.Options) error {
restConfig := mgr.GetConfig()
dynamicClientSet := dynamic.NewForConfigOrDie(restConfig)
controlPlaneInformerManager := genericmanager.NewSingleClusterInformerManager(dynamicClientSet, 0, stopChan)
controlPlaneInformerManager := genericmanager.NewSingleClusterInformerManager(ctx, dynamicClientSet, 0)
controlPlaneKubeClientSet := kubeclientset.NewForConfigOrDie(restConfig)
// We need a service lister to build a resource interpreter with `ClusterIPServiceResolver`
// which allows connection to the customized interpreter webhook without a cluster DNS service.
sharedFactory := informers.NewSharedInformerFactory(controlPlaneKubeClientSet, 0)
serviceLister := sharedFactory.Core().V1().Services().Lister()
sharedFactory.Start(stopChan)
sharedFactory.WaitForCacheSync(stopChan)
sharedFactory.Start(ctx.Done())
sharedFactory.WaitForCacheSync(ctx.Done())
resourceInterpreter := resourceinterpreter.NewResourceInterpreter(controlPlaneInformerManager, serviceLister)
if err := mgr.Add(resourceInterpreter); err != nil {
return fmt.Errorf("failed to setup custom resource interpreter: %w", err)
if err := resourceInterpreter.Start(ctx); err != nil {
return fmt.Errorf("failed to start resource interpreter: %w", err)
}
rateLimiterGetter := util.GetClusterRateLimiterGetter().SetDefaultLimits(opts.ClusterAPIQPS, opts.ClusterAPIBurst)
clusterClientOption := &util.ClientOption{RateLimiterGetter: rateLimiterGetter.GetRateLimiter}
objectWatcher := objectwatcher.NewObjectWatcher(mgr.GetClient(), mgr.GetRESTMapper(), util.NewClusterDynamicClientSetForAgent, resourceInterpreter)
objectWatcher := objectwatcher.NewObjectWatcher(mgr.GetClient(), mgr.GetRESTMapper(), util.NewClusterDynamicClientSetForAgent, clusterClientOption, resourceInterpreter)
controllerContext := controllerscontext.Context{
Mgr: mgr,
ObjectWatcher: objectWatcher,
@@ -278,8 +292,6 @@ func setupControllers(mgr controllerruntime.Manager, opts *options.Options, stop
ClusterSuccessThreshold: opts.ClusterSuccessThreshold,
ClusterFailureThreshold: opts.ClusterFailureThreshold,
ClusterCacheSyncTimeout: opts.ClusterCacheSyncTimeout,
ClusterAPIQPS: opts.ClusterAPIQPS,
ClusterAPIBurst: opts.ClusterAPIBurst,
ConcurrentWorkSyncs: opts.ConcurrentWorkSyncs,
RateLimiterOptions: opts.RateLimiterOpts,
EnableClusterResourceModeling: opts.EnableClusterResourceModeling,
@@ -287,8 +299,9 @@ func setupControllers(mgr controllerruntime.Manager, opts *options.Options, stop
CertRotationRemainingTimeThreshold: opts.CertRotationRemainingTimeThreshold,
KarmadaKubeconfigNamespace: opts.KarmadaKubeconfigNamespace,
},
StopChan: stopChan,
Context: ctx,
ResourceInterpreter: resourceInterpreter,
ClusterClientOption: clusterClientOption,
}
if err := controllers.StartControllers(controllerContext, controllersDisabledByDefault); err != nil {
@@ -297,7 +310,7 @@ func setupControllers(mgr controllerruntime.Manager, opts *options.Options, stop
// Ensure the InformerManager stops when the context is canceled
go func() {
<-stopChan
<-ctx.Done()
genericmanager.StopInstance()
}()
@@ -312,10 +325,9 @@ func startClusterStatusController(ctx controllerscontext.Context) (bool, error)
PredicateFunc: helper.NewClusterPredicateOnAgent(ctx.Opts.ClusterName),
TypedInformerManager: typedmanager.GetInstance(),
GenericInformerManager: genericmanager.GetInstance(),
StopChan: ctx.StopChan,
ClusterClientSetFunc: util.NewClusterClientSetForAgent,
ClusterDynamicClientSetFunc: util.NewClusterDynamicClientSetForAgent,
ClusterClientOption: &util.ClientOption{QPS: ctx.Opts.ClusterAPIQPS, Burst: ctx.Opts.ClusterAPIBurst},
ClusterClientOption: ctx.ClusterClientOption,
ClusterStatusUpdateFrequency: ctx.Opts.ClusterStatusUpdateFrequency,
ClusterLeaseDuration: ctx.Opts.ClusterLeaseDuration,
ClusterLeaseRenewIntervalFraction: ctx.Opts.ClusterLeaseRenewIntervalFraction,
@@ -337,9 +349,8 @@ func startExecutionController(ctx controllerscontext.Context) (bool, error) {
EventRecorder: ctx.Mgr.GetEventRecorderFor(execution.ControllerName),
RESTMapper: ctx.Mgr.GetRESTMapper(),
ObjectWatcher: ctx.ObjectWatcher,
PredicateFunc: helper.NewExecutionPredicateOnAgent(),
InformerManager: genericmanager.GetInstance(),
RatelimiterOptions: ctx.Opts.RateLimiterOptions,
RateLimiterOptions: ctx.Opts.RateLimiterOptions,
}
if err := executionController.SetupWithManager(ctx.Mgr); err != nil {
return false, err
@@ -353,9 +364,8 @@ func startWorkStatusController(ctx controllerscontext.Context) (bool, error) {
EventRecorder: ctx.Mgr.GetEventRecorderFor(status.WorkStatusControllerName),
RESTMapper: ctx.Mgr.GetRESTMapper(),
InformerManager: genericmanager.GetInstance(),
StopChan: ctx.StopChan,
Context: ctx.Context,
ObjectWatcher: ctx.ObjectWatcher,
PredicateFunc: helper.NewExecutionPredicateOnAgent(),
ClusterDynamicClientSetFunc: util.NewClusterDynamicClientSetForAgent,
ClusterCacheSyncTimeout: ctx.Opts.ClusterCacheSyncTimeout,
ConcurrentWorkStatusSyncs: ctx.Opts.ConcurrentWorkSyncs,
@@ -375,11 +385,15 @@ func startServiceExportController(ctx controllerscontext.Context) (bool, error)
EventRecorder: ctx.Mgr.GetEventRecorderFor(mcs.ServiceExportControllerName),
RESTMapper: ctx.Mgr.GetRESTMapper(),
InformerManager: genericmanager.GetInstance(),
StopChan: ctx.StopChan,
Context: ctx.Context,
WorkerNumber: 3,
PredicateFunc: helper.NewPredicateForServiceExportControllerOnAgent(ctx.Opts.ClusterName),
ClusterDynamicClientSetFunc: util.NewClusterDynamicClientSetForAgent,
ClusterCacheSyncTimeout: ctx.Opts.ClusterCacheSyncTimeout,
RateLimiterOptions: ctx.Opts.RateLimiterOptions,
}
if err := indexregistry.RegisterWorkIndexByFieldSuspendDispatching(ctx.Context, ctx.Mgr); err != nil {
return false, err
}
serviceExportController.RunWorkQueue()
if err := serviceExportController.SetupWithManager(ctx.Mgr); err != nil {
@@ -397,11 +411,12 @@ func startEndpointSliceCollectController(ctx controllerscontext.Context) (enable
Client: ctx.Mgr.GetClient(),
RESTMapper: ctx.Mgr.GetRESTMapper(),
InformerManager: genericmanager.GetInstance(),
StopChan: ctx.StopChan,
Context: ctx.Context,
WorkerNumber: 3,
PredicateFunc: helper.NewPredicateForEndpointSliceCollectControllerOnAgent(opts.ClusterName),
ClusterDynamicClientSetFunc: util.NewClusterDynamicClientSet,
ClusterCacheSyncTimeout: opts.ClusterCacheSyncTimeout,
RateLimiterOptions: ctx.Opts.RateLimiterOptions,
}
endpointSliceCollectController.RunWorkQueue()
if err := endpointSliceCollectController.SetupWithManager(ctx.Mgr); err != nil {

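A recurring change in this file (mirrored in the other commands below) replaces direct QPS/Burst assignment on the rest.Config with an explicit client-side token bucket. A minimal sketch of that pattern, with placeholder path and limits; a non-nil RateLimiter takes precedence over the config's QPS/Burst fields:

package main

import (
	"fmt"

	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/util/flowcontrol"
)

// buildRateLimitedConfig mirrors the pattern adopted above: install an
// explicit token-bucket limiter instead of setting cfg.QPS and cfg.Burst.
func buildRateLimitedConfig(kubeconfig string, qps float32, burst int) (*rest.Config, error) {
	cfg, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
	if err != nil {
		return nil, err
	}
	cfg.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(qps, burst)
	return cfg, nil
}

func main() {
	// Placeholder kubeconfig path and limits.
	cfg, err := buildRateLimitedConfig("/etc/karmada/karmada.kubeconfig", 40, 60)
	if err != nil {
		panic(err)
	}
	fmt.Println("host:", cfg.Host)
}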
View File

@@ -20,6 +20,7 @@ import (
"os"
"k8s.io/component-base/cli"
"k8s.io/component-base/logs"
_ "k8s.io/component-base/logs/json/register" // for JSON log format registration
"k8s.io/klog/v2"
controllerruntime "sigs.k8s.io/controller-runtime"
@@ -37,5 +38,7 @@ func main() {
controllerruntime.SetLogger(klog.Background())
cmd := app.NewAgentCommand(ctx)
code := cli.Run(cmd)
// Ensure any buffered log entries are flushed
logs.FlushLogs()
os.Exit(code)
}

View File

@@ -21,9 +21,12 @@ import (
"github.com/spf13/cobra"
cliflag "k8s.io/component-base/cli/flag"
"k8s.io/component-base/logs"
logsv1 "k8s.io/component-base/logs/api/v1"
"k8s.io/component-base/term"
"github.com/karmada-io/karmada/cmd/aggregated-apiserver/app/options"
"github.com/karmada-io/karmada/pkg/features"
"github.com/karmada-io/karmada/pkg/sharedcli"
"github.com/karmada-io/karmada/pkg/sharedcli/klogflag"
"github.com/karmada-io/karmada/pkg/util/names"
@@ -33,12 +36,30 @@ import (
// NewAggregatedApiserverCommand creates a *cobra.Command object with default parameters
func NewAggregatedApiserverCommand(ctx context.Context) *cobra.Command {
opts := options.NewOptions()
logConfig := logsv1.NewLoggingConfiguration()
fss := cliflag.NamedFlagSets{}
// Set klog flags
logsFlagSet := fss.FlagSet("logs")
logs.AddFlags(logsFlagSet, logs.SkipLoggingConfigurationFlags())
logsv1.AddFlags(logConfig, logsFlagSet)
klogflag.Add(logsFlagSet)
genericFlagSet := fss.FlagSet("generic")
opts.AddFlags(genericFlagSet)
cmd := &cobra.Command{
Use: names.KarmadaAggregatedAPIServerComponentName,
Long: `The karmada-aggregated-apiserver starts an aggregated server.
It is responsible for registering the Cluster API and providing the ability to aggregate APIs,
allowing users to access member clusters from the control plane directly.`,
PersistentPreRunE: func(_ *cobra.Command, _ []string) error {
if err := logsv1.ValidateAndApply(logConfig, features.FeatureGate); err != nil {
return err
}
logs.InitLogs()
return nil
},
RunE: func(_ *cobra.Command, _ []string) error {
if err := opts.Complete(); err != nil {
return err
@@ -53,15 +74,6 @@ allowing users to access member clusters from the control plane directly.`,
},
}
fss := cliflag.NamedFlagSets{}
genericFlagSet := fss.FlagSet("generic")
opts.AddFlags(genericFlagSet)
// Set klog flags
logsFlagSet := fss.FlagSet("logs")
klogflag.Add(logsFlagSet)
cmd.AddCommand(sharedcommand.NewCmdVersion(names.KarmadaAggregatedAPIServerComponentName))
cmd.Flags().AddFlagSet(genericFlagSet)
cmd.Flags().AddFlagSet(logsFlagSet)
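The flag refactor above (also applied to the agent) registers the component-base structured-logging flags up front and applies them in PersistentPreRunE before RunE runs. A condensed, self-contained sketch of that wiring; the stand-alone feature gate here is a placeholder for karmada's features.FeatureGate:

package main

import (
	"github.com/spf13/cobra"
	cliflag "k8s.io/component-base/cli/flag"
	"k8s.io/component-base/featuregate"
	"k8s.io/component-base/logs"
	logsv1 "k8s.io/component-base/logs/api/v1"
	_ "k8s.io/component-base/logs/json/register" // register the JSON log format, as the main packages above do
)

// newCommand wires the logging flags, then validates and applies the logging
// configuration before any command logic executes.
func newCommand() *cobra.Command {
	logConfig := logsv1.NewLoggingConfiguration()
	gate := featuregate.NewFeatureGate() // placeholder gate; karmada passes features.FeatureGate

	fss := cliflag.NamedFlagSets{}
	logsFlagSet := fss.FlagSet("logs")
	logs.AddFlags(logsFlagSet, logs.SkipLoggingConfigurationFlags())
	logsv1.AddFlags(logConfig, logsFlagSet)

	cmd := &cobra.Command{
		Use: "example",
		PersistentPreRunE: func(_ *cobra.Command, _ []string) error {
			if err := logsv1.ValidateAndApply(logConfig, gate); err != nil {
				return err
			}
			logs.InitLogs()
			return nil
		},
		RunE: func(_ *cobra.Command, _ []string) error { return nil },
	}
	cmd.Flags().AddFlagSet(logsFlagSet)
	return cmd
}

func main() {
	defer logs.FlushLogs() // flush buffered entries, as the refactored main packages do
	_ = newCommand().Execute()
}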

View File

@@ -33,9 +33,10 @@ import (
genericfilters "k8s.io/apiserver/pkg/server/filters"
genericoptions "k8s.io/apiserver/pkg/server/options"
"k8s.io/apiserver/pkg/storage/storagebackend"
"k8s.io/apiserver/pkg/util/compatibility"
utilfeature "k8s.io/apiserver/pkg/util/feature"
utilversion "k8s.io/apiserver/pkg/util/version"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/util/flowcontrol"
"k8s.io/klog/v2"
netutils "k8s.io/utils/net"
@@ -120,9 +121,9 @@ func (o *Options) Run(ctx context.Context) error {
}
restConfig := config.GenericConfig.ClientConfig
restConfig.QPS, restConfig.Burst = o.KubeAPIQPS, o.KubeAPIBurst
restConfig.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(o.KubeAPIQPS, o.KubeAPIBurst)
secretLister := config.GenericConfig.SharedInformerFactory.Core().V1().Secrets().Lister()
config.GenericConfig.EffectiveVersion = utilversion.NewEffectiveVersion("1.0")
config.GenericConfig.EffectiveVersion = compatibility.DefaultBuildEffectiveVersion()
server, err := config.Complete().New(restConfig, secretLister)
if err != nil {

View File

@@ -20,6 +20,7 @@ import (
"os"
"k8s.io/component-base/cli"
"k8s.io/component-base/logs"
_ "k8s.io/component-base/logs/json/register" // for JSON log format registration
controllerruntime "sigs.k8s.io/controller-runtime"
@@ -29,6 +30,7 @@ import (
func main() {
ctx := controllerruntime.SetupSignalHandler()
cmd := app.NewAggregatedApiserverCommand(ctx)
code := cli.Run(cmd)
os.Exit(code)
exitCode := cli.Run(cmd)
logs.FlushLogs()
os.Exit(exitCode)
}

View File

@@ -29,18 +29,22 @@ import (
"k8s.io/client-go/informers"
kubeclientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/util/flowcontrol"
cliflag "k8s.io/component-base/cli/flag"
"k8s.io/component-base/logs"
logsv1 "k8s.io/component-base/logs/api/v1"
"k8s.io/component-base/term"
"k8s.io/klog/v2"
resourceclient "k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1"
"k8s.io/metrics/pkg/client/custom_metrics"
"k8s.io/metrics/pkg/client/external_metrics"
"k8s.io/utils/ptr"
controllerruntime "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/config"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/healthz"
crtlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics"
ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics"
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
"sigs.k8s.io/controller-runtime/pkg/predicate"
@@ -67,6 +71,7 @@ import (
"github.com/karmada-io/karmada/pkg/controllers/namespace"
"github.com/karmada-io/karmada/pkg/controllers/remediation"
"github.com/karmada-io/karmada/pkg/controllers/status"
"github.com/karmada-io/karmada/pkg/controllers/taint"
"github.com/karmada-io/karmada/pkg/controllers/unifiedauth"
"github.com/karmada-io/karmada/pkg/controllers/workloadrebalancer"
"github.com/karmada-io/karmada/pkg/dependenciesdistributor"
@ -84,6 +89,7 @@ import (
"github.com/karmada-io/karmada/pkg/util/fedinformer/typedmanager"
"github.com/karmada-io/karmada/pkg/util/gclient"
"github.com/karmada-io/karmada/pkg/util/helper"
"github.com/karmada-io/karmada/pkg/util/indexregistry"
"github.com/karmada-io/karmada/pkg/util/names"
"github.com/karmada-io/karmada/pkg/util/objectwatcher"
"github.com/karmada-io/karmada/pkg/util/overridemanager"
@@ -94,36 +100,51 @@ import (
// NewControllerManagerCommand creates a *cobra.Command object with default parameters
func NewControllerManagerCommand(ctx context.Context) *cobra.Command {
opts := options.NewOptions()
cmd := &cobra.Command{
Use: names.KarmadaControllerManagerComponentName,
Long: `The karmada-controller-manager runs various controllers.
The controllers watch Karmada objects and then talk to the underlying clusters' API servers
to create regular Kubernetes resources.`,
RunE: func(_ *cobra.Command, _ []string) error {
// validate options
if errs := opts.Validate(); len(errs) != 0 {
return errs.ToAggregate()
}
return Run(ctx, opts)
},
}
logConfig := logsv1.NewLoggingConfiguration()
fss := cliflag.NamedFlagSets{}
logsFlagSet := fss.FlagSet("logs")
logs.AddFlags(logsFlagSet, logs.SkipLoggingConfigurationFlags())
logsv1.AddFlags(logConfig, logsFlagSet)
klogflag.Add(logsFlagSet)
genericFlagSet := fss.FlagSet("generic")
// Add the flag(--kubeconfig) that is added by controller-runtime
// Add the flag(--kubeconfig) that is added by controller-runtime.
// (https://github.com/kubernetes-sigs/controller-runtime/blob/v0.11.1/pkg/client/config/config.go#L39),
// and update the flag usage.
genericFlagSet.AddGoFlagSet(flag.CommandLine)
genericFlagSet.Lookup("kubeconfig").Usage = "Path to karmada control plane kubeconfig file."
opts := options.NewOptions()
opts.AddFlags(genericFlagSet, controllers.ControllerNames(), sets.List(controllersDisabledByDefault))
// Set klog flags
logsFlagSet := fss.FlagSet("logs")
klogflag.Add(logsFlagSet)
cmd := &cobra.Command{
Use: names.KarmadaControllerManagerComponentName,
Long: `The karmada-controller-manager runs various controllers.
The controllers watch Karmada objects and then talk to the underlying
clusters' API servers to create regular Kubernetes resources.`,
PersistentPreRunE: func(_ *cobra.Command, _ []string) error {
if err := logsv1.ValidateAndApply(logConfig, features.FeatureGate); err != nil {
return err
}
logs.InitLogs()
// Starting from version 0.15.0, controller-runtime expects its consumers to set a logger through log.SetLogger.
// If SetLogger is not called within the first 30 seconds of a binary's lifetime, it will get
// set to a NullLogSink and report an error. Setting a logger here silences the
// "log.SetLogger(...) was never called; logs will not be displayed" error.
// For more info, refer to: https://github.com/karmada-io/karmada/pull/4885.
controllerruntime.SetLogger(klog.Background())
return nil
},
RunE: func(_ *cobra.Command, _ []string) error {
if errs := opts.Validate(); len(errs) != 0 {
return errs.ToAggregate()
}
return Run(ctx, opts)
},
}
cmd.AddCommand(sharedcommand.NewCmdVersion(names.KarmadaControllerManagerComponentName))
cmd.Flags().AddFlagSet(genericFlagSet)
@@ -131,6 +152,7 @@ to create regular Kubernetes resources.`,
cols, _, _ := term.TerminalSize(cmd.OutOrStdout())
sharedcli.SetUsageAndHelpFunc(cmd, fss, cols)
return cmd
}
@@ -144,7 +166,7 @@ func Run(ctx context.Context, opts *options.Options) error {
if err != nil {
panic(err)
}
controlPlaneRestConfig.QPS, controlPlaneRestConfig.Burst = opts.KubeAPIQPS, opts.KubeAPIBurst
controlPlaneRestConfig.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(opts.KubeAPIQPS, opts.KubeAPIBurst)
controllerManager, err := controllerruntime.NewManager(controlPlaneRestConfig, controllerruntime.Options{
Logger: klog.Background(),
Scheme: gclient.NewSchema(),
@@ -172,6 +194,7 @@ func Run(ctx context.Context, opts *options.Options) error {
schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Namespace"}.GroupKind().String(): opts.ConcurrentNamespaceSyncs,
},
CacheSyncTimeout: opts.ClusterCacheSyncTimeout.Duration,
UsePriorityQueue: ptr.To(features.FeatureGate.Enabled(features.ControllerPriorityQueue)),
},
NewCache: func(config *rest.Config, opts cache.Options) (cache.Cache, error) {
opts.DefaultTransform = fedinformer.StripUnusedFields
@@ -188,15 +211,12 @@ func Run(ctx context.Context, opts *options.Options) error {
return err
}
crtlmetrics.Registry.MustRegister(metrics.ClusterCollectors()...)
crtlmetrics.Registry.MustRegister(metrics.ResourceCollectors()...)
crtlmetrics.Registry.MustRegister(metrics.PoolCollectors()...)
ctrlmetrics.Registry.MustRegister(metrics.ClusterCollectors()...)
ctrlmetrics.Registry.MustRegister(metrics.ResourceCollectors()...)
ctrlmetrics.Registry.MustRegister(metrics.PoolCollectors()...)
ctrlmetrics.Registry.MustRegister(metrics.NewBuildInfoCollector())
if err := helper.IndexWork(ctx, controllerManager); err != nil {
klog.Fatalf("Failed to index Work: %v", err)
}
setupControllers(controllerManager, opts, ctx.Done())
setupControllers(ctx, controllerManager, opts)
// blocks until the context is done.
if err := controllerManager.Start(ctx); err != nil {
@@ -227,6 +247,7 @@ func init() {
controllers["unifiedAuth"] = startUnifiedAuthController
controllers["federatedResourceQuotaSync"] = startFederatedResourceQuotaSyncController
controllers["federatedResourceQuotaStatus"] = startFederatedResourceQuotaStatusController
controllers["federatedResourceQuotaEnforcement"] = startFederatedResourceQuotaEnforcementController
controllers["gracefulEviction"] = startGracefulEvictionController
controllers["applicationFailover"] = startApplicationFailoverController
controllers["federatedHorizontalPodAutoscaler"] = startFederatedHorizontalPodAutoscalerController
@@ -239,40 +260,51 @@ func init() {
controllers["remedy"] = startRemedyController
controllers["workloadRebalancer"] = startWorkloadRebalancerController
controllers["agentcsrapproving"] = startAgentCSRApprovingController
controllers["clustertaintpolicy"] = startClusterTaintPolicyController
}
func startClusterController(ctx controllerscontext.Context) (enabled bool, err error) {
mgr := ctx.Mgr
opts := ctx.Opts
// Indexes are added to help the cluster-controller and TaintManager quickly locate ResourceBinding
// and ClusterResourceBinding resources associated with a given cluster when eviction is needed.
if err := indexregistry.RegisterResourceBindingIndexByFieldCluster(ctx.Context, mgr); err != nil {
return false, err
}
if err := indexregistry.RegisterClusterResourceBindingIndexByFieldCluster(ctx.Context, mgr); err != nil {
return false, err
}
clusterController := &cluster.Controller{
Client: mgr.GetClient(),
EventRecorder: mgr.GetEventRecorderFor(cluster.ControllerName),
ClusterMonitorPeriod: opts.ClusterMonitorPeriod.Duration,
ClusterMonitorGracePeriod: opts.ClusterMonitorGracePeriod.Duration,
ClusterStartupGracePeriod: opts.ClusterStartupGracePeriod.Duration,
FailoverEvictionTimeout: opts.FailoverEvictionTimeout.Duration,
EnableTaintManager: ctx.Opts.EnableTaintManager,
ClusterTaintEvictionRetryFrequency: 10 * time.Second,
ExecutionSpaceRetryFrequency: 10 * time.Second,
Client: mgr.GetClient(),
EventRecorder: mgr.GetEventRecorderFor(cluster.ControllerName),
ClusterMonitorPeriod: opts.ClusterMonitorPeriod.Duration,
ClusterMonitorGracePeriod: opts.ClusterMonitorGracePeriod.Duration,
ClusterStartupGracePeriod: opts.ClusterStartupGracePeriod.Duration,
CleanupCheckInterval: 10 * time.Second,
RateLimiterOptions: ctx.Opts.RateLimiterOptions,
}
if err := clusterController.SetupWithManager(mgr); err != nil {
return false, err
}
if ctx.Opts.EnableTaintManager {
if err := cluster.IndexField(mgr); err != nil {
return false, err
}
// Taint-based eviction should only take effect if the Failover feature is enabled
if ctx.Opts.EnableTaintManager && features.FeatureGate.Enabled(features.Failover) {
taintManager := &cluster.NoExecuteTaintManager{
Client: mgr.GetClient(),
EventRecorder: mgr.GetEventRecorderFor(cluster.TaintManagerName),
ClusterTaintEvictionRetryFrequency: 10 * time.Second,
ConcurrentReconciles: 3,
RateLimiterOptions: ctx.Opts.RateLimiterOptions,
EnableNoExecuteTaintEviction: ctx.Opts.FailoverConfiguration.EnableNoExecuteTaintEviction,
NoExecuteTaintEvictionPurgeMode: ctx.Opts.FailoverConfiguration.NoExecuteTaintEvictionPurgeMode,
}
if err := taintManager.SetupWithManager(mgr); err != nil {
return false, err
}
} else {
klog.Infof("Skipping registration of TaintManager, please check that TaintManager option and Failover feature-gate are enabled.")
}
return true, nil
@@ -281,7 +313,6 @@ func startClusterController(ctx controllerscontext.Context) (enabled bool, err e
func startClusterStatusController(ctx controllerscontext.Context) (enabled bool, err error) {
mgr := ctx.Mgr
opts := ctx.Opts
stopChan := ctx.StopChan
clusterPredicateFunc := predicate.Funcs{
CreateFunc: func(createEvent event.CreateEvent) bool {
obj := createEvent.Object.(*clusterv1alpha1.Cluster)
@@ -321,10 +352,9 @@ func startClusterStatusController(ctx controllerscontext.Context) (enabled bool,
PredicateFunc: clusterPredicateFunc,
TypedInformerManager: typedmanager.GetInstance(),
GenericInformerManager: genericmanager.GetInstance(),
StopChan: stopChan,
ClusterClientSetFunc: util.NewClusterClientSet,
ClusterDynamicClientSetFunc: util.NewClusterDynamicClientSet,
ClusterClientOption: &util.ClientOption{QPS: opts.ClusterAPIQPS, Burst: opts.ClusterAPIBurst},
ClusterClientOption: ctx.ClusterClientOption,
ClusterStatusUpdateFrequency: opts.ClusterStatusUpdateFrequency,
ClusterLeaseDuration: opts.ClusterLeaseDuration,
ClusterLeaseRenewIntervalFraction: opts.ClusterLeaseRenewIntervalFraction,
@@ -341,6 +371,12 @@ func startClusterStatusController(ctx controllerscontext.Context) (enabled bool,
}
func startBindingController(ctx controllerscontext.Context) (enabled bool, err error) {
// To efficiently clean up Work resources created by the bindingController when a cluster or an RB/CRB is deleted,
// we index the Work resources to reduce the overhead during each check.
if err = indexregistry.RegisterWorkIndexByLabelResourceBindingID(ctx.Context, ctx.Mgr); err != nil {
klog.Errorf("Failed to register index for Work based on ResourceBinding ID: %v", err)
return false, err
}
bindingController := &binding.ResourceBindingController{
Client: ctx.Mgr.GetClient(),
DynamicClient: ctx.DynamicClientSet,
@@ -355,6 +391,10 @@ func startBindingController(ctx controllerscontext.Context) (enabled bool, err e
return false, err
}
if err = indexregistry.RegisterWorkIndexByLabelClusterResourceBindingID(ctx.Context, ctx.Mgr); err != nil {
klog.Errorf("Failed to register index for Work based on ClusterResourceBinding ID: %v", err)
return false, err
}
clusterResourceBindingController := &binding.ClusterResourceBindingController{
Client: ctx.Mgr.GetClient(),
DynamicClient: ctx.DynamicClientSet,
@@ -372,6 +412,11 @@ func startBindingController(ctx controllerscontext.Context) (enabled bool, err e
}
func startBindingStatusController(ctx controllerscontext.Context) (enabled bool, err error) {
// Indexing Work resources allows efficient retrieval for aggregating status.
if err = indexregistry.RegisterWorkIndexByLabelResourceBindingID(ctx.Context, ctx.Mgr); err != nil {
klog.Errorf("Failed to register index for Work based on ResourceBinding ID: %v", err)
return false, err
}
rbStatusController := &status.RBStatusController{
Client: ctx.Mgr.GetClient(),
DynamicClient: ctx.DynamicClientSet,
@@ -385,6 +430,10 @@ func startBindingStatusController(ctx controllerscontext.Context) (enabled bool,
return false, err
}
if err = indexregistry.RegisterWorkIndexByLabelClusterResourceBindingID(ctx.Context, ctx.Mgr); err != nil {
klog.Errorf("Failed to register index for Work based on ClusterResourceBinding ID: %v", err)
return false, err
}
crbStatusController := &status.CRBStatusController{
Client: ctx.Mgr.GetClient(),
DynamicClient: ctx.DynamicClientSet,
@@ -407,9 +456,9 @@ func startExecutionController(ctx controllerscontext.Context) (enabled bool, err
EventRecorder: ctx.Mgr.GetEventRecorderFor(execution.ControllerName),
RESTMapper: ctx.Mgr.GetRESTMapper(),
ObjectWatcher: ctx.ObjectWatcher,
PredicateFunc: helper.NewExecutionPredicate(ctx.Mgr),
WorkPredicateFunc: helper.WorkWithinPushClusterPredicate(ctx.Mgr),
InformerManager: genericmanager.GetInstance(),
RatelimiterOptions: ctx.Opts.RateLimiterOptions,
RateLimiterOptions: ctx.Opts.RateLimiterOptions,
}
if err := executionController.SetupWithManager(ctx.Mgr); err != nil {
return false, err
@@ -424,10 +473,11 @@ func startWorkStatusController(ctx controllerscontext.Context) (enabled bool, er
EventRecorder: ctx.Mgr.GetEventRecorderFor(status.WorkStatusControllerName),
RESTMapper: ctx.Mgr.GetRESTMapper(),
InformerManager: genericmanager.GetInstance(),
StopChan: ctx.StopChan,
Context: ctx.Context,
ObjectWatcher: ctx.ObjectWatcher,
PredicateFunc: helper.NewExecutionPredicate(ctx.Mgr),
WorkPredicateFunc: helper.WorkWithinPushClusterPredicate(ctx.Mgr),
ClusterDynamicClientSetFunc: util.NewClusterDynamicClientSet,
ClusterClientOption: ctx.ClusterClientOption,
ClusterCacheSyncTimeout: opts.ClusterCacheSyncTimeout,
ConcurrentWorkStatusSyncs: opts.ConcurrentWorkSyncs,
RateLimiterOptions: ctx.Opts.RateLimiterOptions,
@@ -447,6 +497,7 @@ func startNamespaceController(ctx controllerscontext.Context) (enabled bool, err
EventRecorder: ctx.Mgr.GetEventRecorderFor(namespace.ControllerName),
SkippedPropagatingNamespaces: ctx.Opts.SkippedPropagatingNamespaces,
OverrideManager: ctx.OverrideManager,
RateLimiterOptions: ctx.Opts.RateLimiterOptions,
}
if err := namespaceSyncController.SetupWithManager(ctx.Mgr); err != nil {
return false, err
@@ -461,11 +512,17 @@ func startServiceExportController(ctx controllerscontext.Context) (enabled bool,
EventRecorder: ctx.Mgr.GetEventRecorderFor(mcs.ServiceExportControllerName),
RESTMapper: ctx.Mgr.GetRESTMapper(),
InformerManager: genericmanager.GetInstance(),
StopChan: ctx.StopChan,
Context: ctx.Context,
WorkerNumber: 3,
PredicateFunc: helper.NewPredicateForServiceExportController(ctx.Mgr),
ClusterDynamicClientSetFunc: util.NewClusterDynamicClientSet,
ClusterClientOption: ctx.ClusterClientOption,
ClusterCacheSyncTimeout: opts.ClusterCacheSyncTimeout,
RateLimiterOptions: ctx.Opts.RateLimiterOptions,
}
// Add an index so ServiceExportController can quickly find and delete related Work resources.
if err = indexregistry.RegisterWorkIndexByFieldSuspendDispatching(ctx.Context, ctx.Mgr); err != nil {
return false, err
}
serviceExportController.RunWorkQueue()
if err := serviceExportController.SetupWithManager(ctx.Mgr); err != nil {
@@ -483,11 +540,13 @@ func startEndpointSliceCollectController(ctx controllerscontext.Context) (enable
Client: ctx.Mgr.GetClient(),
RESTMapper: ctx.Mgr.GetRESTMapper(),
InformerManager: genericmanager.GetInstance(),
StopChan: ctx.StopChan,
Context: ctx.Context,
WorkerNumber: 3,
PredicateFunc: helper.NewPredicateForEndpointSliceCollectController(ctx.Mgr),
ClusterDynamicClientSetFunc: util.NewClusterDynamicClientSet,
ClusterClientOption: ctx.ClusterClientOption,
ClusterCacheSyncTimeout: opts.ClusterCacheSyncTimeout,
RateLimiterOptions: ctx.Opts.RateLimiterOptions,
}
endpointSliceCollectController.RunWorkQueue()
if err := endpointSliceCollectController.SetupWithManager(ctx.Mgr); err != nil {
@@ -501,10 +560,11 @@ func startEndpointSliceDispatchController(ctx controllerscontext.Context) (enabl
return false, nil
}
endpointSliceSyncController := &multiclusterservice.EndpointsliceDispatchController{
Client: ctx.Mgr.GetClient(),
EventRecorder: ctx.Mgr.GetEventRecorderFor(multiclusterservice.EndpointsliceDispatchControllerName),
RESTMapper: ctx.Mgr.GetRESTMapper(),
InformerManager: genericmanager.GetInstance(),
Client: ctx.Mgr.GetClient(),
EventRecorder: ctx.Mgr.GetEventRecorderFor(multiclusterservice.EndpointsliceDispatchControllerName),
RESTMapper: ctx.Mgr.GetRESTMapper(),
InformerManager: genericmanager.GetInstance(),
RateLimiterOptions: ctx.Opts.RateLimiterOptions,
}
if err := endpointSliceSyncController.SetupWithManager(ctx.Mgr); err != nil {
return false, err
@@ -514,8 +574,9 @@ func startEndpointSliceDispatchController(ctx controllerscontext.Context) (enabl
func startEndpointSliceController(ctx controllerscontext.Context) (enabled bool, err error) {
endpointSliceController := &mcs.EndpointSliceController{
Client: ctx.Mgr.GetClient(),
EventRecorder: ctx.Mgr.GetEventRecorderFor(mcs.EndpointSliceControllerName),
Client: ctx.Mgr.GetClient(),
EventRecorder: ctx.Mgr.GetEventRecorderFor(mcs.EndpointSliceControllerName),
RateLimiterOptions: ctx.Opts.RateLimiterOptions,
}
if err := endpointSliceController.SetupWithManager(ctx.Mgr); err != nil {
return false, err
@@ -525,8 +586,9 @@ func startEndpointSliceController(ctx controllerscontext.Context) (enabled bool,
func startServiceImportController(ctx controllerscontext.Context) (enabled bool, err error) {
serviceImportController := &mcs.ServiceImportController{
Client: ctx.Mgr.GetClient(),
EventRecorder: ctx.Mgr.GetEventRecorderFor(mcs.ServiceImportControllerName),
Client: ctx.Mgr.GetClient(),
EventRecorder: ctx.Mgr.GetEventRecorderFor(mcs.ServiceImportControllerName),
RateLimiterOptions: ctx.Opts.RateLimiterOptions,
}
if err := serviceImportController.SetupWithManager(ctx.Mgr); err != nil {
return false, err
@@ -536,8 +598,9 @@ func startServiceImportController(ctx controllerscontext.Context) (enabled bool,
func startUnifiedAuthController(ctx controllerscontext.Context) (enabled bool, err error) {
unifiedAuthController := &unifiedauth.Controller{
Client: ctx.Mgr.GetClient(),
EventRecorder: ctx.Mgr.GetEventRecorderFor(unifiedauth.ControllerName),
Client: ctx.Mgr.GetClient(),
EventRecorder: ctx.Mgr.GetEventRecorderFor(unifiedauth.ControllerName),
RateLimiterOptions: ctx.Opts.RateLimiterOptions,
}
if err := unifiedAuthController.SetupWithManager(ctx.Mgr); err != nil {
return false, err
@@ -547,8 +610,9 @@ func startUnifiedAuthController(ctx controllerscontext.Context) (enabled bool, e
func startFederatedResourceQuotaSyncController(ctx controllerscontext.Context) (enabled bool, err error) {
controller := federatedresourcequota.SyncController{
Client: ctx.Mgr.GetClient(),
EventRecorder: ctx.Mgr.GetEventRecorderFor(federatedresourcequota.SyncControllerName),
Client: ctx.Mgr.GetClient(),
EventRecorder: ctx.Mgr.GetEventRecorderFor(federatedresourcequota.SyncControllerName),
RateLimiterOptions: ctx.Opts.RateLimiterOptions,
}
if err = controller.SetupWithManager(ctx.Mgr); err != nil {
return false, err
@@ -558,8 +622,26 @@ func startFederatedResourceQuotaSyncController(ctx controllerscontext.Context) (
func startFederatedResourceQuotaStatusController(ctx controllerscontext.Context) (enabled bool, err error) {
controller := federatedresourcequota.StatusController{
Client: ctx.Mgr.GetClient(),
EventRecorder: ctx.Mgr.GetEventRecorderFor(federatedresourcequota.StatusControllerName),
RateLimiterOptions: ctx.Opts.RateLimiterOptions,
}
if err = controller.SetupWithManager(ctx.Mgr); err != nil {
return false, err
}
return true, nil
}
func startFederatedResourceQuotaEnforcementController(ctx controllerscontext.Context) (enabled bool, err error) {
if !features.FeatureGate.Enabled(features.FederatedQuotaEnforcement) {
return false, nil
}
controller := federatedresourcequota.QuotaEnforcementController{
Client: ctx.Mgr.GetClient(),
EventRecorder: ctx.Mgr.GetEventRecorderFor(federatedresourcequota.StatusControllerName),
EventRecorder: ctx.Mgr.GetEventRecorderFor(federatedresourcequota.QuotaEnforcementControllerName),
Recalculation: federatedresourcequota.QuotaRecalculation{
ResyncPeriod: ctx.Opts.FederatedResourceQuotaOptions.ResourceQuotaSyncPeriod,
},
}
if err = controller.SetupWithManager(ctx.Mgr); err != nil {
return false, err
@@ -596,6 +678,7 @@ func startApplicationFailoverController(ctx controllerscontext.Context) (enabled
Client: ctx.Mgr.GetClient(),
EventRecorder: ctx.Mgr.GetEventRecorderFor(applicationfailover.RBApplicationFailoverControllerName),
ResourceInterpreter: ctx.ResourceInterpreter,
RateLimiterOptions: ctx.Opts.RateLimiterOptions,
}
if err = rbApplicationFailoverController.SetupWithManager(ctx.Mgr); err != nil {
return false, err
@@ -605,6 +688,7 @@ func startApplicationFailoverController(ctx controllerscontext.Context) (enabled
Client: ctx.Mgr.GetClient(),
EventRecorder: ctx.Mgr.GetEventRecorderFor(applicationfailover.CRBApplicationFailoverControllerName),
ResourceInterpreter: ctx.ResourceInterpreter,
RateLimiterOptions: ctx.Opts.RateLimiterOptions,
}
if err = crbApplicationFailoverController.SetupWithManager(ctx.Mgr); err != nil {
return false, err
@@ -617,7 +701,7 @@ func startFederatedHorizontalPodAutoscalerController(ctx controllerscontext.Cont
go custom_metrics.PeriodicallyInvalidate(
apiVersionsGetter,
ctx.Opts.HPAControllerConfiguration.HorizontalPodAutoscalerSyncPeriod.Duration,
ctx.StopChan)
ctx.Context.Done())
metricsClient := metricsclient.NewRESTMetricsClient(
resourceclient.NewForConfigOrDie(ctx.Mgr.GetConfig()),
custom_metrics.NewForConfig(ctx.Mgr.GetConfig(), ctx.Mgr.GetRESTMapper(), apiVersionsGetter),
@@ -659,8 +743,9 @@ func startCronFederatedHorizontalPodAutoscalerController(ctx controllerscontext.
func startHPAScaleTargetMarkerController(ctx controllerscontext.Context) (enabled bool, err error) {
hpaScaleTargetMarker := hpascaletargetmarker.HpaScaleTargetMarker{
DynamicClient: ctx.DynamicClientSet,
RESTMapper: ctx.Mgr.GetRESTMapper(),
DynamicClient: ctx.DynamicClientSet,
RESTMapper: ctx.Mgr.GetRESTMapper(),
RateLimiterOptions: ctx.Opts.RateLimiterOptions,
}
err = hpaScaleTargetMarker.SetupWithManager(ctx.Mgr)
if err != nil {
@@ -672,7 +757,8 @@ func startHPAScaleTargetMarkerController(ctx controllerscontext.Context) (enable
func startDeploymentReplicasSyncerController(ctx controllerscontext.Context) (enabled bool, err error) {
deploymentReplicasSyncer := deploymentreplicassyncer.DeploymentReplicasSyncer{
Client: ctx.Mgr.GetClient(),
Client: ctx.Mgr.GetClient(),
RateLimiterOptions: ctx.Opts.RateLimiterOptions,
}
err = deploymentReplicasSyncer.SetupWithManager(ctx.Mgr)
if err != nil {
@@ -710,7 +796,8 @@ func startRemedyController(ctx controllerscontext.Context) (enabled bool, err er
func startWorkloadRebalancerController(ctx controllerscontext.Context) (enabled bool, err error) {
workloadRebalancer := workloadrebalancer.RebalancerController{
Client: ctx.Mgr.GetClient(),
Client: ctx.Mgr.GetClient(),
RateLimiterOptions: ctx.Opts.RateLimiterOptions,
}
err = workloadRebalancer.SetupWithManager(ctx.Mgr)
if err != nil {
@@ -721,7 +808,10 @@ func startWorkloadRebalancerController(ctx controllerscontext.Context) (enabled
}
func startAgentCSRApprovingController(ctx controllerscontext.Context) (enabled bool, err error) {
agentCSRApprover := approver.AgentCSRApprovingController{Client: ctx.KubeClientSet}
agentCSRApprover := approver.AgentCSRApprovingController{
Client: ctx.KubeClientSet,
RateLimiterOptions: ctx.Opts.RateLimiterOptions,
}
err = agentCSRApprover.SetupWithManager(ctx.Mgr)
if err != nil {
return false, err
@@ -729,8 +819,24 @@ func startAgentCSRApprovingController(ctx controllerscontext.Context) (enabled b
return true, nil
}
func startClusterTaintPolicyController(ctx controllerscontext.Context) (enabled bool, err error) {
if !features.FeatureGate.Enabled(features.Failover) {
return false, nil
}
clusterTaintPolicyController := taint.ClusterTaintPolicyController{
Client: ctx.Mgr.GetClient(),
EventRecorder: ctx.Mgr.GetEventRecorderFor(taint.ControllerName),
RateLimiterOptions: ctx.Opts.RateLimiterOptions,
}
if err := clusterTaintPolicyController.SetupWithManager(ctx.Mgr); err != nil {
return false, err
}
return true, nil
}
// setupControllers initializes controllers and sets them up one by one.
func setupControllers(mgr controllerruntime.Manager, opts *options.Options, stopChan <-chan struct{}) {
func setupControllers(ctx context.Context, mgr controllerruntime.Manager, opts *options.Options) {
restConfig := mgr.GetConfig()
dynamicClientSet := dynamic.NewForConfigOrDie(restConfig)
discoverClientSet := discovery.NewDiscoveryClientForConfigOrDie(restConfig)
@@ -743,25 +849,27 @@ func setupControllers(mgr controllerruntime.Manager, opts *options.Options, stop
return
}
controlPlaneInformerManager := genericmanager.NewSingleClusterInformerManager(dynamicClientSet, opts.ResyncPeriod.Duration, stopChan)
controlPlaneInformerManager := genericmanager.NewSingleClusterInformerManager(ctx, dynamicClientSet, opts.ResyncPeriod.Duration)
// We need a service lister to build a resource interpreter with `ClusterIPServiceResolver`
// which allows connection to the customized interpreter webhook without a cluster DNS service.
sharedFactory := informers.NewSharedInformerFactory(kubeClientSet, opts.ResyncPeriod.Duration)
serviceLister := sharedFactory.Core().V1().Services().Lister()
sharedFactory.Start(stopChan)
sharedFactory.WaitForCacheSync(stopChan)
sharedFactory.Start(ctx.Done())
sharedFactory.WaitForCacheSync(ctx.Done())
resourceInterpreter := resourceinterpreter.NewResourceInterpreter(controlPlaneInformerManager, serviceLister)
if err := mgr.Add(resourceInterpreter); err != nil {
klog.Fatalf("Failed to setup custom resource interpreter: %v", err)
if err := resourceInterpreter.Start(ctx); err != nil {
klog.Fatalf("Failed to start resource interpreter: %v", err)
}
objectWatcher := objectwatcher.NewObjectWatcher(mgr.GetClient(), mgr.GetRESTMapper(), util.NewClusterDynamicClientSet, resourceInterpreter)
rateLimiterGetter := util.GetClusterRateLimiterGetter().SetDefaultLimits(opts.ClusterAPIQPS, opts.ClusterAPIBurst)
clusterClientOption := &util.ClientOption{RateLimiterGetter: rateLimiterGetter.GetRateLimiter}
objectWatcher := objectwatcher.NewObjectWatcher(mgr.GetClient(), mgr.GetRESTMapper(), util.NewClusterDynamicClientSet, clusterClientOption, resourceInterpreter)
resourceDetector := &detector.ResourceDetector{
DiscoveryClientSet: discoverClientSet,
Client: mgr.GetClient(),
InformerManager: controlPlaneInformerManager,
ControllerRuntimeCache: mgr.GetCache(),
RESTMapper: mgr.GetRESTMapper(),
DynamicClient: dynamicClientSet,
SkippedResourceConfig: skippedResourceConfig,
@@ -792,7 +900,7 @@ func setupControllers(mgr controllerruntime.Manager, opts *options.Options, stop
klog.Fatalf("Failed to setup dependencies distributor: %v", err)
}
}
setupClusterAPIClusterDetector(mgr, opts, stopChan)
setupClusterAPIClusterDetector(ctx, mgr, opts)
controllerContext := controllerscontext.Context{
Mgr: mgr,
ObjectWatcher: objectWatcher,
@@ -802,14 +910,11 @@ func setupControllers(mgr controllerruntime.Manager, opts *options.Options, stop
ClusterMonitorGracePeriod: opts.ClusterMonitorGracePeriod,
ClusterStartupGracePeriod: opts.ClusterStartupGracePeriod,
ClusterStatusUpdateFrequency: opts.ClusterStatusUpdateFrequency,
FailoverEvictionTimeout: opts.FailoverEvictionTimeout,
ClusterLeaseDuration: opts.ClusterLeaseDuration,
ClusterLeaseRenewIntervalFraction: opts.ClusterLeaseRenewIntervalFraction,
ClusterSuccessThreshold: opts.ClusterSuccessThreshold,
ClusterFailureThreshold: opts.ClusterFailureThreshold,
ClusterCacheSyncTimeout: opts.ClusterCacheSyncTimeout,
ClusterAPIQPS: opts.ClusterAPIQPS,
ClusterAPIBurst: opts.ClusterAPIBurst,
SkippedPropagatingNamespaces: opts.SkippedNamespacesRegexps(),
ConcurrentWorkSyncs: opts.ConcurrentWorkSyncs,
EnableTaintManager: opts.EnableTaintManager,
@@ -817,13 +922,16 @@ func setupControllers(mgr controllerruntime.Manager, opts *options.Options, stop
GracefulEvictionTimeout: opts.GracefulEvictionTimeout,
EnableClusterResourceModeling: opts.EnableClusterResourceModeling,
HPAControllerConfiguration: opts.HPAControllerConfiguration,
FederatedResourceQuotaOptions: opts.FederatedResourceQuotaOptions,
FailoverConfiguration: opts.FailoverOptions,
},
StopChan: stopChan,
Context: ctx,
DynamicClientSet: dynamicClientSet,
KubeClientSet: kubeClientSet,
OverrideManager: overrideManager,
ControlPlaneInformerManager: controlPlaneInformerManager,
ResourceInterpreter: resourceInterpreter,
ClusterClientOption: clusterClientOption,
}
if err := controllers.StartControllers(controllerContext, controllersDisabledByDefault); err != nil {
@@ -832,13 +940,13 @@ func setupControllers(mgr controllerruntime.Manager, opts *options.Options, stop
// Ensure the InformerManager stops when the context is canceled
go func() {
<-stopChan
<-ctx.Done()
genericmanager.StopInstance()
}()
}
// setupClusterAPIClusterDetector initializes the Cluster detector with the cluster-api management cluster.
func setupClusterAPIClusterDetector(mgr controllerruntime.Manager, opts *options.Options, stopChan <-chan struct{}) {
func setupClusterAPIClusterDetector(ctx context.Context, mgr controllerruntime.Manager, opts *options.Options) {
if len(opts.ClusterAPIKubeconfig) == 0 {
return
}
@@ -859,7 +967,7 @@ func setupClusterAPIClusterDetector(mgr controllerruntime.Manager, opts *options
ControllerPlaneConfig: mgr.GetConfig(),
ClusterAPIConfig: clusterAPIRestConfig,
ClusterAPIClient: clusterAPIClient,
InformerManager: genericmanager.NewSingleClusterInformerManager(dynamic.NewForConfigOrDie(clusterAPIRestConfig), 0, stopChan),
InformerManager: genericmanager.NewSingleClusterInformerManager(ctx, dynamic.NewForConfigOrDie(clusterAPIRestConfig), 0),
ConcurrentReconciles: 3,
}
if err := mgr.Add(clusterAPIClusterDetector); err != nil {

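Several hunks in this file register indexes over Work before the controllers start, via pkg/util/indexregistry (whose internals are not shown in this diff). For orientation, a generic controller-runtime sketch of such a registration; the field key and label name below are illustrative assumptions, not the real keys:

package main

import (
	"context"

	workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/manager"
)

// registerWorkIndex illustrates the mechanism the indexregistry helpers build
// on: an index over Work objects keyed by a label value, so controllers can
// list only the Works they care about instead of scanning all of them.
func registerWorkIndex(ctx context.Context, mgr manager.Manager) error {
	return mgr.GetFieldIndexer().IndexField(ctx, &workv1alpha1.Work{}, "byResourceBindingID",
		func(obj client.Object) []string {
			if id, ok := obj.GetLabels()["resourcebinding.karmada.io/id"]; ok { // assumed label key
				return []string{id}
			}
			return nil
		})
}

func main() {}

A controller would then list with client.MatchingFields{"byResourceBindingID": id} rather than filtering every Work in the cache.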
View File

@@ -0,0 +1,60 @@
/*
Copyright 2025 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package options
import (
"github.com/spf13/pflag"
"k8s.io/apimachinery/pkg/util/validation/field"
)
// FailoverOptions holds the Failover configurations.
type FailoverOptions struct {
// EnableNoExecuteTaintEviction enables controller response to NoExecute taints on clusters,
// which triggers eviction of workloads without explicit tolerations.
EnableNoExecuteTaintEviction bool
// NoExecuteTaintEvictionPurgeMode controls resource cleanup behavior for NoExecute-triggered
// evictions (only active when --enable-no-execute-taint-eviction=true).
// Valid modes:
// - "Gracefully": first schedules workloads to new clusters and then cleans up original
// workloads after successful startup elsewhere to ensure service continuity.
// - "Directly": directly evicts workloads first (risking temporary service interruption)
// and then triggers rescheduling to other clusters.
// Default: "Gracefully".
NoExecuteTaintEvictionPurgeMode string
}
// AddFlags adds flags related to FailoverOptions for controller manager to the specified FlagSet.
func (o *FailoverOptions) AddFlags(flags *pflag.FlagSet) {
if o == nil {
return
}
flags.BoolVar(&o.EnableNoExecuteTaintEviction, "enable-no-execute-taint-eviction", false, "Enables controller response to NoExecute taints on clusters, which triggers eviction of workloads without explicit tolerations. Given the impact of eviction caused by NoExecute Taint, this parameter is designed to remain disabled by default and requires careful evaluation by administrators before being enabled.\n")
flags.StringVar(&o.NoExecuteTaintEvictionPurgeMode, "no-execute-taint-eviction-purge-mode", "Gracefully", "Controls resource cleanup behavior for NoExecute-triggered evictions (only active when --enable-no-execute-taint-eviction=true). Supported values are \"Directly\", and \"Gracefully\". \"Directly\" mode directly evicts workloads first (risking temporary service interruption) and then triggers rescheduling to other clusters, while \"Gracefully\" mode first schedules workloads to new clusters and then cleans up original workloads after successful startup elsewhere to ensure service continuity.")
}
// Validate checks FailoverOptions and returns a slice of any errors found.
func (o *FailoverOptions) Validate() field.ErrorList {
errs := field.ErrorList{}
if o.EnableNoExecuteTaintEviction &&
o.NoExecuteTaintEvictionPurgeMode != "Gracefully" &&
o.NoExecuteTaintEvictionPurgeMode != "Directly" {
errs = append(errs, field.Invalid(field.NewPath("FailoverOptions").Child("NoExecuteTaintEvictionPurgeMode"),
o.NoExecuteTaintEvictionPurgeMode, "Invalid mode"))
}
return errs
}
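
Editor's note: a minimal usage sketch of the new options (hypothetical standalone snippet; it assumes this file lives in the controller manager's options package, which the diff view does not show):

package main

import (
	"fmt"

	"github.com/karmada-io/karmada/cmd/controller-manager/app/options"
)

func main() {
	o := &options.FailoverOptions{
		EnableNoExecuteTaintEviction:    true,
		NoExecuteTaintEvictionPurgeMode: "Immediately", // hypothetical unsupported value
	}
	if errs := o.Validate(); len(errs) != 0 {
		// FailoverOptions.NoExecuteTaintEvictionPurgeMode: Invalid value:
		// "Immediately": Invalid mode
		fmt.Println(errs.ToAggregate())
	}
}

Note that the mode is only validated when eviction is enabled; with the feature off, the mode string is ignored.
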

View File

@ -0,0 +1,48 @@
/*
Copyright 2025 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package options
import (
"time"
"github.com/spf13/pflag"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/validation/field"
)
// FederatedResourceQuotaOptions holds the FederatedResourceQuota-related options.
type FederatedResourceQuotaOptions struct {
// ResourceQuotaSyncPeriod is the period for syncing federated resource quota usage status
// in the system.
ResourceQuotaSyncPeriod metav1.Duration
}
// AddFlags adds flags related to FederatedResourceQuotaOptions for controller manager to the specified FlagSet.
func (o *FederatedResourceQuotaOptions) AddFlags(fs *pflag.FlagSet) {
if o == nil {
return
}
fs.DurationVar(&o.ResourceQuotaSyncPeriod.Duration, "federated-resource-quota-sync-period", time.Minute*5, "The interval for periodic full resynchronization of FederatedResourceQuota resources. This ensures quota recalculations occur at regular intervals to correct potential inaccuracies, particularly when webhook validation produces side effects.")
}
// Validate checks FederatedResourceQuotaOptions and returns a slice of any errors found.
func (o *FederatedResourceQuotaOptions) Validate() field.ErrorList {
if o.ResourceQuotaSyncPeriod.Duration <= 0 {
return field.ErrorList{field.Invalid(field.NewPath("federatedResourceQuotaSyncPeriod"), o.ResourceQuotaSyncPeriod, "must be greater than 0")}
}
return nil
}
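
Editor's note: a matching sketch for the quota options, this time exercising the flag path (same caveat about the assumed package location):

package main

import (
	"fmt"

	"github.com/spf13/pflag"

	"github.com/karmada-io/karmada/cmd/controller-manager/app/options"
)

func main() {
	o := &options.FederatedResourceQuotaOptions{}
	fs := pflag.NewFlagSet("demo", pflag.ExitOnError)
	o.AddFlags(fs)
	_ = fs.Parse([]string{"--federated-resource-quota-sync-period=30s"})

	fmt.Println(o.ResourceQuotaSyncPeriod.Duration) // 30s
	fmt.Println(o.Validate() == nil)                // true: a positive period passes
}
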

View File

@ -54,8 +54,6 @@ type Options struct {
// ClusterStatusUpdateFrequency is the frequency at which the controller computes and reports cluster status.
// It must work with ClusterMonitorGracePeriod(--cluster-monitor-grace-period) in karmada-controller-manager.
ClusterStatusUpdateFrequency metav1.Duration
// FailoverEvictionTimeout is the grace period for deleting scheduling result on failed clusters.
FailoverEvictionTimeout metav1.Duration
// ClusterLeaseDuration is a duration that candidates for a lease need to wait to force acquire it.
// This is measured against the time of the last observed lease RenewTime.
ClusterLeaseDuration metav1.Duration
@ -146,6 +144,10 @@ type Options struct {
// in the scenario of dynamic replica assignment based on free cluster resources.
// Disable it for better performance if it does not fit your cases.
EnableClusterResourceModeling bool
// FederatedResourceQuotaOptions holds configurations for FederatedResourceQuota reconciliation.
FederatedResourceQuotaOptions FederatedResourceQuotaOptions
// FailoverOptions holds the Failover configurations.
FailoverOptions FailoverOptions
}
// NewOptions builds an empty options.
@ -195,8 +197,6 @@ func (o *Options) AddFlags(flags *pflag.FlagSet, allControllers, disabledByDefau
"Specifies the grace period of allowing a running cluster to be unresponsive before marking it unhealthy.")
flags.DurationVar(&o.ClusterStartupGracePeriod.Duration, "cluster-startup-grace-period", 60*time.Second,
"Specifies the grace period of allowing a cluster to be unresponsive during startup before marking it unhealthy.")
flags.DurationVar(&o.FailoverEvictionTimeout.Duration, "failover-eviction-timeout", 5*time.Minute,
"Specifies the grace period for deleting scheduling result on failed clusters.")
flags.StringVar(&o.SkippedPropagatingAPIs, "skipped-propagating-apis", "", "Semicolon separated resources that should be skipped from propagating in addition to the default skip list(cluster.karmada.io;policy.karmada.io;work.karmada.io). Supported formats are:\n"+
"<group> for skip resources with a specific API group(e.g. networking.k8s.io),\n"+
"<group>/<version> for skip resources with a specific API version(e.g. networking.k8s.io/v1beta1),\n"+
@ -232,6 +232,8 @@ func (o *Options) AddFlags(flags *pflag.FlagSet, allControllers, disabledByDefau
o.RateLimiterOpts.AddFlags(flags)
o.ProfileOpts.AddFlags(flags)
o.HPAControllerConfiguration.AddFlags(flags)
o.FederatedResourceQuotaOptions.AddFlags(flags)
o.FailoverOptions.AddFlags(flags)
features.FeatureGate.AddFlag(flags)
}

View File

@ -54,5 +54,9 @@ func (o *Options) Validate() field.ErrorList {
errs = append(errs, field.Invalid(newPath.Child("SkippedPropagatingNamespaces").Index(index), ns, "Invalid namespace regular expression"))
}
}
errs = append(errs, o.FederatedResourceQuotaOptions.Validate()...)
errs = append(errs, o.FailoverOptions.Validate()...)
return errs
}

View File

@ -36,6 +36,11 @@ func New(modifyOptions ModifyOptions) Options {
ClusterMonitorPeriod: metav1.Duration{Duration: 10 * time.Second},
ClusterMonitorGracePeriod: metav1.Duration{Duration: 10 * time.Second},
ClusterStartupGracePeriod: metav1.Duration{Duration: 10 * time.Second},
FederatedResourceQuotaOptions: FederatedResourceQuotaOptions{
ResourceQuotaSyncPeriod: metav1.Duration{
Duration: 10 * time.Second,
},
},
}
if modifyOptions != nil {
@ -96,6 +101,15 @@ func TestValidateControllerManagerConfiguration(t *testing.T) {
}),
expectedErrs: field.ErrorList{field.Invalid(newPath.Child("ClusterStartupGracePeriod"), metav1.Duration{Duration: 0 * time.Second}, "must be greater than 0")},
},
"invalid FailoverOptions": {
opt: New(func(options *Options) {
options.FailoverOptions.EnableNoExecuteTaintEviction = true
options.FailoverOptions.NoExecuteTaintEvictionPurgeMode = ""
}),
expectedErrs: field.ErrorList{
field.Invalid(field.NewPath("FailoverOptions").Child("NoExecuteTaintEvictionPurgeMode"), "", "Invalid mode"),
},
},
}
for _, testCase := range testCases {

View File

@ -20,8 +20,8 @@ import (
"os"
"k8s.io/component-base/cli"
_ "k8s.io/component-base/logs/json/register" // for JSON log format registration
"k8s.io/klog/v2"
"k8s.io/component-base/logs"
_ "k8s.io/component-base/logs/json/register" // To enable JSON log format support
controllerruntime "sigs.k8s.io/controller-runtime"
"github.com/karmada-io/karmada/cmd/controller-manager/app"
@ -29,13 +29,9 @@ import (
func main() {
ctx := controllerruntime.SetupSignalHandler()
// Starting from version 0.15.0, controller-runtime expects its consumers to set a logger through log.SetLogger.
// If SetLogger is not called within the first 30 seconds of a binaries lifetime, it will get
// set to a NullLogSink and report an error. Here's to silence the "log.SetLogger(...) was never called; logs will not be displayed" error
// by setting a logger through log.SetLogger.
// More info refer to: https://github.com/karmada-io/karmada/pull/4885.
controllerruntime.SetLogger(klog.Background())
cmd := app.NewControllerManagerCommand(ctx)
code := cli.Run(cmd)
os.Exit(code)
exitCode := cli.Run(cmd)
// Ensure any buffered log entries are flushed
logs.FlushLogs()
os.Exit(exitCode)
}
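
Editor's note: one detail about the new exit path, which repeats in the other main packages below: os.Exit terminates the process without running deferred functions, so the flush must be an explicit call placed before os.Exit rather than a defer. A minimal illustration of the underlying Go behavior:

package main

import (
	"fmt"
	"os"
)

func main() {
	defer fmt.Println("never printed") // deferred calls are skipped by os.Exit
	fmt.Println("printed")             // explicit work before exiting does run
	os.Exit(0)
}
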

View File

@ -31,14 +31,19 @@ import (
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/tools/leaderelection"
"k8s.io/client-go/tools/leaderelection/resourcelock"
"k8s.io/client-go/util/flowcontrol"
cliflag "k8s.io/component-base/cli/flag"
"k8s.io/component-base/logs"
logsv1 "k8s.io/component-base/logs/api/v1"
"k8s.io/component-base/term"
"k8s.io/klog/v2"
ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics"
"github.com/karmada-io/karmada/cmd/descheduler/app/options"
"github.com/karmada-io/karmada/pkg/descheduler"
"github.com/karmada-io/karmada/pkg/features"
karmadaclientset "github.com/karmada-io/karmada/pkg/generated/clientset/versioned"
versionmetrics "github.com/karmada-io/karmada/pkg/metrics"
"github.com/karmada-io/karmada/pkg/sharedcli"
"github.com/karmada-io/karmada/pkg/sharedcli/klogflag"
"github.com/karmada-io/karmada/pkg/sharedcli/profileflag"
@ -77,20 +82,30 @@ const (
)
// NewDeschedulerCommand creates a *cobra.Command object with default parameters
func NewDeschedulerCommand(stopChan <-chan struct{}) *cobra.Command {
func NewDeschedulerCommand(ctx context.Context) *cobra.Command {
logConfig := logsv1.NewLoggingConfiguration()
fss := cliflag.NamedFlagSets{}
logsFlagSet := fss.FlagSet("logs")
logs.AddFlags(logsFlagSet, logs.SkipLoggingConfigurationFlags())
logsv1.AddFlags(logConfig, logsFlagSet)
klogflag.Add(logsFlagSet)
genericFlagSet := fss.FlagSet("generic")
opts := options.NewOptions()
opts.AddFlags(genericFlagSet)
cmd := &cobra.Command{
Use: names.KarmadaDeschedulerComponentName,
Long: `The karmada-descheduler evicts replicas from member clusters
if they fail to be scheduled for a period of time. It relies on
karmada-scheduler-estimator to get replica status.`,
RunE: func(_ *cobra.Command, _ []string) error {
// validate options
if errs := opts.Validate(); len(errs) != 0 {
return errs.ToAggregate()
}
if err := run(opts, stopChan); err != nil {
if err := run(ctx, opts); err != nil {
return err
}
return nil
@ -103,17 +118,15 @@ karmada-scheduler-estimator to get replica status.`,
}
return nil
},
PersistentPreRunE: func(_ *cobra.Command, _ []string) error {
if err := logsv1.ValidateAndApply(logConfig, features.FeatureGate); err != nil {
return err
}
logs.InitLogs()
return nil
},
}
fss := cliflag.NamedFlagSets{}
genericFlagSet := fss.FlagSet("generic")
opts.AddFlags(genericFlagSet)
// Set klog flags
logsFlagSet := fss.FlagSet("logs")
klogflag.Add(logsFlagSet)
cmd.AddCommand(sharedcommand.NewCmdVersion(names.KarmadaDeschedulerComponentName))
cmd.Flags().AddFlagSet(genericFlagSet)
cmd.Flags().AddFlagSet(logsFlagSet)
@ -123,9 +136,12 @@ karmada-scheduler-estimator to get replica status.`,
return cmd
}
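
Editor's note: the same restructuring appears in every command in this changeset: the flag sets are built before the cobra.Command, and PersistentPreRunE validates and applies the logsv1 configuration before anything logs. Stripped of the karmada specifics, the flow looks roughly like this (a fresh feature gate stands in for karmada's features.FeatureGate):

package main

import (
	"github.com/spf13/pflag"
	"k8s.io/component-base/featuregate"
	"k8s.io/component-base/logs"
	logsv1 "k8s.io/component-base/logs/api/v1"
	_ "k8s.io/component-base/logs/json/register" // enables --logging-format=json
)

func main() {
	logConfig := logsv1.NewLoggingConfiguration()
	fs := pflag.NewFlagSet("logs", pflag.ExitOnError)
	logs.AddFlags(fs, logs.SkipLoggingConfigurationFlags())
	logsv1.AddFlags(logConfig, fs)
	_ = fs.Parse(nil) // defaults: text format

	// Apply the configuration once, before any component starts logging.
	if err := logsv1.ValidateAndApply(logConfig, featuregate.NewFeatureGate()); err != nil {
		panic(err)
	}
	logs.InitLogs()
	defer logs.FlushLogs()
}
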
func run(opts *options.Options, stopChan <-chan struct{}) error {
func run(ctx context.Context, opts *options.Options) error {
klog.Infof("karmada-descheduler version: %s", version.Get())
klog.Infof("Please make sure the karmada-scheduler-estimator of all member clusters has been deployed")
ctrlmetrics.Registry.MustRegister(versionmetrics.NewBuildInfoCollector())
serveHealthzAndMetrics(opts.HealthProbeBindAddress, opts.MetricsBindAddress)
profileflag.ListenAndServe(opts.ProfileOpts)
@ -134,17 +150,11 @@ func run(opts *options.Options, stopChan <-chan struct{}) error {
if err != nil {
return fmt.Errorf("error building kubeconfig: %s", err.Error())
}
restConfig.QPS, restConfig.Burst = opts.KubeAPIQPS, opts.KubeAPIBurst
restConfig.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(opts.KubeAPIQPS, opts.KubeAPIBurst)
karmadaClient := karmadaclientset.NewForConfigOrDie(restConfig)
kubeClient := kubernetes.NewForConfigOrDie(restConfig)
ctx, cancel := context.WithCancel(context.Background())
go func() {
<-stopChan
cancel()
}()
desched := descheduler.NewDescheduler(karmadaClient, kubeClient, opts)
if !opts.LeaderElection.LeaderElect {
desched.Run(ctx)
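
Editor's note: another recurring substitution in this changeset: instead of assigning QPS and Burst on the rest.Config, the commands now install an explicit token-bucket limiter. In client-go, a non-nil RateLimiter takes precedence over the QPS/Burst fields, so the effective throttling is stated in one place. Schematically:

package app

import (
	"k8s.io/client-go/rest"
	"k8s.io/client-go/util/flowcontrol"
)

func configureThrottling(cfg *rest.Config, qps float32, burst int) {
	// Before: cfg.QPS, cfg.Burst = qps, burst (client-go built an implicit limiter).
	// After: construct the limiter explicitly; once RateLimiter is set,
	// client-go ignores the QPS and Burst fields.
	cfg.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(qps, burst)
}
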

View File

@ -17,6 +17,8 @@ limitations under the License.
package app
import (
"context"
"fmt"
"net/http"
"testing"
"time"
@ -26,11 +28,12 @@ import (
"github.com/karmada-io/karmada/cmd/descheduler/app/options"
"github.com/karmada-io/karmada/pkg/util/names"
testingutil "github.com/karmada-io/karmada/pkg/util/testing"
)
func TestNewDeschedulerCommand(t *testing.T) {
stopCh := make(chan struct{})
cmd := NewDeschedulerCommand(stopCh)
ctx := context.Background()
cmd := NewDeschedulerCommand(ctx)
assert.NotNil(t, cmd)
assert.Equal(t, names.KarmadaDeschedulerComponentName, cmd.Use)
@ -51,8 +54,8 @@ func TestDeschedulerCommandFlagParsing(t *testing.T) {
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
stopCh := make(chan struct{})
cmd := NewDeschedulerCommand(stopCh)
ctx := context.Background()
cmd := NewDeschedulerCommand(ctx)
cmd.SetArgs(tc.args)
err := cmd.ParseFlags(tc.args)
if tc.expectError {
@ -65,8 +68,10 @@ func TestDeschedulerCommandFlagParsing(t *testing.T) {
}
func TestServeHealthzAndMetrics(t *testing.T) {
healthAddress := "127.0.0.1:8082"
metricsAddress := "127.0.0.1:8083"
ports, err := testingutil.GetFreePorts("127.0.0.1", 2)
require.NoError(t, err)
healthAddress := fmt.Sprintf("127.0.0.1:%d", ports[0])
metricsAddress := fmt.Sprintf("127.0.0.1:%d", ports[1])
go serveHealthzAndMetrics(healthAddress, metricsAddress)
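
Editor's note: the test previously bound fixed ports (8082/8083) and could collide with other processes on the CI host; it now requests free ports. GetFreePorts' implementation isn't shown in this diff, but helpers of this kind typically bind port 0 and let the kernel choose, e.g. (illustrative stand-in, not necessarily karmada's code):

package testutil

import "net"

// getFreePorts listens on port 0 so the kernel assigns an unused port,
// records it, and closes the listener so the caller can re-bind it.
func getFreePorts(host string, n int) ([]int, error) {
	ports := make([]int, 0, n)
	for i := 0; i < n; i++ {
		l, err := net.Listen("tcp", net.JoinHostPort(host, "0"))
		if err != nil {
			return nil, err
		}
		ports = append(ports, l.Addr().(*net.TCPAddr).Port)
		if err := l.Close(); err != nil {
			return nil, err
		}
	}
	return ports, nil
}

There is an inherent window between closing the listener and the test re-binding the port, which is generally acceptable in tests.
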

View File

@ -20,6 +20,7 @@ import (
"os"
"k8s.io/component-base/cli"
"k8s.io/component-base/logs"
_ "k8s.io/component-base/logs/json/register" // for JSON log format registration
controllerruntime "sigs.k8s.io/controller-runtime"
@ -27,8 +28,9 @@ import (
)
func main() {
stopChan := controllerruntime.SetupSignalHandler().Done()
command := app.NewDeschedulerCommand(stopChan)
code := cli.Run(command)
os.Exit(code)
ctx := controllerruntime.SetupSignalHandler()
cmd := app.NewDeschedulerCommand(ctx)
exitCode := cli.Run(cmd)
logs.FlushLogs()
os.Exit(exitCode)
}

View File

@ -31,9 +31,12 @@ import (
genericapiserver "k8s.io/apiserver/pkg/server"
genericfilters "k8s.io/apiserver/pkg/server/filters"
genericoptions "k8s.io/apiserver/pkg/server/options"
utilversion "k8s.io/apiserver/pkg/util/version"
"k8s.io/apiserver/pkg/util/compatibility"
"k8s.io/client-go/rest"
"k8s.io/client-go/util/flowcontrol"
cliflag "k8s.io/component-base/cli/flag"
"k8s.io/component-base/logs"
logsv1 "k8s.io/component-base/logs/api/v1"
"k8s.io/component-base/term"
"k8s.io/klog/v2"
netutils "k8s.io/utils/net"
@ -41,6 +44,7 @@ import (
"github.com/karmada-io/karmada/cmd/karmada-search/app/options"
searchscheme "github.com/karmada-io/karmada/pkg/apis/search/scheme"
"github.com/karmada-io/karmada/pkg/features"
karmadaclientset "github.com/karmada-io/karmada/pkg/generated/clientset/versioned"
informerfactory "github.com/karmada-io/karmada/pkg/generated/informers/externalversions"
generatedopenapi "github.com/karmada-io/karmada/pkg/generated/openapi"
@ -61,7 +65,17 @@ type Option func(*runtime.Registry)
// NewKarmadaSearchCommand creates a *cobra.Command object with default parameters
func NewKarmadaSearchCommand(ctx context.Context, registryOptions ...Option) *cobra.Command {
logConfig := logsv1.NewLoggingConfiguration()
fss := cliflag.NamedFlagSets{}
logsFlagSet := fss.FlagSet("logs")
logs.AddFlags(logsFlagSet, logs.SkipLoggingConfigurationFlags())
logsv1.AddFlags(logConfig, logsFlagSet)
klogflag.Add(logsFlagSet)
genericFlagSet := fss.FlagSet("generic")
opts := options.NewOptions()
opts.AddFlags(genericFlagSet)
cmd := &cobra.Command{
Use: names.KarmadaSearchComponentName,
@ -79,17 +93,15 @@ capabilities such as global search and resource proxy in a multi-cloud environme
}
return nil
},
PersistentPreRunE: func(_ *cobra.Command, _ []string) error {
if err := logsv1.ValidateAndApply(logConfig, features.FeatureGate); err != nil {
return err
}
logs.InitLogs()
return nil
},
}
fss := cliflag.NamedFlagSets{}
genericFlagSet := fss.FlagSet("generic")
opts.AddFlags(genericFlagSet)
// Set klog flags
logsFlagSet := fss.FlagSet("logs")
klogflag.Add(logsFlagSet)
cmd.AddCommand(sharedcommand.NewCmdVersion(names.KarmadaSearchComponentName))
cmd.Flags().AddFlagSet(genericFlagSet)
cmd.Flags().AddFlagSet(logsFlagSet)
@ -129,8 +141,11 @@ func run(ctx context.Context, o *options.Options, registryOptions ...Option) err
return nil
})
karmadaSharedInformerFactoryCacheSynced := make(chan struct{})
server.GenericAPIServer.AddPostStartHookOrDie("start-karmada-informers", func(context genericapiserver.PostStartHookContext) error {
config.ExtraConfig.KarmadaSharedInformerFactory.Start(context.Done())
config.ExtraConfig.KarmadaSharedInformerFactory.WaitForCacheSync(context.Done())
close(karmadaSharedInformerFactoryCacheSynced)
return nil
})
@ -139,14 +154,15 @@ func run(ctx context.Context, o *options.Options, registryOptions ...Option) err
if config.ExtraConfig.Controller != nil {
server.GenericAPIServer.AddPostStartHookOrDie("start-karmada-search-controller", func(context genericapiserver.PostStartHookContext) error {
// start ResourceRegistry controller
config.ExtraConfig.Controller.Start(context.Done())
<-karmadaSharedInformerFactoryCacheSynced
config.ExtraConfig.Controller.Start(context)
return nil
})
}
if config.ExtraConfig.ProxyController != nil {
server.GenericAPIServer.AddPostStartHookOrDie("start-karmada-proxy-controller", func(context genericapiserver.PostStartHookContext) error {
config.ExtraConfig.ProxyController.Start(context.Done())
config.ExtraConfig.ProxyController.Start(context)
return nil
})
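
Editor's note: the new karmadaSharedInformerFactoryCacheSynced channel sequences these hooks. Post-start hooks run concurrently, so without it the search controller could start against cold informer caches. The pattern, reduced to its shape (factory and controller calls elided as comments):

package app

import genericapiserver "k8s.io/apiserver/pkg/server"

func addSequencedHooks(server *genericapiserver.GenericAPIServer) {
	synced := make(chan struct{})

	server.AddPostStartHookOrDie("start-informers", func(hookCtx genericapiserver.PostStartHookContext) error {
		// factory.Start(hookCtx.Done()); factory.WaitForCacheSync(hookCtx.Done())
		close(synced) // caches are warm; release anyone waiting
		return nil
	})

	server.AddPostStartHookOrDie("start-controller", func(hookCtx genericapiserver.PostStartHookContext) error {
		<-synced // never run the controller against unsynced caches
		// controller.Start(hookCtx)
		return nil
	})
}
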
@ -179,9 +195,8 @@ func config(o *options.Options, outOfTreeRegistryOptions ...Option) (*search.Con
return nil, err
}
serverConfig.ClientConfig.QPS = o.KubeAPIQPS
serverConfig.ClientConfig.Burst = o.KubeAPIBurst
serverConfig.Config.EffectiveVersion = utilversion.NewEffectiveVersion("1.0")
serverConfig.ClientConfig.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(o.KubeAPIQPS, o.KubeAPIBurst)
serverConfig.Config.EffectiveVersion = compatibility.DefaultBuildEffectiveVersion()
httpClient, err := rest.HTTPClientFor(serverConfig.ClientConfig)
if err != nil {

View File

@ -20,6 +20,7 @@ import (
"os"
"k8s.io/component-base/cli"
"k8s.io/component-base/logs"
_ "k8s.io/component-base/logs/json/register" // for JSON log format registration
controllerruntime "sigs.k8s.io/controller-runtime"
@ -29,6 +30,7 @@ import (
func main() {
ctx := controllerruntime.SetupSignalHandler()
cmd := app.NewKarmadaSearchCommand(ctx)
code := cli.Run(cmd)
os.Exit(code)
exitCode := cli.Run(cmd)
logs.FlushLogs()
os.Exit(exitCode)
}

View File

@ -22,9 +22,14 @@ import (
"github.com/spf13/cobra"
cliflag "k8s.io/component-base/cli/flag"
"k8s.io/component-base/logs"
logsv1 "k8s.io/component-base/logs/api/v1"
"k8s.io/component-base/term"
"k8s.io/klog/v2"
controllerruntime "sigs.k8s.io/controller-runtime"
"github.com/karmada-io/karmada/cmd/metrics-adapter/app/options"
"github.com/karmada-io/karmada/pkg/features"
"github.com/karmada-io/karmada/pkg/sharedcli"
"github.com/karmada-io/karmada/pkg/sharedcli/klogflag"
"github.com/karmada-io/karmada/pkg/util/names"
@ -33,7 +38,17 @@ import (
// NewMetricsAdapterCommand creates a *cobra.Command object with default parameters
func NewMetricsAdapterCommand(ctx context.Context) *cobra.Command {
logConfig := logsv1.NewLoggingConfiguration()
fss := cliflag.NamedFlagSets{}
logsFlagSet := fss.FlagSet("logs")
logs.AddFlags(logsFlagSet, logs.SkipLoggingConfigurationFlags())
logsv1.AddFlags(logConfig, logsFlagSet)
klogflag.Add(logsFlagSet)
genericFlagSet := fss.FlagSet("generic")
opts := options.NewOptions()
opts.AddFlags(genericFlagSet)
cmd := &cobra.Command{
Use: names.KarmadaMetricsAdapterComponentName,
@ -50,6 +65,19 @@ func NewMetricsAdapterCommand(ctx context.Context) *cobra.Command {
}
return nil
},
PersistentPreRunE: func(_ *cobra.Command, _ []string) error {
if err := logsv1.ValidateAndApply(logConfig, features.FeatureGate); err != nil {
return err
}
logs.InitLogs()
// Starting from version 0.15.0, controller-runtime expects its consumers to set a logger through log.SetLogger.
// If SetLogger is not called within the first 30 seconds of a binary's lifetime, it falls back
// to a NullLogSink and reports an error. Setting a logger here silences the
// "log.SetLogger(...) was never called; logs will not be displayed" error.
// For more information, see: https://github.com/karmada-io/karmada/pull/4885.
controllerruntime.SetLogger(klog.Background())
return nil
},
Args: func(cmd *cobra.Command, args []string) error {
for _, arg := range args {
if len(arg) > 0 {
@ -60,15 +88,6 @@ func NewMetricsAdapterCommand(ctx context.Context) *cobra.Command {
},
}
fss := cliflag.NamedFlagSets{}
genericFlagSet := fss.FlagSet("generic")
opts.AddFlags(genericFlagSet)
// Set klog flags
logsFlagSet := fss.FlagSet("logs")
klogflag.Add(logsFlagSet)
cmd.AddCommand(sharedcommand.NewCmdVersion(names.KarmadaMetricsAdapterComponentName))
cmd.Flags().AddFlagSet(genericFlagSet)
cmd.Flags().AddFlagSet(logsFlagSet)

View File

@ -28,6 +28,7 @@ import (
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/util/flowcontrol"
"k8s.io/component-base/metrics"
"k8s.io/component-base/metrics/legacyregistry"
"k8s.io/klog/v2"
@ -37,6 +38,7 @@ import (
karmadaclientset "github.com/karmada-io/karmada/pkg/generated/clientset/versioned"
informerfactory "github.com/karmada-io/karmada/pkg/generated/informers/externalversions"
generatedopenapi "github.com/karmada-io/karmada/pkg/generated/openapi"
versionmetrics "github.com/karmada-io/karmada/pkg/metrics"
"github.com/karmada-io/karmada/pkg/metricsadapter"
"github.com/karmada-io/karmada/pkg/sharedcli/profileflag"
"github.com/karmada-io/karmada/pkg/util"
@ -122,19 +124,20 @@ func (o *Options) AddFlags(fs *pflag.FlagSet) {
}
// Config returns config for the metrics-adapter server given Options
func (o *Options) Config(stopCh <-chan struct{}) (*metricsadapter.MetricsServer, error) {
func (o *Options) Config(ctx context.Context) (*metricsadapter.MetricsServer, error) {
restConfig, err := clientcmd.BuildConfigFromFlags("", o.KubeConfig)
if err != nil {
klog.Errorf("Unable to build restConfig: %v", err)
return nil, err
}
restConfig.QPS, restConfig.Burst = o.KubeAPIQPS, o.KubeAPIBurst
restConfig.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(o.KubeAPIQPS, o.KubeAPIBurst)
karmadaClient := karmadaclientset.NewForConfigOrDie(restConfig)
factory := informerfactory.NewSharedInformerFactory(karmadaClient, 0)
kubeClient := kubernetes.NewForConfigOrDie(restConfig)
kubeFactory := informers.NewSharedInformerFactory(kubeClient, 0)
metricsController := metricsadapter.NewMetricsController(stopCh, restConfig, factory, kubeFactory, &util.ClientOption{QPS: o.ClusterAPIQPS, Burst: o.ClusterAPIBurst})
limiterGetter := util.GetClusterRateLimiterGetter().SetDefaultLimits(o.ClusterAPIQPS, o.ClusterAPIBurst)
metricsController := metricsadapter.NewMetricsController(ctx, restConfig, factory, kubeFactory, &util.ClientOption{RateLimiterGetter: limiterGetter.GetRateLimiter})
metricsAdapter := metricsadapter.NewMetricsAdapter(metricsController, o.CustomMetricsAdapterServerOptions)
metricsAdapter.OpenAPIConfig = genericapiserver.DefaultOpenAPIConfig(generatedopenapi.GetOpenAPIDefinitions, openapinamer.NewDefinitionNamer(api.Scheme))
metricsAdapter.OpenAPIV3Config = genericapiserver.DefaultOpenAPIV3Config(generatedopenapi.GetOpenAPIDefinitions, openapinamer.NewDefinitionNamer(api.Scheme))
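
Editor's note: here the adapter moves from a single QPS/Burst pair in util.ClientOption to a RateLimiterGetter, so each member cluster gets its own limiter rather than all clusters sharing one. The actual util.GetClusterRateLimiterGetter isn't shown in this diff; a plausible shape for such a registry (illustrative only):

package util

import (
	"sync"

	"k8s.io/client-go/util/flowcontrol"
)

// rateLimiterGetter hands out one token-bucket limiter per cluster,
// creating it on first use with the configured default limits.
type rateLimiterGetter struct {
	mu       sync.Mutex
	limiters map[string]flowcontrol.RateLimiter
	qps      float32
	burst    int
}

func (g *rateLimiterGetter) SetDefaultLimits(qps float32, burst int) *rateLimiterGetter {
	g.mu.Lock()
	defer g.mu.Unlock()
	g.qps, g.burst = qps, burst
	return g
}

func (g *rateLimiterGetter) GetRateLimiter(cluster string) flowcontrol.RateLimiter {
	g.mu.Lock()
	defer g.mu.Unlock()
	if g.limiters == nil {
		g.limiters = map[string]flowcontrol.RateLimiter{}
	}
	if _, ok := g.limiters[cluster]; !ok {
		g.limiters[cluster] = flowcontrol.NewTokenBucketRateLimiter(g.qps, g.burst)
	}
	return g.limiters[cluster]
}
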
@ -177,14 +180,14 @@ func (o *Options) Config(stopCh <-chan struct{}) (*metricsadapter.MetricsServer,
// Run runs the metrics-adapter with options. This should never exit.
func (o *Options) Run(ctx context.Context) error {
klog.Infof("karmada-metrics-adapter version: %s", version.Get())
legacyregistry.RawMustRegister(versionmetrics.NewBuildInfoCollector())
if o.MetricsBindAddress != "0" {
go serveMetrics(o.MetricsBindAddress)
}
profileflag.ListenAndServe(o.ProfileOpts)
stopCh := ctx.Done()
metricsServer, err := o.Config(stopCh)
metricsServer, err := o.Config(ctx)
if err != nil {
return err
}

View File

@ -20,8 +20,8 @@ import (
"os"
"k8s.io/component-base/cli"
"k8s.io/component-base/logs"
_ "k8s.io/component-base/logs/json/register" // for JSON log format registration
"k8s.io/klog/v2"
controllerruntime "sigs.k8s.io/controller-runtime"
"github.com/karmada-io/karmada/cmd/metrics-adapter/app"
@ -29,13 +29,9 @@ import (
func main() {
ctx := controllerruntime.SetupSignalHandler()
// Starting from version 0.15.0, controller-runtime expects its consumers to set a logger through log.SetLogger.
// If SetLogger is not called within the first 30 seconds of a binaries lifetime, it will get
// set to a NullLogSink and report an error. Here's to silence the "log.SetLogger(...) was never called; logs will not be displayed" error
// by setting a logger through log.SetLogger.
// More info refer to: https://github.com/karmada-io/karmada/pull/4885.
controllerruntime.SetLogger(klog.Background())
cmd := app.NewMetricsAdapterCommand(ctx)
code := cli.Run(cmd)
os.Exit(code)
exitCode := cli.Run(cmd)
// Ensure any buffered log entries are flushed
logs.FlushLogs()
os.Exit(exitCode)
}

View File

@ -29,13 +29,18 @@ import (
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/util/flowcontrol"
cliflag "k8s.io/component-base/cli/flag"
"k8s.io/component-base/logs"
logsv1 "k8s.io/component-base/logs/api/v1"
"k8s.io/component-base/term"
"k8s.io/klog/v2"
ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics"
"github.com/karmada-io/karmada/cmd/scheduler-estimator/app/options"
"github.com/karmada-io/karmada/pkg/estimator/server"
"github.com/karmada-io/karmada/pkg/features"
versionmetrics "github.com/karmada-io/karmada/pkg/metrics"
"github.com/karmada-io/karmada/pkg/sharedcli"
"github.com/karmada-io/karmada/pkg/sharedcli/klogflag"
"github.com/karmada-io/karmada/pkg/sharedcli/profileflag"
@ -75,12 +80,29 @@ const (
// NewSchedulerEstimatorCommand creates a *cobra.Command object with default parameters
func NewSchedulerEstimatorCommand(ctx context.Context) *cobra.Command {
logConfig := logsv1.NewLoggingConfiguration()
fss := cliflag.NamedFlagSets{}
logsFlagSet := fss.FlagSet("logs")
logs.AddFlags(logsFlagSet, logs.SkipLoggingConfigurationFlags())
logsv1.AddFlags(logConfig, logsFlagSet)
klogflag.Add(logsFlagSet)
genericFlagSet := fss.FlagSet("generic")
opts := options.NewOptions()
opts.AddFlags(genericFlagSet)
cmd := &cobra.Command{
Use: names.KarmadaSchedulerEstimatorComponentName,
Long: `The karmada-scheduler-estimator runs an accurate scheduler estimator of a cluster. It
provides the scheduler with more accurate cluster resource information.`,
PersistentPreRunE: func(_ *cobra.Command, _ []string) error {
if err := logsv1.ValidateAndApply(logConfig, features.FeatureGate); err != nil {
return err
}
logs.InitLogs()
return nil
},
RunE: func(_ *cobra.Command, _ []string) error {
// validate options
if errs := opts.Validate(); len(errs) != 0 {
@ -93,15 +115,6 @@ provides the scheduler with more accurate cluster resource information.`,
},
}
fss := cliflag.NamedFlagSets{}
genericFlagSet := fss.FlagSet("generic")
opts.AddFlags(genericFlagSet)
// Set klog flags
logsFlagSet := fss.FlagSet("logs")
klogflag.Add(logsFlagSet)
cmd.AddCommand(sharedcommand.NewCmdVersion(names.KarmadaSchedulerEstimatorComponentName))
cmd.Flags().AddFlagSet(genericFlagSet)
cmd.Flags().AddFlagSet(logsFlagSet)
@ -113,6 +126,9 @@ provides the scheduler with more accurate cluster resource information.`,
func run(ctx context.Context, opts *options.Options) error {
klog.Infof("karmada-scheduler-estimator version: %s", version.Get())
ctrlmetrics.Registry.MustRegister(versionmetrics.NewBuildInfoCollector())
serveHealthzAndMetrics(opts.HealthProbeBindAddress, opts.MetricsBindAddress)
profileflag.ListenAndServe(opts.ProfileOpts)
@ -121,13 +137,13 @@ func run(ctx context.Context, opts *options.Options) error {
if err != nil {
return fmt.Errorf("error building kubeconfig: %s", err.Error())
}
restConfig.QPS, restConfig.Burst = opts.ClusterAPIQPS, opts.ClusterAPIBurst
restConfig.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(opts.ClusterAPIQPS, opts.ClusterAPIBurst)
kubeClient := kubernetes.NewForConfigOrDie(restConfig)
dynamicClient := dynamic.NewForConfigOrDie(restConfig)
discoveryClient := discovery.NewDiscoveryClientForConfigOrDie(restConfig)
e, err := server.NewEstimatorServer(kubeClient, dynamicClient, discoveryClient, opts, ctx.Done())
e, err := server.NewEstimatorServer(ctx, kubeClient, dynamicClient, discoveryClient, opts)
if err != nil {
klog.Errorf("Fail to create estimator server: %v", err)
return err

View File

@ -20,6 +20,7 @@ import (
"os"
"k8s.io/component-base/cli"
"k8s.io/component-base/logs"
_ "k8s.io/component-base/logs/json/register" // for JSON log format registration
controllerruntime "sigs.k8s.io/controller-runtime"
@ -30,5 +31,7 @@ func main() {
ctx := controllerruntime.SetupSignalHandler()
cmd := app.NewSchedulerEstimatorCommand(ctx)
code := cli.Run(cmd)
// Ensure any buffered log entries are flushed
logs.FlushLogs()
os.Exit(code)
}

View File

@ -32,13 +32,18 @@ import (
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/tools/leaderelection"
"k8s.io/client-go/tools/leaderelection/resourcelock"
"k8s.io/client-go/util/flowcontrol"
cliflag "k8s.io/component-base/cli/flag"
"k8s.io/component-base/logs"
logsv1 "k8s.io/component-base/logs/api/v1"
"k8s.io/component-base/term"
"k8s.io/klog/v2"
ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics"
"github.com/karmada-io/karmada/cmd/scheduler/app/options"
"github.com/karmada-io/karmada/pkg/features"
karmadaclientset "github.com/karmada-io/karmada/pkg/generated/clientset/versioned"
versionmetrics "github.com/karmada-io/karmada/pkg/metrics"
"github.com/karmada-io/karmada/pkg/scheduler"
"github.com/karmada-io/karmada/pkg/scheduler/framework/runtime"
"github.com/karmada-io/karmada/pkg/sharedcli"
@ -89,8 +94,19 @@ func WithPlugin(name string, factory runtime.PluginFactory) Option {
}
// NewSchedulerCommand creates a *cobra.Command object with default parameters
func NewSchedulerCommand(stopChan <-chan struct{}, registryOptions ...Option) *cobra.Command {
func NewSchedulerCommand(ctx context.Context, registryOptions ...Option) *cobra.Command {
logConfig := logsv1.NewLoggingConfiguration()
fss := cliflag.NamedFlagSets{}
logsFlagSet := fss.FlagSet("logs")
logs.AddFlags(logsFlagSet, logs.SkipLoggingConfigurationFlags())
logsv1.AddFlags(logConfig, logsFlagSet)
klogflag.Add(logsFlagSet)
genericFlagSet := fss.FlagSet("generic")
opts := options.NewOptions()
opts.AddFlags(genericFlagSet)
cmd := &cobra.Command{
Use: names.KarmadaSchedulerComponentName,
@ -103,11 +119,18 @@ the most suitable cluster.`,
if errs := opts.Validate(); len(errs) != 0 {
return errs.ToAggregate()
}
if err := run(opts, stopChan, registryOptions...); err != nil {
if err := run(ctx, opts, registryOptions...); err != nil {
return err
}
return nil
},
PersistentPreRunE: func(_ *cobra.Command, _ []string) error {
if err := logsv1.ValidateAndApply(logConfig, features.FeatureGate); err != nil {
return err
}
logs.InitLogs()
return nil
},
Args: func(cmd *cobra.Command, args []string) error {
for _, arg := range args {
if len(arg) > 0 {
@ -118,14 +141,6 @@ the most suitable cluster.`,
},
}
fss := cliflag.NamedFlagSets{}
genericFlagSet := fss.FlagSet("generic")
opts.AddFlags(genericFlagSet)
// Set klog flags
logsFlagSet := fss.FlagSet("logs")
klogflag.Add(logsFlagSet)
cmd.AddCommand(sharedcommand.NewCmdVersion(names.KarmadaSchedulerComponentName))
cmd.Flags().AddFlagSet(genericFlagSet)
@ -136,8 +151,10 @@ the most suitable cluster.`,
return cmd
}
func run(opts *options.Options, stopChan <-chan struct{}, registryOptions ...Option) error {
func run(ctx context.Context, opts *options.Options, registryOptions ...Option) error {
klog.Infof("karmada-scheduler version: %s", version.Get())
ctrlmetrics.Registry.MustRegister(versionmetrics.NewBuildInfoCollector())
serveHealthzAndMetrics(opts.HealthProbeBindAddress, opts.MetricsBindAddress)
profileflag.ListenAndServe(opts.ProfileOpts)
@ -146,17 +163,12 @@ func run(opts *options.Options, stopChan <-chan struct{}, registryOptions ...Opt
if err != nil {
return fmt.Errorf("error building kubeconfig: %s", err.Error())
}
restConfig.QPS, restConfig.Burst = opts.KubeAPIQPS, opts.KubeAPIBurst
restConfig.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(opts.KubeAPIQPS, opts.KubeAPIBurst)
dynamicClientSet := dynamic.NewForConfigOrDie(restConfig)
karmadaClient := karmadaclientset.NewForConfigOrDie(restConfig)
kubeClientSet := kubernetes.NewForConfigOrDie(restConfig)
ctx, cancel := context.WithCancel(context.Background())
go func() {
<-stopChan
cancel()
}()
outOfTreeRegistry := make(runtime.Registry)
for _, option := range registryOptions {
if err := option(outOfTreeRegistry); err != nil {

Some files were not shown because too many files have changed in this diff.