Compare commits

11 Commits

| Author | SHA1 | Date |
| --- | --- | --- |
|  | ef9a2a134b |  |
|  | 94cdc526d9 |  |
|  | 78c85f8740 |  |
|  | c2dcbed1a0 |  |
|  | 2987ffbd45 |  |
|  | b105c93773 |  |
|  | f1b8d468f6 |  |
|  | fab1c46957 |  |
|  | 74b345abe3 |  |
|  | 53c6aeded3 |  |
|  | 9034cc4e9f |  |
@@ -1,7 +1,31 @@
+.github/
 .idea/
 .vscode/
 bin/
-codecov.yaml
-cover.out
+charts/
+docs/
+config/
+examples/
+hack/
+manifest/
+spark-docker/
+sparkctl/
+test/
+vendor/
+.dockerignore
 .DS_Store
-*.iml
+.gitignore
+.gitlab-ci.yaml
+.golangci.yaml
+.pre-commit-config.yaml
+ADOPTERS.md
+CODE_OF_CONDUCT.md
+codecov.ymal
+CONTRIBUTING.md
+cover.out
+Dockerfile
+LICENSE
+OWNERS
+PROJECT
+README.md
+test.sh
@@ -0,0 +1,46 @@
+---
+name: Bug report
+about: Create a report to help us improve
+title: '[BUG] Brief description of the issue'
+labels: bug
+---
+
+## Description
+
+Please provide a clear and concise description of the issue you are encountering, and a reproduction of your configuration.
+
+If your request is for a new feature, please use the `Feature request` template.
+
+- [ ] ✋ I have searched the open/closed issues and my issue is not listed.
+
+## Reproduction Code [Required]
+
+<!-- REQUIRED -->
+
+Steps to reproduce the behavior:
+
+
+## Expected behavior
+
+<!-- A clear and concise description of what you expected to happen -->
+
+## Actual behavior
+
+<!-- A clear and concise description of what actually happened -->
+
+### Terminal Output Screenshot(s)
+
+<!-- Optional but helpful -->
+
+
+## Environment & Versions
+
+- Spark Operator App version:
+- Helm Chart Version:
+- Kubernetes Version:
+- Apache Spark version:
+
+## Additional context
+
+<!-- Add any other context about the problem here -->
@@ -1,54 +0,0 @@
-name: Bug Report
-description: Tell us about a problem you are experiencing with the Spark operator.
-labels:
-  - kind/bug
-  - lifecycle/needs-triage
-body:
-  - type: markdown
-    attributes:
-      value: |
-        Thanks for taking the time to fill out this Spark operator bug report!
-  - type: textarea
-    id: problem
-    attributes:
-      label: What happened?
-      description: |
-        Please provide a clear and concise description of the issue you are encountering, and a reproduction of your configuration.
-        If your request is for a new feature, please use the `Feature request` template.
-      value: |
-        - [ ] ✋ I have searched the open/closed issues and my issue is not listed.
-    validations:
-      required: true
-  - type: textarea
-    id: reproduce
-    attributes:
-      label: Reproduction Code
-      description: Steps to reproduce the behavior.
-  - type: textarea
-    id: expected
-    attributes:
-      label: Expected behavior
-      description: A clear and concise description of what you expected to happen.
-  - type: textarea
-    id: actual
-    attributes:
-      label: Actual behavior
-      description: A clear and concise description of what actually happened.
-  - type: textarea
-    id: environment
-    attributes:
-      label: Environment & Versions
-      value: |
-        - Kubernetes Version:
-        - Spark Operator Version:
-        - Apache Spark Version:
-  - type: textarea
-    id: context
-    attributes:
-      label: Additional context
-      description: Add any other context about the problem here.
-  - type: input
-    id: votes
-    attributes:
-      label: Impacted by this bug?
-      value: Give it a 👍 We prioritize the issues with most 👍
@@ -1,9 +0,0 @@
-blank_issues_enabled: true
-
-contact_links:
-  - name: Spark Operator Documentation
-    url: https://www.kubeflow.org/docs/components/spark-operator
-    about: Much help can be found in the docs
-  - name: Spark Operator Slack Channel
-    url: https://app.slack.com/client/T08PSQ7BQ/C074588U7EG
-    about: Ask questions about the Spark Operator
@@ -0,0 +1,32 @@
+---
+name: Feature request
+about: Suggest an idea for this project
+title: '[FEATURE] Brief description of the feature'
+labels: enhancement
+---
+
+<!--- Please keep this note for the community --->
+
+### Community Note
+
+* Please vote on this issue by adding a 👍 [reaction](https://blog.github.com/2016-03-10-add-reactions-to-pull-requests-issues-and-comments/) to the original issue to help the community and maintainers prioritize this request
+* Please do not leave "+1" or other comments that do not add relevant new information or questions, they generate extra noise for issue followers and do not help prioritize the request
+* If you are interested in working on this issue or have submitted a pull request, please leave a comment
+
+<!--- Thank you for keeping this note for the community --->
+
+#### What is the outcome that you are trying to reach?
+
+<!-- A clear and concise description of what the problem is. -->
+
+#### Describe the solution you would like
+
+<!-- A clear and concise description of what you want to happen. -->
+
+#### Describe alternatives you have considered
+
+<!-- A clear and concise description of any alternative solutions or features you've considered. -->
+
+#### Additional context
+
+<!-- Add any other context or screenshots about the feature request here. -->
@ -1,47 +0,0 @@
|
||||||
name: Feature Request
|
|
||||||
description: Suggest an idea for the Spark operator.
|
|
||||||
labels:
|
|
||||||
- kind/feature
|
|
||||||
- lifecycle/needs-triage
|
|
||||||
body:
|
|
||||||
- type: markdown
|
|
||||||
attributes:
|
|
||||||
value: |
|
|
||||||
Thanks for taking the time to fill out this Spark operator feature request!
|
|
||||||
- type: markdown
|
|
||||||
attributes:
|
|
||||||
value: |
|
|
||||||
- Please vote on this issue by adding a 👍 [reaction](https://blog.github.com/2016-03-10-add-reactions-to-pull-requests-issues-and-comments/) to the original issue to help the community and maintainers prioritize this request.
|
|
||||||
- Please do not leave "+1" or other comments that do not add relevant new information or questions, they generate extra noise for issue followers and do not help prioritize the request.
|
|
||||||
- If you are interested in working on this issue or have submitted a pull request, please leave a comment.
|
|
||||||
- type: textarea
|
|
||||||
id: feature
|
|
||||||
attributes:
|
|
||||||
label: What feature you would like to be added?
|
|
||||||
description: |
|
|
||||||
A clear and concise description of what you want to add to the Spark operator.
|
|
||||||
Please consider to write a Spark operator enhancement proposal if it is a large feature request.
|
|
||||||
validations:
|
|
||||||
required: true
|
|
||||||
- type: textarea
|
|
||||||
id: rationale
|
|
||||||
attributes:
|
|
||||||
label: Why is this needed?
|
|
||||||
- type: textarea
|
|
||||||
id: solution
|
|
||||||
attributes:
|
|
||||||
label: Describe the solution you would like
|
|
||||||
- type: textarea
|
|
||||||
id: alternatives
|
|
||||||
attributes:
|
|
||||||
label: Describe alternatives you have considered
|
|
||||||
- type: textarea
|
|
||||||
id: context
|
|
||||||
attributes:
|
|
||||||
label: Additional context
|
|
||||||
description: Add any other context or screenshots about the feature request here.
|
|
||||||
- type: input
|
|
||||||
id: votes
|
|
||||||
attributes:
|
|
||||||
label: Love this feature?
|
|
||||||
value: Give it a 👍 We prioritize the features with most 👍
|
|
|
@@ -0,0 +1,20 @@
+---
+name: Question
+about: I have a Question
+title: '[QUESTION] Brief description of the Question'
+labels: question
+---
+
+- [ ] ✋ I have searched the open/closed issues and my issue is not listed.
+
+#### Please describe your question here
+
+<!-- Provide as much information as possible to explain your question -->
+
+#### Provide a link to the example/module related to the question
+
+<!-- Please provide the link to the example related to this question from this repo -->
+
+#### Additional context
+
+<!-- Add any other context or screenshots about the question here -->
@ -1,30 +0,0 @@
|
||||||
name: Question
|
|
||||||
description: Ask question about the Spark operator.
|
|
||||||
labels:
|
|
||||||
- kind/question
|
|
||||||
- lifecycle/needs-triage
|
|
||||||
body:
|
|
||||||
- type: markdown
|
|
||||||
attributes:
|
|
||||||
value: |
|
|
||||||
Thanks for taking the time to fill out this question!
|
|
||||||
- type: textarea
|
|
||||||
id: feature
|
|
||||||
attributes:
|
|
||||||
label: What question do you want to ask?
|
|
||||||
description: |
|
|
||||||
A clear and concise description of what you want to ask about the Spark operator.
|
|
||||||
value: |
|
|
||||||
- [ ] ✋ I have searched the open/closed issues and my issue is not listed.
|
|
||||||
validations:
|
|
||||||
required: true
|
|
||||||
- type: textarea
|
|
||||||
id: rationale
|
|
||||||
attributes:
|
|
||||||
label: Additional context
|
|
||||||
description: Add any other context or screenshots about the question here.
|
|
||||||
- type: input
|
|
||||||
id: votes
|
|
||||||
attributes:
|
|
||||||
label: Have the same question?
|
|
||||||
value: Give it a 👍 We prioritize the question with most 👍
|
|
|
@@ -1,23 +1,18 @@
-<!-- Thanks for sending a pull request! Here are some tips for you:
-1. If this is your first time, check our contributor guidelines: https://www.kubeflow.org/docs/about/contributing
-2. To know more about how to develop with the Spark operator, check the developer guide: https://www.kubeflow.org/docs/components/spark-operator/developer-guide/
-3. If you want *faster* PR reviews, check how: https://git.k8s.io/community/contributors/guide/pull-requests.md#best-practices-for-faster-reviews
-4. Please open an issue to discuss significant work before you start. We appreciate your contributions and don't want your efforts to go to waste!
--->
+### 🛑 Important:
+Please open an issue to discuss significant work before you start. We appreciate your contributions and don't want your efforts to go to waste!
+
+For guidelines on how to contribute, please review the [CONTRIBUTING.md](CONTRIBUTING.md) document.
 
 ## Purpose of this PR
 
-<!-- Provide a clear and concise description of the changes. Explain the motivation behind these changes and link to relevant issues or discussions. -->
+Provide a clear and concise description of the changes. Explain the motivation behind these changes and link to relevant issues or discussions.
 
 **Proposed changes:**
 
 - <Change 1>
 - <Change 2>
 - <Change 3>
 
 ## Change Category
 
-<!-- Indicate the type of change by marking the applicable boxes. -->
+Indicate the type of change by marking the applicable boxes:
 
 - [ ] Bugfix (non-breaking change which fixes an issue)
 - [ ] Feature (non-breaking change which adds functionality)
@@ -28,9 +23,9 @@
 <!-- Provide reasoning for the changes if not already covered in the description above. -->
 
-## Checklist
-
-<!-- Before submitting your PR, please review the following: -->
+## Checklist
+
+Before submitting your PR, please review the following:
 
 - [ ] I have conducted a self-review of my own code.
 - [ ] I have updated documentation accordingly.
@@ -40,3 +35,4 @@
 ### Additional Notes
 
 <!-- Include any additional notes or context that could be helpful for the reviewers here. -->
+
@@ -47,10 +47,6 @@ jobs:
           false
         fi
 
-      - name: Verify Codegen
-        run: |
-          make verify-codegen
-
       - name: Run go fmt check
         run: |
           make go-fmt
@@ -114,6 +110,22 @@ jobs:
       - name: Build Spark operator
         run: make build-operator
 
+  build-sparkctl:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout source code
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+
+      - name: Set up Go
+        uses: actions/setup-go@v5
+        with:
+          go-version-file: go.mod
+
+      - name: Build sparkctl
+        run: make build-sparkctl
+
   build-helm-chart:
     runs-on: ubuntu-latest
     steps:
@@ -140,7 +152,7 @@ jobs:
           version: v3.14.3
 
       - name: Set up chart-testing
-        uses: helm/chart-testing-action@v2.7.0
+        uses: helm/chart-testing-action@v2.6.1
 
       - name: Generate manifests
         run: |
@@ -154,9 +166,6 @@ jobs:
       - name: Detect CRDs drift between chart and manifest
         run: make detect-crds-drift
 
-      - name: Run helm unittest
-        run: make helm-unittest
-
       - name: Run chart-testing (list-changed)
         id: list-changed
         env:
@@ -184,7 +193,7 @@ jobs:
 
       - name: setup minikube
         if: steps.list-changed.outputs.changed == 'true'
-        uses: manusa/actions-setup-minikube@v2.14.0
+        uses: manusa/actions-setup-minikube@v2.12.0
         with:
           minikube version: v1.33.0
           kubernetes version: v1.30.0
@@ -194,24 +203,12 @@ jobs:
       - name: Run chart-testing (install)
         if: steps.list-changed.outputs.changed == 'true'
         run: |
-          docker build -t ghcr.io/kubeflow/spark-operator/controller:local .
-          minikube image load ghcr.io/kubeflow/spark-operator/controller:local
-          ct install --target-branch ${{ steps.get_branch.outputs.BRANCH }}
+          docker build -t docker.io/kubeflow/spark-operator:local .
+          minikube image load docker.io/kubeflow/spark-operator:local
+          ct install
 
   e2e-test:
     runs-on: ubuntu-latest
-    strategy:
-      matrix:
-        k8s_version:
-          - v1.24.17
-          - v1.25.16
-          - v1.26.15
-          - v1.27.16
-          - v1.28.15
-          - v1.29.12
-          - v1.30.8
-          - v1.31.4
-          - v1.32.0
     steps:
       - name: Checkout source code
         uses: actions/checkout@v4
@@ -224,10 +221,11 @@ jobs:
          go-version-file: go.mod
 
       - name: Create a Kind cluster
-        run: make kind-create-cluster KIND_K8S_VERSION=${{ matrix.k8s_version }}
+        run: make kind-create-cluster
 
       - name: Build and load image to Kind cluster
-        run: make kind-load-image IMAGE_TAG=local
+        run: |
+          make kind-load-image IMAGE_TAG=local
 
       - name: Run e2e tests
         run: make e2e-test
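The `make` targets in the e2e job above suggest a local reproduction path. A rough sketch, assuming Docker, kind, and the repository's Makefile targets behave as named in the workflow:

```bash
# Approximate local equivalent of the e2e-test job steps above
# (assumes docker and kind are installed; target names come from the workflow).
make kind-create-cluster              # create a local Kind cluster
make kind-load-image IMAGE_TAG=local  # build the operator image and load it into the cluster
make e2e-test                         # run the e2e suite against the cluster
```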
@@ -13,8 +13,8 @@ concurrency:
 
 env:
   SEMVER_PATTERN: '^v([0-9]+)\.([0-9]+)\.([0-9]+)(-rc\.([0-9]+))?$'
-  IMAGE_REGISTRY: ghcr.io
-  IMAGE_REPOSITORY: kubeflow/spark-operator/controller
+  IMAGE_REGISTRY: docker.io
+  IMAGE_REPOSITORY: kubeflow/spark-operator
 
 jobs:
   check-release:
@@ -90,6 +90,10 @@ jobs:
       - name: Read version from VERSION file
         run: |
           VERSION=$(cat VERSION)
+          if [[ ! ${VERSION} =~ ${{ env.SEMVER_PATTERN }} ]]; then
+            echo "Version '${VERSION}' does not match semver pattern."
+            exit 1
+          fi
           echo "VERSION=${VERSION}" >> $GITHUB_ENV
 
       - name: Docker meta
@@ -110,8 +114,8 @@ jobs:
         uses: docker/login-action@v3
         with:
           registry: ${{ env.IMAGE_REGISTRY }}
-          username: ${{ github.actor }}
-          password: ${{ secrets.GITHUB_TOKEN }}
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
 
       - name: Build and push by digest
         id: build
@@ -172,8 +176,8 @@ jobs:
         uses: docker/login-action@v3
         with:
           registry: ${{ env.IMAGE_REGISTRY }}
-          username: ${{ github.actor }}
-          password: ${{ secrets.GITHUB_TOKEN }}
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
 
       - name: Create manifest list and push
         working-directory: /tmp/digests
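The validation step added in the hunk above relies on bash's `[[ =~ ]]` regex operator against `SEMVER_PATTERN`. A minimal sketch of the same check outside the workflow (the sample tags are illustrative, not from the repository):

```bash
#!/usr/bin/env bash
# Same expression as the workflow's SEMVER_PATTERN environment variable.
SEMVER_PATTERN='^v([0-9]+)\.([0-9]+)\.([0-9]+)(-rc\.([0-9]+))?$'

# v2.2.0 and v2.2.0-rc.1 should match; 2.2.0 (no leading v) and v2.2 (no patch) should not.
for version in v2.2.0 v2.2.0-rc.1 2.2.0 v2.2; do
  if [[ ${version} =~ ${SEMVER_PATTERN} ]]; then
    echo "${version}: ok (major=${BASH_REMATCH[1]}, minor=${BASH_REMATCH[2]}, patch=${BASH_REMATCH[3]})"
  else
    echo "${version}: does not match semver pattern"
  fi
done
```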
@@ -1,39 +0,0 @@
-name: Mark stale issues and pull requests
-
-on:
-  schedule:
-    - cron: "0 */2 * * *"
-
-jobs:
-  stale:
-    runs-on: ubuntu-latest
-
-    permissions:
-      issues: write
-      pull-requests: write
-
-    steps:
-      - uses: actions/stale@v9
-        with:
-          repo-token: ${{ secrets.GITHUB_TOKEN }}
-          days-before-stale: 90
-          days-before-close: 20
-          operations-per-run: 200
-          stale-issue-message: >
-            This issue has been automatically marked as stale because it has not had
-            recent activity. It will be closed if no further activity occurs. Thank you
-            for your contributions.
-          close-issue-message: >
-            This issue has been automatically closed because it has not had recent
-            activity. Please comment "/reopen" to reopen it.
-          stale-issue-label: lifecycle/stale
-          exempt-issue-labels: lifecycle/frozen
-          stale-pr-message: >
-            This pull request has been automatically marked as stale because it has not had
-            recent activity. It will be closed if no further activity occurs. Thank you
-            for your contributions.
-          close-pr-message: >
-            This pull request has been automatically closed because it has not had recent
-            activity. Please comment "/reopen" to reopen it.
-          stale-pr-label: lifecycle/stale
-          exempt-pr-labels: lifecycle/frozen
@ -1,32 +0,0 @@
|
||||||
name: Trivy image scanning
|
|
||||||
on:
|
|
||||||
workflow_dispatch:
|
|
||||||
schedule:
|
|
||||||
- cron: '0 0 * * 1' # Every Monday at 00:00
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
image-scanning:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- name: Checkout code
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
|
|
||||||
- name: Add image to environment
|
|
||||||
run: make print-IMAGE >> $GITHUB_ENV
|
|
||||||
|
|
||||||
- name: trivy scan for github security tab
|
|
||||||
uses: aquasecurity/trivy-action@0.32.0
|
|
||||||
with:
|
|
||||||
image-ref: '${{ env.IMAGE }}'
|
|
||||||
format: 'sarif'
|
|
||||||
ignore-unfixed: true
|
|
||||||
vuln-type: 'os,library'
|
|
||||||
severity: 'CRITICAL,HIGH'
|
|
||||||
output: 'trivy-results.sarif'
|
|
||||||
timeout: 30m0s
|
|
||||||
|
|
||||||
- name: Upload Trivy scan results to GitHub Security tab
|
|
||||||
uses: github/codeql-action/upload-sarif@v3
|
|
||||||
if: always()
|
|
||||||
with:
|
|
||||||
sarif_file: 'trivy-results.sarif'
|
|
|
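For reference, the scan step in the removed workflow corresponds roughly to the following trivy CLI invocation; this is a sketch, with `IMAGE` standing in for the value exported by `make print-IMAGE`:

```bash
# Rough CLI equivalent of the deleted trivy-action step (IMAGE is a placeholder).
trivy image \
  --format sarif \
  --output trivy-results.sarif \
  --ignore-unfixed \
  --vuln-type os,library \
  --severity CRITICAL,HIGH \
  --timeout 30m0s \
  "${IMAGE}"
```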
@@ -1,7 +1,11 @@
-.idea/
-.vscode/
 bin/
-codecov.yaml
+vendor/
 cover.out
-.DS_Store
-*.iml
+sparkctl/sparkctl
+sparkctl/sparkctl-linux-amd64
+sparkctl/sparkctl-darwin-amd64
+**/*.iml
+
+# Various IDEs
+.idea/
+.vscode/
@@ -1,10 +1,7 @@
-version: "2"
-
 run:
-  # Timeout for total work, e.g. 30s, 5m, 5m30s.
-  # If the value is lower or equal to 0, the timeout is disabled.
-  # Default: 0 (disabled)
-  timeout: 2m
+  # Timeout for analysis, e.g. 30s, 5m.
+  # Default: 1m
+  timeout: 1m
 
 linters:
   # Enable specific linters.
@@ -16,6 +13,8 @@ linters:
     - dupword
     # Tool for detection of FIXME, TODO and other comment keywords.
     # - godox
+    # Check import statements are formatted according to the 'goimport' command.
+    - goimports
     # Enforces consistent import aliases.
     - importas
     # Find code that shadows one of Go's predeclared identifiers.
@@ -27,28 +26,15 @@ linters:
     # Checks Go code for unused constants, variables, functions and types.
     - unused
 
-  settings:
-    importas:
-      # List of aliases
-      alias:
-        - pkg: k8s.io/api/admissionregistration/v1
-          alias: admissionregistrationv1
-        - pkg: k8s.io/api/apps/v1
-          alias: appsv1
-        - pkg: k8s.io/api/batch/v1
-          alias: batchv1
-        - pkg: k8s.io/api/core/v1
-          alias: corev1
-        - pkg: k8s.io/api/extensions/v1beta1
-          alias: extensionsv1beta1
-        - pkg: k8s.io/api/networking/v1
-          alias: networkingv1
-        - pkg: k8s.io/apimachinery/pkg/apis/meta/v1
-          alias: metav1
-        - pkg: sigs.k8s.io/controller-runtime
-          alias: ctrl
-
 issues:
+  # Which dirs to exclude: issues from them won't be reported.
+  # Can use regexp here: `generated.*`, regexp is applied on full path,
+  # including the path prefix if one is set.
+  # Default dirs are skipped independently of this option's value (see exclude-dirs-use-default).
+  # "/" will be replaced by current OS file path separator to properly work on Windows.
+  # Default: []
+  exclude-dirs:
+    - sparkctl
   # Maximum issues count per one linter.
   # Set to 0 to disable.
   # Default: 50
@@ -58,8 +44,23 @@ issues:
   # Default: 3
   max-same-issues: 3
 
-formatters:
-  enable:
-    # Check import statements are formatted according to the 'goimport' command.
-    - goimports
+linters-settings:
+  importas:
+    # List of aliases
+    alias:
+      - pkg: k8s.io/api/admissionregistration/v1
+        alias: admissionregistrationv1
+      - pkg: k8s.io/api/apps/v1
+        alias: appsv1
+      - pkg: k8s.io/api/batch/v1
+        alias: batchv1
+      - pkg: k8s.io/api/core/v1
+        alias: corev1
+      - pkg: k8s.io/api/extensions/v1beta1
+        alias: extensionsv1beta1
+      - pkg: k8s.io/api/networking/v1
+        alias: networkingv1
+      - pkg: k8s.io/apimachinery/pkg/apis/meta/v1
+        alias: metav1
+      - pkg: sigs.k8s.io/controller-runtime
+        alias: ctrl
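Either form of the config is picked up automatically from the repository root. A minimal sketch of exercising it locally, assuming golangci-lint is installed:

```bash
# Run the configured linters; the timeout comes from the run section of the config file.
golangci-lint run ./...

# The configured timeout can also be overridden on the command line if a run needs longer.
golangci-lint run --timeout 5m ./...
```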
@@ -4,8 +4,6 @@ Below are the adopters of project Spark Operator. If you are using Spark Operato
 
 | Organization | Contact (GitHub User Name) | Environment | Description of Use |
 | ------------- | ------------- | ------------- | ------------- |
-| [Alibaba Cloud](https://www.alibabacloud.com) | [@ChenYi015](https://github.com/ChenYi015) | Production | AI & Data Infrastructure |
-| [APRA AMCOS](https://www.apraamcos.com.au/) | @shuch3ng | Production | Data Platform |
 | [Beeline](https://beeline.ru) | @spestua | Evaluation | ML & Data Infrastructure |
 | Bringg | @EladDolev | Production | ML & Analytics Data Platform |
 | [Caicloud](https://intl.caicloud.io/) | @gaocegege | Production | Cloud-Native AI Platform |
@@ -15,7 +13,7 @@ Below are the adopters of project Spark Operator. If you are using Spark Operato
 | CloudZone | @iftachsc | Evaluation | Big Data Analytics Consultancy |
 | Cyren | @avnerl | Evaluation | Data pipelines |
 | [C2FO](https://www.c2fo.com/) | @vanhoale | Production | Data Platform / Data Infrastructure |
-| [Spot by Netapp](https://spot.io/product/ocean-apache-spark/) | @ImpSy | Production | Managed Spark Platform |
+| [Data Mechanics](https://www.datamechanics.co) | @jrj-d | Production | Managed Spark Platform |
 | [DeepCure](https://www.deepcure.ai) | @mschroering | Production | Spark / ML |
 | [DiDi](https://www.didiglobal.com) | @Run-Lin | Evaluation | Data Infrastructure |
 | Exacaster | @minutis | Evaluation | Data pipelines |
@@ -33,7 +31,6 @@ Below are the adopters of project Spark Operator. If you are using Spark Operato
 | [Molex](https://www.molex.com/) | @AshishPushpSingh | Evaluation/Production | Data Platform |
 | [MongoDB](https://www.mongodb.com) | @chickenpopcorn | Production | Data Infrastructure |
 | Nielsen Identity Engine | @roitvt | Evaluation | Data pipelines |
-| [Ninja Van](https://tech.ninjavan.co/) | @hongshaoyang | Production | Data Infrastructure |
 | [PUBG](https://careers.pubg.com/#/en/) | @jacobhjkim | Production | ML & Data Infrastructure |
 | [Qualytics](https://www.qualytics.co/) | @josecsotomorales | Production | Data Quality Platform |
 | Riskified | @henbh | Evaluation | Analytics Data Platform |
723
CHANGELOG.md
723
CHANGELOG.md
|
@ -1,723 +0,0 @@
|
||||||
# Changelog
|
|
||||||
|
|
||||||
## [v2.2.1](https://github.com/kubeflow/spark-operator/tree/v2.2.1) (2025-06-27)
|
|
||||||
|
|
||||||
### Features
|
|
||||||
|
|
||||||
- Customize ingress URL with Spark application ID ([#2554](https://github.com/kubeflow/spark-operator/pull/2554) by [@ChenYi015](https://github.com/ChenYi015))
|
|
||||||
- Make default ingress tls and annotations congurable in the helm config ([#2513](https://github.com/kubeflow/spark-operator/pull/2513) by [@Tom-Newton](https://github.com/Tom-Newton))
|
|
||||||
- Use code-generator for clientset, informers, listers ([#2563](https://github.com/kubeflow/spark-operator/pull/2563) by [@jbhalodia-slack](https://github.com/jbhalodia-slack))
|
|
||||||
|
|
||||||
### Misc
|
|
||||||
|
|
||||||
- add driver ingress unit tests ([#2552](https://github.com/kubeflow/spark-operator/pull/2552) by [@nabuskey](https://github.com/nabuskey))
|
|
||||||
- Get logger from context ([#2551](https://github.com/kubeflow/spark-operator/pull/2551) by [@ChenYi015](https://github.com/ChenYi015))
|
|
||||||
- Update golangci lint ([#2560](https://github.com/kubeflow/spark-operator/pull/2560) by [@joshuacuellar1](https://github.com/joshuacuellar1))
|
|
||||||
|
|
||||||
### Dependencies
|
|
||||||
|
|
||||||
- Bump aquasecurity/trivy-action from 0.30.0 to 0.31.0 ([#2557](https://github.com/kubeflow/spark-operator/pull/2557) by [@dependabot[bot]](https://github.com/apps/dependabot))
|
|
||||||
- Bump github.com/prometheus/client_golang from 1.21.1 to 1.22.0 ([#2548](https://github.com/kubeflow/spark-operator/pull/2548) by [@dependabot[bot]](https://github.com/apps/dependabot))
|
|
||||||
- Bump sigs.k8s.io/scheduler-plugins from 0.30.6 to 0.31.8 ([#2549](https://github.com/kubeflow/spark-operator/pull/2549) by [@dependabot[bot]](https://github.com/apps/dependabot))
|
|
||||||
- Bump golang.org/x/mod from 0.24.0 to 0.25.0 ([#2566](https://github.com/kubeflow/spark-operator/pull/2566) by [@dependabot[bot]](https://github.com/apps/dependabot))
|
|
||||||
- Bump github.com/go-logr/logr from 1.4.2 to 1.4.3 ([#2567](https://github.com/kubeflow/spark-operator/pull/2567) by [@dependabot[bot]](https://github.com/apps/dependabot))
|
|
||||||
|
|
||||||
## [v2.2.0](https://github.com/kubeflow/spark-operator/tree/v2.2.0) (2025-05-29)
|
|
||||||
|
|
||||||
### Features
|
|
||||||
|
|
||||||
- Upgrade to Spark 3.5.5 ([#2490](https://github.com/kubeflow/spark-operator/pull/2490) by [@jacobsalway](https://github.com/jacobsalway))
|
|
||||||
- Add timeZone to ScheduledSparkApplication ([#2471](https://github.com/kubeflow/spark-operator/pull/2471) by [@jacobsalway](https://github.com/jacobsalway))
|
|
||||||
- Enable the override of MemoryLimit through webhook ([#2478](https://github.com/kubeflow/spark-operator/pull/2478) by [@danielrsfreitas](https://github.com/danielrsfreitas))
|
|
||||||
- Add ShuffleTrackingEnabled to DynamicAllocation struct to allow disabling shuffle tracking ([#2511](https://github.com/kubeflow/spark-operator/pull/2511) by [@jbhalodia-slack](https://github.com/jbhalodia-slack))
|
|
||||||
- Define SparkApplicationSubmitter interface to allow customizing submitting mechanism ([#2500](https://github.com/kubeflow/spark-operator/pull/2500) by [@ChenYi015](https://github.com/ChenYi015))
|
|
||||||
- Add support for using cert manager to generate webhook certificates ([#2373](https://github.com/kubeflow/spark-operator/pull/2373) by [@ChenYi015](https://github.com/ChenYi015))
|
|
||||||
|
|
||||||
### Bug Fixes
|
|
||||||
|
|
||||||
- fix: add webhook cert validity checking ([#2489](https://github.com/kubeflow/spark-operator/pull/2489) by [@teejaded](https://github.com/teejaded))
|
|
||||||
- fix and add back unit tests ([#2532](https://github.com/kubeflow/spark-operator/pull/2532) by [@nabuskey](https://github.com/nabuskey))
|
|
||||||
- fix volcano tests ([#2533](https://github.com/kubeflow/spark-operator/pull/2533) by [@nabuskey](https://github.com/nabuskey))
|
|
||||||
- Add v2 to module path ([#2515](https://github.com/kubeflow/spark-operator/pull/2515) by [@ChenYi015](https://github.com/ChenYi015))
|
|
||||||
- #2525 spark metrics in depends on prometheus ([#2529](https://github.com/kubeflow/spark-operator/pull/2529) by [@blcksrx](https://github.com/blcksrx))
|
|
||||||
|
|
||||||
### Misc
|
|
||||||
|
|
||||||
- Add APRA AMCOS to adopters ([#2485](https://github.com/kubeflow/spark-operator/pull/2485) by [@shuch3ng](https://github.com/shuch3ng))
|
|
||||||
- Bump github.com/stretchr/testify from 1.9.0 to 1.10.0 ([#2488](https://github.com/kubeflow/spark-operator/pull/2488) by [@dependabot[bot]](https://github.com/apps/dependabot))
|
|
||||||
- Bump github.com/prometheus/client_golang from 1.20.5 to 1.21.1 ([#2487](https://github.com/kubeflow/spark-operator/pull/2487) by [@dependabot[bot]](https://github.com/apps/dependabot))
|
|
||||||
- Bump sigs.k8s.io/controller-runtime from 0.20.1 to 0.20.4 ([#2486](https://github.com/kubeflow/spark-operator/pull/2486) by [@dependabot[bot]](https://github.com/apps/dependabot))
|
|
||||||
- Deprecating sparkctl ([#2484](https://github.com/kubeflow/spark-operator/pull/2484) by [@vikas-saxena02](https://github.com/vikas-saxena02))
|
|
||||||
- Changing image repo from docker.io to ghcr.io ([#2483](https://github.com/kubeflow/spark-operator/pull/2483) by [@vikas-saxena02](https://github.com/vikas-saxena02))
|
|
||||||
- Upgrade Golang to 1.24.1 and golangci-lint to 1.64.8 ([#2494](https://github.com/kubeflow/spark-operator/pull/2494) by [@jacobsalway](https://github.com/jacobsalway))
|
|
||||||
- Bump helm.sh/helm/v3 from 3.16.2 to 3.17.3 ([#2503](https://github.com/kubeflow/spark-operator/pull/2503) by [@dependabot[bot]](https://github.com/apps/dependabot))
|
|
||||||
- Add changelog for v2.1.1 ([#2504](https://github.com/kubeflow/spark-operator/pull/2504) by [@ChenYi015](https://github.com/ChenYi015))
|
|
||||||
- Remove sparkctl ([#2466](https://github.com/kubeflow/spark-operator/pull/2466) by [@ChenYi015](https://github.com/ChenYi015))
|
|
||||||
- Bump github.com/spf13/viper from 1.19.0 to 1.20.1 ([#2496](https://github.com/kubeflow/spark-operator/pull/2496) by [@dependabot[bot]](https://github.com/apps/dependabot))
|
|
||||||
- Bump golang.org/x/net from 0.37.0 to 0.38.0 ([#2505](https://github.com/kubeflow/spark-operator/pull/2505) by [@dependabot[bot]](https://github.com/apps/dependabot))
|
|
||||||
- Remove clientset, informer and listers generated by code-generator ([#2506](https://github.com/kubeflow/spark-operator/pull/2506) by [@ChenYi015](https://github.com/ChenYi015))
|
|
||||||
- Remove v1beta1 API ([#2516](https://github.com/kubeflow/spark-operator/pull/2516) by [@ChenYi015](https://github.com/ChenYi015))
|
|
||||||
- add unit tests for driver and executor configs ([#2521](https://github.com/kubeflow/spark-operator/pull/2521) by [@nabuskey](https://github.com/nabuskey))
|
|
||||||
- Adding securityContext to spark examples ([#2530](https://github.com/kubeflow/spark-operator/pull/2530) by [@tarekabouzeid](https://github.com/tarekabouzeid))
|
|
||||||
- Bump github.com/spf13/cobra from 1.8.1 to 1.9.1 ([#2497](https://github.com/kubeflow/spark-operator/pull/2497) by [@dependabot[bot]](https://github.com/apps/dependabot))
|
|
||||||
- Bump golang.org/x/mod from 0.23.0 to 0.24.0 ([#2495](https://github.com/kubeflow/spark-operator/pull/2495) by [@dependabot[bot]](https://github.com/apps/dependabot))
|
|
||||||
- Adding Manabu to the reviewers ([#2522](https://github.com/kubeflow/spark-operator/pull/2522) by [@vara-bonthu](https://github.com/vara-bonthu))
|
|
||||||
- Bump manusa/actions-setup-minikube from 2.13.1 to 2.14.0 ([#2523](https://github.com/kubeflow/spark-operator/pull/2523) by [@dependabot[bot]](https://github.com/apps/dependabot))
|
|
||||||
- Bump k8s.io dependencies to v0.32.5 ([#2540](https://github.com/kubeflow/spark-operator/pull/2540) by [@ChenYi015](https://github.com/ChenYi015))
|
|
||||||
- Pass the correct LDFLAGS when building the operator image ([#2541](https://github.com/kubeflow/spark-operator/pull/2541) by [@ChenYi015](https://github.com/ChenYi015))
|
|
||||||
|
|
||||||
[Full Changelog](https://github.com/kubeflow/spark-operator/compare/v2.1.1...v2.2.0)
|
|
||||||
|
|
||||||
## [v2.1.1](https://github.com/kubeflow/spark-operator/tree/v2.1.1) (2025-03-21)
|
|
||||||
|
|
||||||
### Features
|
|
||||||
|
|
||||||
- Adding seccompProfile RuntimeDefault ([#2397](https://github.com/kubeflow/spark-operator/pull/2397) by [@tarekabouzeid](https://github.com/tarekabouzeid))
|
|
||||||
- Add option for disabling leader election ([#2423](https://github.com/kubeflow/spark-operator/pull/2423) by [@ChenYi015](https://github.com/ChenYi015))
|
|
||||||
- Controller should only be granted event permissions in spark job namespaces ([#2426](https://github.com/kubeflow/spark-operator/pull/2426) by [@ChenYi015](https://github.com/ChenYi015))
|
|
||||||
- Make image optional ([#2439](https://github.com/kubeflow/spark-operator/pull/2439) by [@jbhalodia-slack](https://github.com/jbhalodia-slack))
|
|
||||||
- Support non-standard Spark container names ([#2441](https://github.com/kubeflow/spark-operator/pull/2441) by [@jbhalodia-slack](https://github.com/jbhalodia-slack))
|
|
||||||
- add support for metrics-job-start-latency-buckets flag in helm ([#2450](https://github.com/kubeflow/spark-operator/pull/2450) by [@nabuskey](https://github.com/nabuskey))
|
|
||||||
|
|
||||||
### Bug Fixes
|
|
||||||
|
|
||||||
- fix: webhook fail to add lifecycle to Spark3 executor pods ([#2458](https://github.com/kubeflow/spark-operator/pull/2458) by [@pvbouwel](https://github.com/pvbouwel))
|
|
||||||
- change env in executorSecretOption ([#2467](https://github.com/kubeflow/spark-operator/pull/2467) by [@TQJADE](https://github.com/TQJADE))
|
|
||||||
|
|
||||||
### Misc
|
|
||||||
|
|
||||||
- Move sparkctl to cmd directory ([#2347](https://github.com/kubeflow/spark-operator/pull/2347) by [@ChenYi015](https://github.com/ChenYi015))
|
|
||||||
- Bump golang.org/x/net from 0.30.0 to 0.32.0 ([#2350](https://github.com/kubeflow/spark-operator/pull/2350) by [@dependabot[bot]](https://github.com/apps/dependabot))
|
|
||||||
- Bump golang.org/x/crypto from 0.30.0 to 0.31.0 ([#2365](https://github.com/kubeflow/spark-operator/pull/2365) by [@dependabot[bot]](https://github.com/apps/dependabot))
|
|
||||||
- add an example of using prometheus servlet ([#2403](https://github.com/kubeflow/spark-operator/pull/2403) by [@nabuskey](https://github.com/nabuskey))
|
|
||||||
- Remove dependency on `k8s.io/kubernetes` ([#2398](https://github.com/kubeflow/spark-operator/pull/2398) by [@jacobsalway](https://github.com/jacobsalway))
|
|
||||||
- fix make deploy and install ([#2412](https://github.com/kubeflow/spark-operator/pull/2412) by [@nabuskey](https://github.com/nabuskey))
|
|
||||||
- Add helm unittest step to integration test workflow ([#2424](https://github.com/kubeflow/spark-operator/pull/2424) by [@ChenYi015](https://github.com/ChenYi015))
|
|
||||||
- ensure passed context is used ([#2432](https://github.com/kubeflow/spark-operator/pull/2432) by [@nabuskey](https://github.com/nabuskey))
|
|
||||||
- Bump manusa/actions-setup-minikube from 2.13.0 to 2.13.1 ([#2390](https://github.com/kubeflow/spark-operator/pull/2390) by [@dependabot[bot]](https://github.com/apps/dependabot))
|
|
||||||
- Bump helm/chart-testing-action from 2.6.1 to 2.7.0 ([#2391](https://github.com/kubeflow/spark-operator/pull/2391) by [@dependabot[bot]](https://github.com/apps/dependabot))
|
|
||||||
- Bump golang.org/x/mod from 0.21.0 to 0.23.0 ([#2427](https://github.com/kubeflow/spark-operator/pull/2427) by [@dependabot[bot]](https://github.com/apps/dependabot))
|
|
||||||
- Bump github.com/golang/glog from 1.2.2 to 1.2.4 ([#2411](https://github.com/kubeflow/spark-operator/pull/2411) by [@dependabot[bot]](https://github.com/apps/dependabot))
|
|
||||||
- Bump golang.org/x/net from 0.32.0 to 0.35.0 ([#2428](https://github.com/kubeflow/spark-operator/pull/2428) by [@dependabot[bot]](https://github.com/apps/dependabot))
|
|
||||||
- Support Kubernetes 1.32 ([#2416](https://github.com/kubeflow/spark-operator/pull/2416) by [@jacobsalway](https://github.com/jacobsalway))
|
|
||||||
- use cmd context in sparkctl ([#2447](https://github.com/kubeflow/spark-operator/pull/2447) by [@nabuskey](https://github.com/nabuskey))
|
|
||||||
- Bump golang.org/x/net from 0.35.0 to 0.36.0 ([#2470](https://github.com/kubeflow/spark-operator/pull/2470) by [@dependabot[bot]](https://github.com/apps/dependabot))
|
|
||||||
- Bump aquasecurity/trivy-action from 0.29.0 to 0.30.0 ([#2475](https://github.com/kubeflow/spark-operator/pull/2475) by [@dependabot[bot]](https://github.com/apps/dependabot))
|
|
||||||
- Bump golang.org/x/net from 0.35.0 to 0.37.0 ([#2472](https://github.com/kubeflow/spark-operator/pull/2472) by [@dependabot[bot]](https://github.com/apps/dependabot))
|
|
||||||
- Bump github.com/containerd/containerd from 1.7.19 to 1.7.27 ([#2476](https://github.com/kubeflow/spark-operator/pull/2476) by [@dependabot[bot]](https://github.com/apps/dependabot))
|
|
||||||
- Bump k8s.io/apimachinery from 0.32.0 to 0.32.3 ([#2474](https://github.com/kubeflow/spark-operator/pull/2474) by [@dependabot[bot]](https://github.com/apps/dependabot))
|
|
||||||
- Bump github.com/aws/aws-sdk-go-v2/service/s3 from 1.66.0 to 1.78.2 ([#2473](https://github.com/kubeflow/spark-operator/pull/2473) by [@dependabot[bot]](https://github.com/apps/dependabot))
|
|
||||||
- Bump github.com/aws/aws-sdk-go-v2/config from 1.28.0 to 1.29.9 ([#2463](https://github.com/kubeflow/spark-operator/pull/2463) by [@dependabot[bot]](https://github.com/apps/dependabot))
|
|
||||||
- Bump sigs.k8s.io/scheduler-plugins from 0.29.8 to 0.30.6 ([#2444](https://github.com/kubeflow/spark-operator/pull/2444) by [@dependabot[bot]](https://github.com/apps/dependabot))
|
|
||||||
|
|
||||||
[Full Changelog](https://github.com/kubeflow/spark-operator/compare/v2.1.0...v2.1.1)
|
|
||||||
|
|
||||||
## [v2.1.0](https://github.com/kubeflow/spark-operator/tree/v2.1.0) (2024-12-06)
|
|
||||||
|
|
||||||
### New Features
|
|
||||||
|
|
||||||
- Upgrade to Spark 3.5.3 ([#2202](https://github.com/kubeflow/spark-operator/pull/2202) by [@jacobsalway](https://github.com/jacobsalway))
|
|
||||||
- feat: support archives param for spark-submit ([#2256](https://github.com/kubeflow/spark-operator/pull/2256) by [@kaka-zb](https://github.com/kaka-zb))
|
|
||||||
- Allow --ingress-class-name to be specified in chart ([#2278](https://github.com/kubeflow/spark-operator/pull/2278) by [@jacobsalway](https://github.com/jacobsalway))
|
|
||||||
- Update default container security context ([#2265](https://github.com/kubeflow/spark-operator/pull/2265) by [@ChenYi015](https://github.com/ChenYi015))
|
|
||||||
- Support pod template for Spark 3.x applications ([#2141](https://github.com/kubeflow/spark-operator/pull/2141) by [@ChenYi015](https://github.com/ChenYi015))
|
|
||||||
- Allow setting automountServiceAccountToken ([#2298](https://github.com/kubeflow/spark-operator/pull/2298) by [@Aranch](https://github.com/Aransh))
|
|
||||||
- Allow the Controller and Webhook Containers to run with the securityContext: readOnlyRootfilesystem: true ([#2282](https://github.com/kubeflow/spark-operator/pull/2282) by [@npgretz](https://github.com/npgretz))
|
|
||||||
- Use NSS_WRAPPER_PASSWD instead of /etc/passwd as in spark-operator image entrypoint.sh ([#2312](https://github.com/kubeflow/spark-operator/pull/2312) by [@Aakcht](https://github.com/Aakcht))
|
|
||||||
|
|
||||||
### Bug Fixes
|
|
||||||
|
|
||||||
- Minor fixes to e2e test `make` targets ([#2242](https://github.com/kubeflow/spark-operator/pull/2242) by [@Tom-Newton](https://github.com/Tom-Newton))
|
|
||||||
- Added off heap memory to calculation for YuniKorn gang scheduling ([#2209](https://github.com/kubeflow/spark-operator/pull/2209) by [@guangyu-yang-rokt](https://github.com/guangyu-yang-rokt))
|
|
||||||
- Add permissions to controller serviceaccount to list and watch ingresses ([#2246](https://github.com/kubeflow/spark-operator/pull/2246) by [@tcassaert](https://github.com/tcassaert))
|
|
||||||
- Make sure enable-ui-service flag is set to false when controller.uiService.enable is set to false ([#2261](https://github.com/kubeflow/spark-operator/pull/2261) by [@Roberdvs](https://github.com/Roberdvs))
|
|
||||||
- `omitempty` corrections ([#2255](https://github.com/kubeflow/spark-operator/pull/2255) by [@Tom-Newton](https://github.com/Tom-Newton))
|
|
||||||
- Fix retries ([#2241](https://github.com/kubeflow/spark-operator/pull/2241) by [@Tom-Newton](https://github.com/Tom-Newton))
|
|
||||||
- Fix: executor container security context does not work ([#2306](https://github.com/kubeflow/spark-operator/pull/2306) by [@ChenYi015](https://github.com/ChenYi015))
|
|
||||||
- Fix: should not add emptyDir sizeLimit conf if it is nil ([#2305](https://github.com/kubeflow/spark-operator/pull/2305) by [@ChenYi015](https://github.com/ChenYi015))
|
|
||||||
- Fix: should not add emptyDir sizeLimit conf on executor pods if it is nil ([#2316](https://github.com/kubeflow/spark-operator/pull/2316) by [@Cian911](https://github.com/Cian911))
|
|
||||||
- Truncate UI service name if over 63 characters ([#2311](https://github.com/kubeflow/spark-operator/pull/2311) by [@jacobsalway](https://github.com/jacobsalway))
|
|
||||||
- The webhook-key-name command-line param isn't taking effect ([#2344](https://github.com/kubeflow/spark-operator/pull/2344) by [@c-h-afzal](https://github.com/c-h-afzal))
|
|
||||||
- Robustness to driver pod taking time to create ([#2315](https://github.com/kubeflow/spark-operator/pull/2315) by [@Tom-Newton](https://github.com/Tom-Newton))
|
|
||||||
|
|
||||||
### Misc
|
|
||||||
|
|
||||||
- remove redundant test.sh file ([#2243](https://github.com/kubeflow/spark-operator/pull/2243) by [@ChenYi015](https://github.com/ChenYi015))
|
|
||||||
- Bump github.com/aws/aws-sdk-go-v2/config from 1.27.42 to 1.27.43 ([#2252](https://github.com/kubeflow/spark-operator/pull/2252) by [@dependabot[bot]](https://github.com/apps/dependabot))
|
|
||||||
- Bump manusa/actions-setup-minikube from 2.12.0 to 2.13.0 ([#2247](https://github.com/kubeflow/spark-operator/pull/2247) by [@dependabot[bot]](https://github.com/apps/dependabot))
|
|
||||||
- Bump golang.org/x/net from 0.29.0 to 0.30.0 ([#2251](https://github.com/kubeflow/spark-operator/pull/2251) by [@dependabot[bot]](https://github.com/apps/dependabot))
|
|
||||||
- Bump aquasecurity/trivy-action from 0.24.0 to 0.27.0 ([#2248](https://github.com/kubeflow/spark-operator/pull/2248) by [@dependabot[bot]](https://github.com/apps/dependabot))
|
|
||||||
- Bump gocloud.dev from 0.39.0 to 0.40.0 ([#2250](https://github.com/kubeflow/spark-operator/pull/2250) by [@dependabot[bot]](https://github.com/apps/dependabot))
|
|
||||||
- Add Quick Start guide to README ([#2259](https://github.com/kubeflow/spark-operator/pull/2259) by [@jacobsalway](https://github.com/jacobsalway))
|
|
||||||
- Bump github.com/aws/aws-sdk-go-v2/service/s3 from 1.63.3 to 1.65.3 ([#2249](https://github.com/kubeflow/spark-operator/pull/2249) by [@dependabot[bot]](https://github.com/apps/dependabot))
|
|
||||||
- Add release badge to README ([#2263](https://github.com/kubeflow/spark-operator/pull/2263) by [@jacobsalway](https://github.com/jacobsalway))
|
|
||||||
- Bump helm.sh/helm/v3 from 3.16.1 to 3.16.2 ([#2275](https://github.com/kubeflow/spark-operator/pull/2275) by [@dependabot[bot]](https://github.com/apps/dependabot))
|
|
||||||
- Bump github.com/prometheus/client_golang from 1.20.4 to 1.20.5 ([#2274](https://github.com/kubeflow/spark-operator/pull/2274) by [@dependabot[bot]](https://github.com/apps/dependabot))
|
|
||||||
- Bump cloud.google.com/go/storage from 1.44.0 to 1.45.0 ([#2273](https://github.com/kubeflow/spark-operator/pull/2273) by [@dependabot[bot]](https://github.com/apps/dependabot))
|
|
||||||
- Run e2e tests with Kubernetes version matrix ([#2266](https://github.com/kubeflow/spark-operator/pull/2266) by [@jacobsalway](https://github.com/jacobsalway))
|
|
||||||
- Bump aquasecurity/trivy-action from 0.27.0 to 0.28.0 ([#2270](https://github.com/kubeflow/spark-operator/pull/2270) by [@dependabot[bot]](https://github.com/apps/dependabot))
|
|
||||||
- Bump github.com/aws/aws-sdk-go-v2/service/s3 from 1.65.3 to 1.66.0 ([#2271](https://github.com/kubeflow/spark-operator/pull/2271) by [@dependabot[bot]](https://github.com/apps/dependabot))
|
|
||||||
- Bump github.com/aws/aws-sdk-go-v2/config from 1.27.43 to 1.28.0 ([#2272](https://github.com/kubeflow/spark-operator/pull/2272) by [@dependabot[bot]](https://github.com/apps/dependabot))
|
|
||||||
- Add workflow for releasing sparkctl binary ([#2264](https://github.com/kubeflow/spark-operator/pull/2264) by [@ChenYi015](https://github.com/ChenYi015))
|
|
||||||
- Bump `volcano.sh/apis` to 1.10.0 ([#2320](https://github.com/kubeflow/spark-operator/pull/2320) by [@jacobsalway](https://github.com/jacobsalway))
|
|
||||||
- Bump aquasecurity/trivy-action from 0.28.0 to 0.29.0 ([#2332](https://github.com/kubeflow/spark-operator/pull/2332) by [@dependabot[bot]](https://github.com/apps/dependabot))
|
|
||||||
- Bump github.com/onsi/ginkgo/v2 from 2.20.2 to 2.22.0 ([#2335](https://github.com/kubeflow/spark-operator/pull/2335) by [@dependabot[bot]](https://github.com/apps/dependabot))
|
|
||||||
- Move sparkctl to cmd directory ([#2347](https://github.com/kubeflow/spark-operator/pull/2347) by [@ChenYi015](https://github.com/ChenYi015))
|
|
||||||
|
|
||||||
[Full Changelog](https://github.com/kubeflow/spark-operator/compare/a8b5d6...v2.1.0 )
|
|
||||||
|
|
||||||
## [v2.0.2](https://github.com/kubeflow/spark-operator/tree/v2.0.2) (2024-10-10)
|
|
||||||
|
|
||||||
### Bug Fixes
|
|
||||||
|
|
||||||
- Fix ingress capability discovery ([#2201](https://github.com/kubeflow/spark-operator/pull/2201) by [@jacobsalway](https://github.com/jacobsalway))
|
|
||||||
- fix: imagePullPolicy was ignored ([#2222](https://github.com/kubeflow/spark-operator/pull/2222) by [@missedone](https://github.com/missedone))
|
|
||||||
- fix: spark-submission failed due to lack of permission by user `spark` ([#2223](https://github.com/kubeflow/spark-operator/pull/2223) by [@missedone](https://github.com/missedone))
|
|
||||||
- Remove `cap_net_bind_service` from image ([#2216](https://github.com/kubeflow/spark-operator/pull/2216) by [@jacobsalway](https://github.com/jacobsalway))
|
|
||||||
- fix: webhook panics due to logging ([#2232](https://github.com/kubeflow/spark-operator/pull/2232) by [@ChenYi015](https://github.com/ChenYi015))
|
|
||||||
|
|
||||||
### Misc
|
|
||||||
|
|
||||||
- Bump github.com/aws/aws-sdk-go-v2 from 1.30.5 to 1.31.0 ([#2207](https://github.com/kubeflow/spark-operator/pull/2207) by [@dependabot[bot]](https://github.com/apps/dependabot))
|
|
||||||
- Bump golang.org/x/net from 0.28.0 to 0.29.0 ([#2205](https://github.com/kubeflow/spark-operator/pull/2205) by [@dependabot[bot]](https://github.com/apps/dependabot))
|
|
||||||
- Bump github.com/docker/docker from 27.0.3+incompatible to 27.1.1+incompatible ([#2125](https://github.com/kubeflow/spark-operator/pull/2125) by [@dependabot[bot]](https://github.com/apps/dependabot))
|
|
||||||
- Bump github.com/aws/aws-sdk-go-v2/service/s3 from 1.58.3 to 1.63.3 ([#2206](https://github.com/kubeflow/spark-operator/pull/2206) by [@dependabot[bot]](https://github.com/apps/dependabot))
|
|
||||||
- Update integration test workflow and add golangci lint check ([#2197](https://github.com/kubeflow/spark-operator/pull/2197) by [@ChenYi015](https://github.com/ChenYi015))
|
|
||||||
- Bump github.com/aws/aws-sdk-go-v2 from 1.31.0 to 1.32.0 ([#2229](https://github.com/kubeflow/spark-operator/pull/2229) by [@dependabot[bot]](https://github.com/apps/dependabot))
|
|
||||||
- Bump cloud.google.com/go/storage from 1.43.0 to 1.44.0 ([#2228](https://github.com/kubeflow/spark-operator/pull/2228) by [@dependabot[bot]](https://github.com/apps/dependabot))
|
|
||||||
- Bump manusa/actions-setup-minikube from 2.11.0 to 2.12.0 ([#2226](https://github.com/kubeflow/spark-operator/pull/2226) by [@dependabot[bot]](https://github.com/apps/dependabot))
|
|
||||||
- Bump golang.org/x/time from 0.6.0 to 0.7.0 ([#2227](https://github.com/kubeflow/spark-operator/pull/2227) by [@dependabot[bot]](https://github.com/apps/dependabot))
|
|
||||||
- Bump github.com/aws/aws-sdk-go-v2/config from 1.27.33 to 1.27.42 ([#2231](https://github.com/kubeflow/spark-operator/pull/2231) by [@dependabot[bot]](https://github.com/apps/dependabot))
|
|
||||||
- Bump github.com/prometheus/client_golang from 1.19.1 to 1.20.4 ([#2204](https://github.com/kubeflow/spark-operator/pull/2204) by [@dependabot[bot]](https://github.com/apps/dependabot))
|
|
||||||
- Add check for generating manifests and code ([#2234](https://github.com/kubeflow/spark-operator/pull/2234) by [@ChenYi015](https://github.com/ChenYi015))
|
|
||||||
|
|
||||||
[Full Changelog](https://github.com/kubeflow/spark-operator/compare/v2.0.1...v2.0.2)
|
|
||||||
|
|
||||||
## [v2.0.1](https://github.com/kubeflow/spark-operator/tree/v2.0.1) (2024-09-26)
|
|
||||||
|
|
||||||
### New Features
|
|
||||||
|
|
||||||
- FEATURE: build operator image as non-root ([#2171](https://github.com/kubeflow/spark-operator/pull/2171) by [@ImpSy](https://github.com/ImpSy))
|
|
||||||
|
|
||||||
### Bug Fixes
|
|
||||||
|
|
||||||
- Update controller RBAC for ConfigMap and PersistentVolumeClaim ([#2187](https://github.com/kubeflow/spark-operator/pull/2187) by [@ChenYi015](https://github.com/ChenYi015))
|
|
||||||
|
|
||||||
### Misc
|
|
||||||
|
|
||||||
- Bump github.com/onsi/ginkgo/v2 from 2.19.0 to 2.20.2 ([#2188](https://github.com/kubeflow/spark-operator/pull/2188) by [@dependabot[bot]](https://github.com/apps/dependabot))
|
|
||||||
- Bump github.com/onsi/gomega from 1.33.1 to 1.34.2 ([#2189](https://github.com/kubeflow/spark-operator/pull/2189) by [@dependabot[bot]](https://github.com/apps/dependabot))
|
|
||||||
|
|
||||||
[Full Changelog](https://github.com/kubeflow/spark-operator/compare/v2.0.0...v2.0.1)
|
|
||||||
|
|
||||||

## [v2.0.0](https://github.com/kubeflow/spark-operator/tree/v2.0.0) (2024-09-23)

### Breaking Changes

- Use controller-runtime to reconstruct the Spark operator ([#2072](https://github.com/kubeflow/spark-operator/pull/2072) by [@ChenYi015](https://github.com/ChenYi015))
- feat: support driver and executor pods using different priorities ([#2146](https://github.com/kubeflow/spark-operator/pull/2146) by [@Kevinz857](https://github.com/Kevinz857))

### New Features

- Support gang scheduling with Yunikorn ([#2107](https://github.com/kubeflow/spark-operator/pull/2107) by [@jacobsalway](https://github.com/jacobsalway))
- Reintroduce option webhook.enable ([#2142](https://github.com/kubeflow/spark-operator/pull/2142) by [@ChenYi015](https://github.com/ChenYi015))
- Add default batch scheduler argument ([#2143](https://github.com/kubeflow/spark-operator/pull/2143) by [@jacobsalway](https://github.com/jacobsalway))
- Support extended kube-scheduler as batch scheduler ([#2136](https://github.com/kubeflow/spark-operator/pull/2136) by [@ChenYi015](https://github.com/ChenYi015))
- Set schedulerName to Yunikorn ([#2153](https://github.com/kubeflow/spark-operator/pull/2153) by [@jacobsalway](https://github.com/jacobsalway))
- Feature: Add pprof endpoint ([#2164](https://github.com/kubeflow/spark-operator/pull/2164) by [@ImpSy](https://github.com/ImpSy))

### Bug Fixes

- fix: Add default values for namespaces to match usage descriptions ([#2128](https://github.com/kubeflow/spark-operator/pull/2128) by [@snappyyouth](https://github.com/snappyyouth))
- Fix: Spark role binding did not render properly when setting the Spark service account name ([#2135](https://github.com/kubeflow/spark-operator/pull/2135) by [@ChenYi015](https://github.com/ChenYi015))
- fix: unable to set controller/webhook replicas to zero ([#2147](https://github.com/kubeflow/spark-operator/pull/2147) by [@ChenYi015](https://github.com/ChenYi015))
- Add support for setting Spark job namespaces to all namespaces ([#2123](https://github.com/kubeflow/spark-operator/pull/2123) by [@ChenYi015](https://github.com/ChenYi015))
- Fix: e2e test fails due to webhook not ready ([#2149](https://github.com/kubeflow/spark-operator/pull/2149) by [@ChenYi015](https://github.com/ChenYi015))
- fix: webhook not working when setting Spark job namespaces to empty ([#2163](https://github.com/kubeflow/spark-operator/pull/2163) by [@ChenYi015](https://github.com/ChenYi015))
- fix: the logger had an odd number of arguments, making it panic ([#2166](https://github.com/kubeflow/spark-operator/pull/2166) by [@tcassaert](https://github.com/tcassaert))
- Fix the make kind-delete-cluster target to avoid accidental kubeconfig deletion ([#2172](https://github.com/kubeflow/spark-operator/pull/2172) by [@ImpSy](https://github.com/ImpSy))
- Add specific error in log line when failed to create web UI service ([#2170](https://github.com/kubeflow/spark-operator/pull/2170) by [@tcassaert](https://github.com/tcassaert))
- Account for spark.executor.pyspark.memory in Yunikorn gang scheduling ([#2178](https://github.com/kubeflow/spark-operator/pull/2178) by [@jacobsalway](https://github.com/jacobsalway))
- Fix: Spark application does not respect time to live seconds ([#2165](https://github.com/kubeflow/spark-operator/pull/2165) by [@ChenYi015](https://github.com/ChenYi015))

### Misc

- Update workflow and docs for releasing Spark operator ([#2089](https://github.com/kubeflow/spark-operator/pull/2089) by [@ChenYi015](https://github.com/ChenYi015))
- Fix broken integration test CI ([#2109](https://github.com/kubeflow/spark-operator/pull/2109) by [@ChenYi015](https://github.com/ChenYi015))
- Fix CI: environment variable BRANCH is missing ([#2111](https://github.com/kubeflow/spark-operator/pull/2111) by [@ChenYi015](https://github.com/ChenYi015))
- Update Makefile for building sparkctl ([#2119](https://github.com/kubeflow/spark-operator/pull/2119) by [@ChenYi015](https://github.com/ChenYi015))
- Update release workflow and docs ([#2121](https://github.com/kubeflow/spark-operator/pull/2121) by [@ChenYi015](https://github.com/ChenYi015))
- Run e2e tests on Kind ([#2148](https://github.com/kubeflow/spark-operator/pull/2148) by [@jacobsalway](https://github.com/jacobsalway))
- Upgrade to Go 1.23.1 ([#2155](https://github.com/kubeflow/spark-operator/pull/2155) by [@jacobsalway](https://github.com/jacobsalway))
- Upgrade to Spark 3.5.2 ([#2154](https://github.com/kubeflow/spark-operator/pull/2154) by [@jacobsalway](https://github.com/jacobsalway))
- Bump sigs.k8s.io/scheduler-plugins from 0.29.7 to 0.29.8 ([#2159](https://github.com/kubeflow/spark-operator/pull/2159) by [@dependabot[bot]](https://github.com/apps/dependabot))
- Bump gocloud.dev from 0.37.0 to 0.39.0 ([#2160](https://github.com/kubeflow/spark-operator/pull/2160) by [@dependabot[bot]](https://github.com/apps/dependabot))
- Update e2e tests ([#2161](https://github.com/kubeflow/spark-operator/pull/2161) by [@ChenYi015](https://github.com/ChenYi015))
- Upgrade to Spark 3.5.2 (#2012) ([#2157](https://github.com/kubeflow/spark-operator/pull/2157) by [@ha2hi](https://github.com/ha2hi))
- Bump github.com/aws/aws-sdk-go-v2/config from 1.27.27 to 1.27.33 ([#2174](https://github.com/kubeflow/spark-operator/pull/2174) by [@dependabot[bot]](https://github.com/apps/dependabot))
- Bump helm.sh/helm/v3 from 3.15.3 to 3.16.1 ([#2173](https://github.com/kubeflow/spark-operator/pull/2173) by [@dependabot[bot]](https://github.com/apps/dependabot))
- Implement workflow to scan the latest released Docker image ([#2177](https://github.com/kubeflow/spark-operator/pull/2177) by [@ImpSy](https://github.com/ImpSy))

### What's Changed

- Cherry pick #2081 #2046 #2091 #2072 by @ChenYi015 in <https://github.com/kubeflow/spark-operator/pull/2108>
- Cherry pick #2089 #2109 #2111 by @ChenYi015 in <https://github.com/kubeflow/spark-operator/pull/2110>
- Release v2.0.0-rc.0 by @ChenYi015 in <https://github.com/kubeflow/spark-operator/pull/2115>
- Cherry pick commits for releasing v2.0.0 by @ChenYi015 in <https://github.com/kubeflow/spark-operator/pull/2156>
- Release v2.0.0 by @ChenYi015 in <https://github.com/kubeflow/spark-operator/pull/2182>

[Full Changelog](https://github.com/kubeflow/spark-operator/compare/v1beta2-1.6.2-3.5.0...v2.0.0)

## [v2.0.0-rc.0](https://github.com/kubeflow/spark-operator/tree/v2.0.0-rc.0) (2024-08-09)

### Breaking Changes

- Use controller-runtime to reconstruct the Spark operator ([#2072](https://github.com/kubeflow/spark-operator/pull/2072) by [@ChenYi015](https://github.com/ChenYi015))

### Misc

- Fix CI: environment variable BRANCH is missing ([#2111](https://github.com/kubeflow/spark-operator/pull/2111) by [@ChenYi015](https://github.com/ChenYi015))
- Fix broken integration test CI ([#2109](https://github.com/kubeflow/spark-operator/pull/2109) by [@ChenYi015](https://github.com/ChenYi015))
- Update workflow and docs for releasing Spark operator ([#2089](https://github.com/kubeflow/spark-operator/pull/2089) by [@ChenYi015](https://github.com/ChenYi015))

### What's Changed

- Release v2.0.0-rc.0 ([#2115](https://github.com/kubeflow/spark-operator/pull/2115) by [@ChenYi015](https://github.com/ChenYi015))
- Cherry pick #2089 #2109 #2111 ([#2110](https://github.com/kubeflow/spark-operator/pull/2110) by [@ChenYi015](https://github.com/ChenYi015))
- Cherry pick #2081 #2046 #2091 #2072 ([#2108](https://github.com/kubeflow/spark-operator/pull/2108) by [@ChenYi015](https://github.com/ChenYi015))

[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.4.3...v2.0.0-rc.0)

## [spark-operator-chart-1.4.6](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.4.6) (2024-07-26)

- Add topologySpreadConstraints ([#2091](https://github.com/kubeflow/spark-operator/pull/2091) by [@jbhalodia-slack](https://github.com/jbhalodia-slack))
- Add Alibaba Cloud to adopters ([#2097](https://github.com/kubeflow/spark-operator/pull/2097) by [@ChenYi015](https://github.com/ChenYi015))
- Update Stale bot settings ([#2095](https://github.com/kubeflow/spark-operator/pull/2095) by [@andreyvelich](https://github.com/andreyvelich))
- Add @ChenYi015 to approvers ([#2096](https://github.com/kubeflow/spark-operator/pull/2096) by [@ChenYi015](https://github.com/ChenYi015))
- Add CHANGELOG.md file and use python script to generate it automatically ([#2087](https://github.com/kubeflow/spark-operator/pull/2087) by [@ChenYi015](https://github.com/ChenYi015))

[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.4.5...spark-operator-chart-1.4.6)

## [spark-operator-chart-1.4.5](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.4.5) (2024-07-22)

- Update the process to build api-docs, generate CRD manifests and code ([#2046](https://github.com/kubeflow/spark-operator/pull/2046) by [@ChenYi015](https://github.com/ChenYi015))
- Add workflow for closing stale issues and PRs ([#2073](https://github.com/kubeflow/spark-operator/pull/2073) by [@ChenYi015](https://github.com/ChenYi015))

[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.4.4...spark-operator-chart-1.4.5)

## [spark-operator-chart-1.4.4](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.4.4) (2024-07-22)

- Update helm docs ([#2081](https://github.com/kubeflow/spark-operator/pull/2081) by [@csp33](https://github.com/csp33))

[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.4.3...spark-operator-chart-1.4.4)

## [spark-operator-chart-1.4.3](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.4.3) (2024-07-03)

- Add PodDisruptionBudget to chart ([#2078](https://github.com/kubeflow/spark-operator/pull/2078) by [@csp33](https://github.com/csp33))
- Update README and documentation ([#2047](https://github.com/kubeflow/spark-operator/pull/2047) by [@ChenYi015](https://github.com/ChenYi015))
- Add code of conduct and update contributor guide ([#2074](https://github.com/kubeflow/spark-operator/pull/2074) by [@ChenYi015](https://github.com/ChenYi015))
- Remove .gitlab-ci.yml ([#2069](https://github.com/kubeflow/spark-operator/pull/2069) by [@jacobsalway](https://github.com/jacobsalway))
- Modified README.MD as per changes discussed on <https://github.com/kubeflow/spark-operator/pull/2062> ([#2066](https://github.com/kubeflow/spark-operator/pull/2066) by [@vikas-saxena02](https://github.com/vikas-saxena02))

[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.4.2...spark-operator-chart-1.4.3)

## [spark-operator-chart-1.4.2](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.4.2) (2024-06-17)

- Support objectSelector on mutating webhook ([#2058](https://github.com/kubeflow/spark-operator/pull/2058) by [@Cian911](https://github.com/Cian911))

[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.4.1...spark-operator-chart-1.4.2)

## [spark-operator-chart-1.4.1](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.4.1) (2024-06-15)

- Adding an option to set the priority class for spark-operator pod ([#2043](https://github.com/kubeflow/spark-operator/pull/2043) by [@pkgajulapalli](https://github.com/pkgajulapalli))
- Update minikube version in CI ([#2059](https://github.com/kubeflow/spark-operator/pull/2059) by [@Cian911](https://github.com/Cian911))

[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.4.0...spark-operator-chart-1.4.1)

## [spark-operator-chart-1.4.0](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.4.0) (2024-06-05)

- Certificates are generated by the operator rather than gencerts.sh ([#2016](https://github.com/kubeflow/spark-operator/pull/2016) by [@ChenYi015](https://github.com/ChenYi015))
- Add ChenYi015 as spark-operator reviewer ([#2045](https://github.com/kubeflow/spark-operator/pull/2045) by [@ChenYi015](https://github.com/ChenYi015))

[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.3.2...spark-operator-chart-1.4.0)

## [spark-operator-chart-1.3.2](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.3.2) (2024-06-05)

- Bump appVersion to v1beta2-1.5.0-3.5.0 ([#2044](https://github.com/kubeflow/spark-operator/pull/2044) by [@ChenYi015](https://github.com/ChenYi015))
- Add restartPolicy field to SparkApplication Driver/Executor initContainers CRDs ([#2022](https://github.com/kubeflow/spark-operator/pull/2022) by [@mschroering](https://github.com/mschroering))
- :memo: Add Inter&Co to who-is-using.md ([#2040](https://github.com/kubeflow/spark-operator/pull/2040) by [@ignitz](https://github.com/ignitz))

[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.3.1...spark-operator-chart-1.3.2)

## [spark-operator-chart-1.3.1](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.3.1) (2024-05-31)

- Chart: add POD_NAME env for leader election ([#2039](https://github.com/kubeflow/spark-operator/pull/2039) by [@Aakcht](https://github.com/Aakcht))

[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.3.0...spark-operator-chart-1.3.1)

## [spark-operator-chart-1.3.0](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.3.0) (2024-05-20)

- Support exposing extra TCP ports in Spark Driver via K8s Ingress ([#1998](https://github.com/kubeflow/spark-operator/pull/1998) by [@hiboyang](https://github.com/hiboyang))
- Fixes a bug with dynamic allocation forcing the executor count to be 1 even when minExecutors is set to 0 ([#1979](https://github.com/kubeflow/spark-operator/pull/1979) by [@peter-mcclonski](https://github.com/peter-mcclonski))
- Remove outdated PySpark experimental warning in example ([#2014](https://github.com/kubeflow/spark-operator/pull/2014) by [@andrejpk](https://github.com/andrejpk))
- Update Spark Job Namespace docs ([#2000](https://github.com/kubeflow/spark-operator/pull/2000) by [@matthewrossi](https://github.com/matthewrossi))

[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.2.15...spark-operator-chart-1.3.0)

## [spark-operator-chart-1.2.15](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.2.15) (2024-05-07)

- Fix examples ([#2010](https://github.com/kubeflow/spark-operator/pull/2010) by [@peter-mcclonski](https://github.com/peter-mcclonski))

[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.2.14...spark-operator-chart-1.2.15)

## [spark-operator-chart-1.2.14](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.2.14) (2024-04-26)

- feat: add support for service labels on driver-svc ([#1985](https://github.com/kubeflow/spark-operator/pull/1985) by [@Cian911](https://github.com/Cian911))

[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.2.13...spark-operator-chart-1.2.14)

## [spark-operator-chart-1.2.13](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.2.13) (2024-04-24)

- fix(chart): remove operator namespace default for job namespaces value ([#1989](https://github.com/kubeflow/spark-operator/pull/1989) by [@t3mi](https://github.com/t3mi))
- Fix Docker Hub Credentials in CI ([#2003](https://github.com/kubeflow/spark-operator/pull/2003) by [@andreyvelich](https://github.com/andreyvelich))

[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.2.12...spark-operator-chart-1.2.13)

## [spark-operator-chart-1.2.12](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.2.12) (2024-04-19)

- Add emptyDir sizeLimit support for local dirs ([#1993](https://github.com/kubeflow/spark-operator/pull/1993) by [@jacobsalway](https://github.com/jacobsalway))
- fix: Removed `publish-image` dependency on publishing the helm chart ([#1995](https://github.com/kubeflow/spark-operator/pull/1995) by [@vara-bonthu](https://github.com/vara-bonthu))

[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.2.11...spark-operator-chart-1.2.12)

## [spark-operator-chart-1.2.11](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.2.11) (2024-04-19)

- fix: Update Github workflow to publish Helm charts on chart changes, irrespective of image change ([#1992](https://github.com/kubeflow/spark-operator/pull/1992) by [@vara-bonthu](https://github.com/vara-bonthu))
- chore: Add Timo to user list ([#1615](https://github.com/kubeflow/spark-operator/pull/1615) by [@vanducng](https://github.com/vanducng))
- Update spark operator permissions for CRD ([#1973](https://github.com/kubeflow/spark-operator/pull/1973) by [@ChenYi015](https://github.com/ChenYi015))
- fix spark-rbac ([#1986](https://github.com/kubeflow/spark-operator/pull/1986) by [@Aransh](https://github.com/Aransh))
- Use Kubeflow Docker Hub for Spark Operator Image ([#1974](https://github.com/kubeflow/spark-operator/pull/1974) by [@andreyvelich](https://github.com/andreyvelich))
- fix: fixed serviceaccount annotations ([#1972](https://github.com/kubeflow/spark-operator/pull/1972) by [@AndrewChubatiuk](https://github.com/AndrewChubatiuk))

[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.2.7...spark-operator-chart-1.2.11)

## [spark-operator-chart-1.2.7](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.2.7) (2024-04-16)

- fix: upgraded k8s deps ([#1983](https://github.com/kubeflow/spark-operator/pull/1983) by [@AndrewChubatiuk](https://github.com/AndrewChubatiuk))
- chore: remove k8s.io/kubernetes replaces and adapt to v1.29.3 apis ([#1968](https://github.com/kubeflow/spark-operator/pull/1968) by [@ajayk](https://github.com/ajayk))
- Add some helm chart unit tests and fix spark service account render failure when extra annotations are specified ([#1967](https://github.com/kubeflow/spark-operator/pull/1967) by [@ChenYi015](https://github.com/ChenYi015))
- feat: Doc updates, Issue and PR templates are added ([#1970](https://github.com/kubeflow/spark-operator/pull/1970) by [@vara-bonthu](https://github.com/vara-bonthu))

[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.2.5...spark-operator-chart-1.2.7)

## [spark-operator-chart-1.2.5](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.2.5) (2024-04-14)

- fixed docker image tag and updated chart docs ([#1969](https://github.com/kubeflow/spark-operator/pull/1969) by [@AndrewChubatiuk](https://github.com/AndrewChubatiuk))

[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.2.4...spark-operator-chart-1.2.5)

## [spark-operator-chart-1.2.4](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.2.4) (2024-04-13)

- publish chart independently, incremented both chart and image versions to trigger build of both ([#1964](https://github.com/kubeflow/spark-operator/pull/1964) by [@AndrewChubatiuk](https://github.com/AndrewChubatiuk))
- Update helm chart README ([#1958](https://github.com/kubeflow/spark-operator/pull/1958) by [@ChenYi015](https://github.com/ChenYi015))
- fix: add containerPort declaration for webhook in helm chart ([#1961](https://github.com/kubeflow/spark-operator/pull/1961) by [@zevisert](https://github.com/zevisert))
- added id for a build job to fix digests artifact creation ([#1963](https://github.com/kubeflow/spark-operator/pull/1963) by [@AndrewChubatiuk](https://github.com/AndrewChubatiuk))
- support multiple namespaces ([#1955](https://github.com/kubeflow/spark-operator/pull/1955) by [@AndrewChubatiuk](https://github.com/AndrewChubatiuk))
- chore: replace GoogleCloudPlatform/spark-on-k8s-operator with kubeflow/spark-operator ([#1937](https://github.com/kubeflow/spark-operator/pull/1937) by [@zevisert](https://github.com/zevisert))
- Chart: add patch permissions for spark operator SA to support spark 3.5.0 ([#1884](https://github.com/kubeflow/spark-operator/pull/1884) by [@Aakcht](https://github.com/Aakcht))
- Cleanup after golang upgrade ([#1956](https://github.com/kubeflow/spark-operator/pull/1956) by [@AndrewChubatiuk](https://github.com/AndrewChubatiuk))
- feat: add support for custom service labels ([#1952](https://github.com/kubeflow/spark-operator/pull/1952) by [@Cian911](https://github.com/Cian911))
- upgraded golang and dependencies ([#1954](https://github.com/kubeflow/spark-operator/pull/1954) by [@AndrewChubatiuk](https://github.com/AndrewChubatiuk))
- README for installing operator using kustomize with custom namespace and service name ([#1778](https://github.com/kubeflow/spark-operator/pull/1778) by [@shahsiddharth08](https://github.com/shahsiddharth08))
- BUGFIX: Added cancel method to fix context leak ([#1917](https://github.com/kubeflow/spark-operator/pull/1917) by [@fazledyn-or](https://github.com/fazledyn-or))
- remove unmatched quotes from user-guide.md ([#1584](https://github.com/kubeflow/spark-operator/pull/1584) by [@taeyeopkim1](https://github.com/taeyeopkim1))
- Add PVC permission to Operator role ([#1889](https://github.com/kubeflow/spark-operator/pull/1889) by [@wyangsun](https://github.com/wyangsun))
- Allow to set webhook job resource limits (#1429,#1300) ([#1946](https://github.com/kubeflow/spark-operator/pull/1946) by [@karbyshevds](https://github.com/karbyshevds))
- Create OWNERS ([#1927](https://github.com/kubeflow/spark-operator/pull/1927) by [@zijianjoy](https://github.com/zijianjoy))
- fix: fix issue #1723 about spark-operator not working with volcano on OCP ([#1724](https://github.com/kubeflow/spark-operator/pull/1724) by [@disaster37](https://github.com/disaster37))
- Add Rokt to who-is-using.md ([#1867](https://github.com/kubeflow/spark-operator/pull/1867) by [@jacobsalway](https://github.com/jacobsalway))
- Handle invalid API resources in discovery ([#1758](https://github.com/kubeflow/spark-operator/pull/1758) by [@wiltonsr](https://github.com/wiltonsr))
- Fix docs for Volcano integration ([#1719](https://github.com/kubeflow/spark-operator/pull/1719) by [@VVKot](https://github.com/VVKot))
- Added qualytics to who is using ([#1736](https://github.com/kubeflow/spark-operator/pull/1736) by [@josecsotomorales](https://github.com/josecsotomorales))
- Allowing optional annotation on rbac ([#1770](https://github.com/kubeflow/spark-operator/pull/1770) by [@cxfcxf](https://github.com/cxfcxf))
- Support `seccompProfile` in Spark application CRD and fix pre-commit jobs ([#1768](https://github.com/kubeflow/spark-operator/pull/1768) by [@ordukhanian](https://github.com/ordukhanian))
- Updating webhook docs to also mention eks ([#1763](https://github.com/kubeflow/spark-operator/pull/1763) by [@JunaidChaudry](https://github.com/JunaidChaudry))
- Link to helm docs fixed ([#1783](https://github.com/kubeflow/spark-operator/pull/1783) by [@haron](https://github.com/haron))
- Improve getMasterURL() to add [] to IPv6 if needed ([#1825](https://github.com/kubeflow/spark-operator/pull/1825) by [@LittleWat](https://github.com/LittleWat))
- Add envFrom to operator deployment ([#1785](https://github.com/kubeflow/spark-operator/pull/1785) by [@matschaffer-roblox](https://github.com/matschaffer-roblox))
- Expand ingress docs a bit ([#1806](https://github.com/kubeflow/spark-operator/pull/1806) by [@matschaffer-roblox](https://github.com/matschaffer-roblox))
- Optional sidecars for operator pod ([#1754](https://github.com/kubeflow/spark-operator/pull/1754) by [@qq157755587](https://github.com/qq157755587))
- Add Roblox to who-is ([#1784](https://github.com/kubeflow/spark-operator/pull/1784) by [@matschaffer-roblox](https://github.com/matschaffer-roblox))
- Molex started using spark K8 operator. ([#1714](https://github.com/kubeflow/spark-operator/pull/1714) by [@AshishPushpSingh](https://github.com/AshishPushpSingh))
- Extra helm chart labels ([#1669](https://github.com/kubeflow/spark-operator/pull/1669) by [@kvanzuijlen](https://github.com/kvanzuijlen))

[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.1.27...spark-operator-chart-1.2.4)

## [spark-operator-chart-1.1.27](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.1.27) (2023-03-17)

- Added permissions for leader election #1635 ([#1647](https://github.com/kubeflow/spark-operator/pull/1647) by [@ordukhanian](https://github.com/ordukhanian))
- Fix #1393: fix tolerations block in wrong segment for webhook jobs ([#1633](https://github.com/kubeflow/spark-operator/pull/1633) by [@zhiminglim](https://github.com/zhiminglim))
- add dependabot ([#1629](https://github.com/kubeflow/spark-operator/pull/1629) by [@monotek](https://github.com/monotek))
- Add support for `ephemeral.volumeClaimTemplate` in helm chart CRDs ([#1661](https://github.com/kubeflow/spark-operator/pull/1661) by [@ArshiAAkhavan](https://github.com/ArshiAAkhavan))
- Add Kognita to "Who is using" ([#1637](https://github.com/kubeflow/spark-operator/pull/1637) by [@claudino-kognita](https://github.com/claudino-kognita))
- add lifecycle to executor ([#1674](https://github.com/kubeflow/spark-operator/pull/1674) by [@tiechengsu](https://github.com/tiechengsu))
- Fix signal handling for non-leader processes ([#1680](https://github.com/kubeflow/spark-operator/pull/1680) by [@antonipp](https://github.com/antonipp))

[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.1.26...spark-operator-chart-1.1.27)

## [spark-operator-chart-1.1.26](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.1.26) (2022-10-25)

- update go to 1.19 + k8s.io libs to v0.25.3 ([#1630](https://github.com/kubeflow/spark-operator/pull/1630) by [@ImpSy](https://github.com/ImpSy))
- Update README - secrets and sidecars need mutating webhooks ([#1550](https://github.com/kubeflow/spark-operator/pull/1550) by [@djdillon](https://github.com/djdillon))

[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.1.25...spark-operator-chart-1.1.26)

## [spark-operator-chart-1.1.25](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.1.25) (2022-06-08)

- Webhook init and cleanup should respect nodeSelector ([#1545](https://github.com/kubeflow/spark-operator/pull/1545) by [@erikcw](https://github.com/erikcw))
- rename unit tests to integration tests in Makefile#integration-test ([#1539](https://github.com/kubeflow/spark-operator/pull/1539) by [@dcoliversun](https://github.com/dcoliversun))

[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.1.24...spark-operator-chart-1.1.25)

## [spark-operator-chart-1.1.24](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.1.24) (2022-06-01)

- Fix: use V1 api for CRDs for volcano integration ([#1540](https://github.com/kubeflow/spark-operator/pull/1540) by [@Aakcht](https://github.com/Aakcht))

[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.1.23...spark-operator-chart-1.1.24)

## [spark-operator-chart-1.1.23](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.1.23) (2022-05-18)

- fix: add pre-upgrade hook to rbac resources ([#1511](https://github.com/kubeflow/spark-operator/pull/1511) by [@cwyl02](https://github.com/cwyl02))

[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.1.22...spark-operator-chart-1.1.23)

## [spark-operator-chart-1.1.22](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.1.22) (2022-05-16)

- Fixes issue #1467 (issue when deleting SparkApplication without metrics server) ([#1530](https://github.com/kubeflow/spark-operator/pull/1530) by [@aneagoe](https://github.com/aneagoe))
- Implement --logs and --delete flags on 'sparkctl create' and a timeout on 'sparkctl log' to wait for pod startup ([#1506](https://github.com/kubeflow/spark-operator/pull/1506) by [@alaurentinoofficial](https://github.com/alaurentinoofficial))
- Fix Spark UI URL in app status ([#1518](https://github.com/kubeflow/spark-operator/pull/1518) by [@gtopper](https://github.com/gtopper))
- remove quotes from yaml file ([#1524](https://github.com/kubeflow/spark-operator/pull/1524) by [@zencircle](https://github.com/zencircle))
- Added missing manifest yaml, point the manifest to the right direction ([#1504](https://github.com/kubeflow/spark-operator/pull/1504) by [@RonZhang724](https://github.com/RonZhang724))

[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.1.21...spark-operator-chart-1.1.22)

## [spark-operator-chart-1.1.21](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.1.21) (2022-05-12)

- Ensure that driver is deleted prior to sparkapplication resubmission ([#1521](https://github.com/kubeflow/spark-operator/pull/1521) by [@khorshuheng](https://github.com/khorshuheng))

[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.1.20...spark-operator-chart-1.1.21)

## [spark-operator-chart-1.1.20](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.1.20) (2022-04-11)

- Add ingress-class-name controller flag ([#1482](https://github.com/kubeflow/spark-operator/pull/1482) by [@voyvodov](https://github.com/voyvodov))

[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.1.19...spark-operator-chart-1.1.20)

## [spark-operator-chart-1.1.19](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.1.19) (2022-02-14)

- Add Operator volumes and volumeMounts in chart ([#1475](https://github.com/kubeflow/spark-operator/pull/1475) by [@ocworld](https://github.com/ocworld))

[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.1.18...spark-operator-chart-1.1.19)

## [spark-operator-chart-1.1.18](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.1.18) (2022-02-13)

- Updated default registry to ghcr.io ([#1454](https://github.com/kubeflow/spark-operator/pull/1454) by [@aneagoe](https://github.com/aneagoe))
- Github actions workflow fix for Helm chart deployment ([#1456](https://github.com/kubeflow/spark-operator/pull/1456) by [@vara-bonthu](https://github.com/vara-bonthu))
- Kubernetes v1.22 extensions/v1beta1 API removal ([#1427](https://github.com/kubeflow/spark-operator/pull/1427) by [@aneagoe](https://github.com/aneagoe))
- Fixes an issue with github action in job build-spark-operator ([#1452](https://github.com/kubeflow/spark-operator/pull/1452) by [@aneagoe](https://github.com/aneagoe))
- use github container registry instead of gcr.io for releases ([#1422](https://github.com/kubeflow/spark-operator/pull/1422) by [@TomHellier](https://github.com/TomHellier))
- Fixes an error that was preventing the pods from being mutated ([#1421](https://github.com/kubeflow/spark-operator/pull/1421) by [@ssullivan](https://github.com/ssullivan))
- Make github actions more feature complete ([#1418](https://github.com/kubeflow/spark-operator/pull/1418) by [@TomHellier](https://github.com/TomHellier))
- Resolves an error when deploying the webhook where the k8s api indica… ([#1413](https://github.com/kubeflow/spark-operator/pull/1413) by [@ssullivan](https://github.com/ssullivan))

[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.1.15...spark-operator-chart-1.1.18)

## [spark-operator-chart-1.1.15](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.1.15) (2021-12-02)

- Add docker build to github action ([#1415](https://github.com/kubeflow/spark-operator/pull/1415) by [@TomHellier](https://github.com/TomHellier))

[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.1.14...spark-operator-chart-1.1.15)

## [spark-operator-chart-1.1.14](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.1.14) (2021-11-30)

- Updating API version of admissionregistration.k8s.io ([#1401](https://github.com/kubeflow/spark-operator/pull/1401) by [@sairamankumar2](https://github.com/sairamankumar2))
- Add C2FO to who is using ([#1391](https://github.com/kubeflow/spark-operator/pull/1391) by [@vanhoale](https://github.com/vanhoale))

[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.1.13...spark-operator-chart-1.1.14)

## [spark-operator-chart-1.1.13](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.1.13) (2021-11-18)

- delete-service-accounts-and-roles-before-creation ([#1384](https://github.com/kubeflow/spark-operator/pull/1384) by [@TiansuYu](https://github.com/TiansuYu))

[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.1.12...spark-operator-chart-1.1.13)

## [spark-operator-chart-1.1.12](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.1.12) (2021-11-14)

- webhook timeout variable ([#1387](https://github.com/kubeflow/spark-operator/pull/1387) by [@sairamankumar2](https://github.com/sairamankumar2))

[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.1.11...spark-operator-chart-1.1.12)

## [spark-operator-chart-1.1.11](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.1.11) (2021-11-12)

- [FIX] add service account access to persistentvolumeclaims ([#1390](https://github.com/kubeflow/spark-operator/pull/1390) by [@mschroering](https://github.com/mschroering))
- Add DeepCure to who is using ([#1389](https://github.com/kubeflow/spark-operator/pull/1389) by [@mschroering](https://github.com/mschroering))

[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.1.10...spark-operator-chart-1.1.11)

## [spark-operator-chart-1.1.10](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.1.10) (2021-11-09)

- Add custom toleration support for webhook jobs ([#1383](https://github.com/kubeflow/spark-operator/pull/1383) by [@korjek](https://github.com/korjek))
- fix container name in addsecuritycontext patch ([#1377](https://github.com/kubeflow/spark-operator/pull/1377) by [@lybavsky](https://github.com/lybavsky))

[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.1.9...spark-operator-chart-1.1.10)

## [spark-operator-chart-1.1.9](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.1.9) (2021-11-01)

- `Role` and `RoleBinding` not installed for `webhook-init` in Helm `pre-hook` ([#1379](https://github.com/kubeflow/spark-operator/pull/1379) by [@zzvara](https://github.com/zzvara))

[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.1.8...spark-operator-chart-1.1.9)

## [spark-operator-chart-1.1.8](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.1.8) (2021-10-26)

- Regenerate deleted cert after upgrade ([#1373](https://github.com/kubeflow/spark-operator/pull/1373) by [@simplylizz](https://github.com/simplylizz))
- Make manifests usable by Kustomize ([#1367](https://github.com/kubeflow/spark-operator/pull/1367) by [@karpoftea](https://github.com/karpoftea))
- #1329 update the operator to allow subpaths to be used with the spark ui ingress. ([#1330](https://github.com/kubeflow/spark-operator/pull/1330) by [@TomHellier](https://github.com/TomHellier))

[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.1.7...spark-operator-chart-1.1.8)

## [spark-operator-chart-1.1.7](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.1.7) (2021-10-21)

- serviceAccount annotations ([#1350](https://github.com/kubeflow/spark-operator/pull/1350) by [@moskitone](https://github.com/moskitone))
- Update Dockerfile ([#1369](https://github.com/kubeflow/spark-operator/pull/1369) by [@Sadagopan88](https://github.com/Sadagopan88))
- [FIX] tolerations are not directly present in Driver(/Executor)Spec ([#1365](https://github.com/kubeflow/spark-operator/pull/1365) by [@s-pedamallu](https://github.com/s-pedamallu))
- fix running metrics for application deletion ([#1358](https://github.com/kubeflow/spark-operator/pull/1358) by [@Aakcht](https://github.com/Aakcht))
- Update who-is-using.md ([#1338](https://github.com/kubeflow/spark-operator/pull/1338) by [@Juandavi1](https://github.com/Juandavi1))
- Update who-is-using.md ([#1082](https://github.com/kubeflow/spark-operator/pull/1082) by [@Juandavi1](https://github.com/Juandavi1))
- Add support for executor service account ([#1322](https://github.com/kubeflow/spark-operator/pull/1322) by [@bbenzikry](https://github.com/bbenzikry))
- fix NPE introduced in #1280 ([#1325](https://github.com/kubeflow/spark-operator/pull/1325) by [@ImpSy](https://github.com/ImpSy))

[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.1.6...spark-operator-chart-1.1.7)

## [spark-operator-chart-1.1.6](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.1.6) (2021-08-04)

- Add hook deletion policy for spark-operator service account ([#1313](https://github.com/kubeflow/spark-operator/pull/1313) by [@pdrastil](https://github.com/pdrastil))

[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.1.5...spark-operator-chart-1.1.6)

## [spark-operator-chart-1.1.5](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.1.5) (2021-07-28)

- Add user defined pod labels ([#1288](https://github.com/kubeflow/spark-operator/pull/1288) by [@pdrastil](https://github.com/pdrastil))

[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.1.4...spark-operator-chart-1.1.5)

## [spark-operator-chart-1.1.4](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.1.4) (2021-07-25)

- Migrate CRDs from v1beta1 to v1. Add additionalPrinterColumns ([#1298](https://github.com/kubeflow/spark-operator/pull/1298) by [@drazul](https://github.com/drazul))
- Explain "signal: kill" errors during submission ([#1292](https://github.com/kubeflow/spark-operator/pull/1292) by [@zzvara](https://github.com/zzvara))
- fix the invalid repo address ([#1291](https://github.com/kubeflow/spark-operator/pull/1291) by [@william-wang](https://github.com/william-wang))
- add failure context to recordExecutorEvent ([#1280](https://github.com/kubeflow/spark-operator/pull/1280) by [@ImpSy](https://github.com/ImpSy))
- Update pythonVersion to fix example ([#1284](https://github.com/kubeflow/spark-operator/pull/1284) by [@stratus](https://github.com/stratus))
- add crds drift check between chart/ and manifest/ ([#1272](https://github.com/kubeflow/spark-operator/pull/1272) by [@ImpSy](https://github.com/ImpSy))

[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.1.3...spark-operator-chart-1.1.4)

## [spark-operator-chart-1.1.3](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.1.3) (2021-05-25)

- Allow user to specify service annotation on Spark UI service ([#1264](https://github.com/kubeflow/spark-operator/pull/1264) by [@khorshuheng](https://github.com/khorshuheng))

[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.1.2...spark-operator-chart-1.1.3)

## [spark-operator-chart-1.1.2](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.1.2) (2021-05-25)

- implement shareProcessNamespace in SparkPodSpec ([#1262](https://github.com/kubeflow/spark-operator/pull/1262) by [@ImpSy](https://github.com/ImpSy))

[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.1.1...spark-operator-chart-1.1.2)

## [spark-operator-chart-1.1.1](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.1.1) (2021-05-19)

- Enable UI service flag for disabling UI service ([#1261](https://github.com/kubeflow/spark-operator/pull/1261) by [@sairamankumar2](https://github.com/sairamankumar2))
- Add DiDi to who-is-using.md ([#1255](https://github.com/kubeflow/spark-operator/pull/1255) by [@Run-Lin](https://github.com/Run-Lin))
- doc: update who is using page ([#1251](https://github.com/kubeflow/spark-operator/pull/1251) by [@luizm](https://github.com/luizm))
- Add Tongdun under who-is-using ([#1249](https://github.com/kubeflow/spark-operator/pull/1249) by [@lomoJG](https://github.com/lomoJG))
- [#1239] Custom service port name for spark application UI ([#1240](https://github.com/kubeflow/spark-operator/pull/1240) by [@marcozov](https://github.com/marcozov))
- fix: do not remove preemptionPolicy in patcher when not present ([#1246](https://github.com/kubeflow/spark-operator/pull/1246) by [@HHK1](https://github.com/HHK1))

[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.1.0...spark-operator-chart-1.1.1)

## [spark-operator-chart-1.1.0](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.1.0) (2021-04-28)

- Updating Spark version from 3.0 to 3.1.1 ([#1153](https://github.com/kubeflow/spark-operator/pull/1153) by [@chethanuk](https://github.com/chethanuk))

[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.0.10...spark-operator-chart-1.1.0)

## [spark-operator-chart-1.0.10](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.0.10) (2021-04-28)

- Add support for blue/green deployments ([#1230](https://github.com/kubeflow/spark-operator/pull/1230) by [@flupke](https://github.com/flupke))
- Update who-is-using.md: Fossil is using Spark Operator for Production ([#1244](https://github.com/kubeflow/spark-operator/pull/1244) by [@duyet](https://github.com/duyet))

[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.0.9...spark-operator-chart-1.0.10)

## [spark-operator-chart-1.0.9](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.0.9) (2021-04-23)

- Link to Kubernetes Slack ([#1234](https://github.com/kubeflow/spark-operator/pull/1234) by [@jsoref](https://github.com/jsoref))
- fix: remove preemptionPolicy when priority class name is used ([#1236](https://github.com/kubeflow/spark-operator/pull/1236) by [@HHK1](https://github.com/HHK1))
- Spelling ([#1231](https://github.com/kubeflow/spark-operator/pull/1231) by [@jsoref](https://github.com/jsoref))
- Add support to expose custom ports ([#1205](https://github.com/kubeflow/spark-operator/pull/1205) by [@luizm](https://github.com/luizm))
- Fix the error of hostAliases when there are more than 2 hostnames ([#1209](https://github.com/kubeflow/spark-operator/pull/1209) by [@cdmikechen](https://github.com/cdmikechen))
- remove multiple prefixes for 'p' ([#1210](https://github.com/kubeflow/spark-operator/pull/1210) by [@chaudhryfaisal](https://github.com/chaudhryfaisal))
- added --s3-force-path-style to force path style URLs for S3 objects ([#1206](https://github.com/kubeflow/spark-operator/pull/1206) by [@chaudhryfaisal](https://github.com/chaudhryfaisal))
- Allow custom bucket path ([#1207](https://github.com/kubeflow/spark-operator/pull/1207) by [@bribroder](https://github.com/bribroder))
- fix: Remove priority from the spec when using priority class ([#1203](https://github.com/kubeflow/spark-operator/pull/1203) by [@HHK1](https://github.com/HHK1))
- Fix go get issue with "unknown revision v0.0.0" ([#1198](https://github.com/kubeflow/spark-operator/pull/1198) by [@hongshaoyang](https://github.com/hongshaoyang))

[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.0.8...spark-operator-chart-1.0.9)

## [spark-operator-chart-1.0.8](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.0.8) (2021-03-07)

- Helm: Put service account into pre-install hook. ([#1155](https://github.com/kubeflow/spark-operator/pull/1155) by [@tandrup](https://github.com/tandrup))
- correct hook annotation for webhook job ([#1193](https://github.com/kubeflow/spark-operator/pull/1193) by [@chaudhryfaisal](https://github.com/chaudhryfaisal))
- Update who-is-using.md ([#1174](https://github.com/kubeflow/spark-operator/pull/1174) by [@tarek-izemrane](https://github.com/tarek-izemrane))
- add Carrefour as adopter and contributor ([#1156](https://github.com/kubeflow/spark-operator/pull/1156) by [@AliGouta](https://github.com/AliGouta))

[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.0.7...spark-operator-chart-1.0.8)

## [spark-operator-chart-1.0.7](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.0.7) (2021-02-05)

- fix issue #1131 ([#1142](https://github.com/kubeflow/spark-operator/pull/1142) by [@kz33](https://github.com/kz33))

[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.0.6...spark-operator-chart-1.0.7)

## [spark-operator-chart-1.0.6](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.0.6) (2021-02-04)

- Add Fossil to who-is-using.md ([#1152](https://github.com/kubeflow/spark-operator/pull/1152) by [@duyet](https://github.com/duyet))
- #1143 Helm issues while deploying using argocd ([#1145](https://github.com/kubeflow/spark-operator/pull/1145) by [@TomHellier](https://github.com/TomHellier))
- Include Gojek in who-is-using.md ([#1146](https://github.com/kubeflow/spark-operator/pull/1146) by [@pradithya](https://github.com/pradithya))
- add hostAliases for SparkPodSpec ([#1133](https://github.com/kubeflow/spark-operator/pull/1133) by [@ImpSy](https://github.com/ImpSy))
- Adding MavenCode ([#1128](https://github.com/kubeflow/spark-operator/pull/1128) by [@charlesa101](https://github.com/charlesa101))
- Add MongoDB to who-is-using.md ([#1123](https://github.com/kubeflow/spark-operator/pull/1123) by [@chickenPopcorn](https://github.com/chickenPopcorn))
- update go version to 1.15 and k8s deps to v0.19.6 ([#1119](https://github.com/kubeflow/spark-operator/pull/1119) by [@stpabhi](https://github.com/stpabhi))

[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.0.5...spark-operator-chart-1.0.6)

## [spark-operator-chart-1.0.5](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.0.5) (2020-12-15)

- Add prometheus container port name ([#1099](https://github.com/kubeflow/spark-operator/pull/1099) by [@nicholas-fwang](https://github.com/nicholas-fwang))

[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.0.4...spark-operator-chart-1.0.5)

## [spark-operator-chart-1.0.4](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.0.4) (2020-12-12)

- Upgrade the Chart version to 1.0.4 ([#1113](https://github.com/kubeflow/spark-operator/pull/1113) by [@ordukhanian](https://github.com/ordukhanian))
- Support Prometheus PodMonitor Deployment (#1106) ([#1112](https://github.com/kubeflow/spark-operator/pull/1112) by [@ordukhanian](https://github.com/ordukhanian))
- update executor status if pod is lost while app is still running ([#1111](https://github.com/kubeflow/spark-operator/pull/1111) by [@ImpSy](https://github.com/ImpSy))
- Add scheduler func for clearing batch scheduling on completed ([#1079](https://github.com/kubeflow/spark-operator/pull/1079) by [@nicholas-fwang](https://github.com/nicholas-fwang))
- Add configuration for SparkUI service type ([#1100](https://github.com/kubeflow/spark-operator/pull/1100) by [@jutley](https://github.com/jutley))

[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.0.3...spark-operator-chart-1.0.4)

## [spark-operator-chart-1.0.3](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.0.3) (2020-12-07)

- Update docs with new helm instructions ([#1105](https://github.com/kubeflow/spark-operator/pull/1105) by [@hagaibarel](https://github.com/hagaibarel))

[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.0.2...spark-operator-chart-1.0.3)

Dockerfile (12 changes)

```diff
@@ -14,9 +14,9 @@
 # limitations under the License.
 #
 
-ARG SPARK_IMAGE=docker.io/library/spark:4.0.0
+ARG SPARK_IMAGE=spark:3.5.2
 
-FROM golang:1.24.1 AS builder
+FROM golang:1.23.1 AS builder
 
 WORKDIR /workspace
@@ -26,9 +26,7 @@ RUN --mount=type=cache,target=/go/pkg/mod/ \
     go mod download
 
 COPY . .
 
 ENV GOCACHE=/root/.cache/go-build
 
 ARG TARGETARCH
 
 RUN --mount=type=cache,target=/go/pkg/mod/ \
@@ -37,10 +35,6 @@ RUN --mount=type=cache,target=/go/pkg/mod/ \
 FROM ${SPARK_IMAGE}
 
-ARG SPARK_UID=185
-
-ARG SPARK_GID=185
-
 USER root
 
 RUN apt-get update \
@@ -51,7 +45,7 @@ RUN mkdir -p /etc/k8s-webhook-server/serving-certs /home/spark && \
     chmod -R g+rw /etc/k8s-webhook-server/serving-certs && \
     chown -R spark /etc/k8s-webhook-server/serving-certs /home/spark
 
-USER ${SPARK_UID}:${SPARK_GID}
+USER spark
 
 COPY --from=builder /workspace/bin/spark-operator /usr/bin/spark-operator
```
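On the base side of this diff, the Spark base image and the UID/GID the final stage runs as are exposed as build arguments, so they can be changed at build time without editing the Dockerfile. A minimal sketch, assuming a local checkout and the standard Docker CLI (the `spark-operator:dev` tag is illustrative):

```sh
# Build the operator image against a chosen Spark base image.
# SPARK_IMAGE, SPARK_UID and SPARK_GID are the ARGs declared in the
# Dockerfile above; TARGETARCH is supplied automatically by BuildKit.
docker build \
  --build-arg SPARK_IMAGE=docker.io/library/spark:4.0.0 \
  --build-arg SPARK_UID=185 \
  --build-arg SPARK_GID=185 \
  -t spark-operator:dev .
```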

Makefile (82 changes)

```diff
@@ -21,7 +21,7 @@ GIT_TREE_STATE := $(shell if [ -z "`git status --porcelain`" ]; then echo "clean
 GIT_SHA := $(shell git rev-parse --short HEAD || echo "HEAD")
 GIT_VERSION := ${VERSION}+${GIT_SHA}
 
-MODULE_PATH := $(shell awk '/^module/{print $$2; exit}' go.mod)
+REPO := github.com/kubeflow/spark-operator
 SPARK_OPERATOR_GOPATH := /go/src/github.com/kubeflow/spark-operator
 SPARK_OPERATOR_CHART_PATH := charts/spark-operator-chart
 DEP_VERSION := `grep DEP_VERSION= Dockerfile | awk -F\" '{print $$2}'`
```

```diff
@@ -35,8 +35,8 @@ UNAME := `uname | tr '[:upper:]' '[:lower:]'`
 CONTAINER_TOOL ?= docker
 
 # Image URL to use all building/pushing image targets
-IMAGE_REGISTRY ?= ghcr.io
-IMAGE_REPOSITORY ?= kubeflow/spark-operator/controller
+IMAGE_REGISTRY ?= docker.io
+IMAGE_REPOSITORY ?= kubeflow/spark-operator
 IMAGE_TAG ?= $(VERSION)
 IMAGE ?= $(IMAGE_REGISTRY)/$(IMAGE_REPOSITORY):$(IMAGE_TAG)
```
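Because these image coordinates are declared with `?=`, they are only defaults: any make invocation can override them from the command line or the environment, and `IMAGE` is composed from the three parts. A sketch of the usual pattern (the `docker-build` target name is an assumption; it is not shown in this excerpt):

```sh
# Override the ?= defaults per invocation; IMAGE then expands to
# ghcr.io/myorg/spark-operator:dev instead of the in-file default.
make docker-build \
  IMAGE_REGISTRY=ghcr.io \
  IMAGE_REPOSITORY=myorg/spark-operator \
  IMAGE_TAG=dev
```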

```diff
@@ -50,21 +50,20 @@ LOCALBIN ?= $(shell pwd)/bin
 ## Versions
 KUSTOMIZE_VERSION ?= v5.4.1
-CONTROLLER_TOOLS_VERSION ?= v0.17.1
+CONTROLLER_TOOLS_VERSION ?= v0.15.0
 KIND_VERSION ?= v0.23.0
-KIND_K8S_VERSION ?= v1.32.0
-ENVTEST_VERSION ?= release-0.20
+ENVTEST_VERSION ?= release-0.18
 # ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary.
-ENVTEST_K8S_VERSION ?= 1.32.0
+ENVTEST_K8S_VERSION ?= 1.29.3
-GOLANGCI_LINT_VERSION ?= v2.1.6
+GOLANGCI_LINT_VERSION ?= v1.61.0
 GEN_CRD_API_REFERENCE_DOCS_VERSION ?= v0.3.0
 HELM_VERSION ?= v3.15.3
 HELM_UNITTEST_VERSION ?= 0.5.1
 HELM_DOCS_VERSION ?= v1.14.2
-CODE_GENERATOR_VERSION ?= v0.33.1
 
 ## Binaries
 SPARK_OPERATOR ?= $(LOCALBIN)/spark-operator
+SPARKCTL ?= $(LOCALBIN)/sparkctl
 KUBECTL ?= kubectl
 KUSTOMIZE ?= $(LOCALBIN)/kustomize-$(KUSTOMIZE_VERSION)
 CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen-$(CONTROLLER_TOOLS_VERSION)
```
@ -102,14 +101,11 @@ version: ## Print version information.
|
||||||
@echo "Git SHA: ${GIT_SHA}"
|
@echo "Git SHA: ${GIT_SHA}"
|
||||||
@echo "Git Version: ${GIT_VERSION}"
|
@echo "Git Version: ${GIT_VERSION}"
|
||||||
|
|
||||||
.PHONY: print-%
|
|
||||||
print-%: ; @echo $*=$($*)
|
|
||||||
|
|
||||||
##@ Development
|
##@ Development
|
||||||
|
|
||||||
.PHONY: manifests
|
.PHONY: manifests
|
||||||
manifests: controller-gen ## Generate CustomResourceDefinition, RBAC and WebhookConfiguration manifests.
|
manifests: controller-gen ## Generate CustomResourceDefinition, RBAC and WebhookConfiguration manifests.
|
||||||
$(CONTROLLER_GEN) crd:generateEmbeddedObjectMeta=true rbac:roleName=spark-operator-controller webhook paths="./..." output:crd:artifacts:config=config/crd/bases
|
$(CONTROLLER_GEN) crd rbac:roleName=spark-operator-controller webhook paths="./..." output:crd:artifacts:config=config/crd/bases
|
||||||
|
|
||||||
.PHONY: generate
|
.PHONY: generate
|
||||||
generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations.
|
generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations.
|
||||||
|
@ -119,14 +115,6 @@ generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and
|
||||||
update-crd: manifests ## Update CRD files in the Helm chart.
|
update-crd: manifests ## Update CRD files in the Helm chart.
|
||||||
cp config/crd/bases/* charts/spark-operator-chart/crds/
|
cp config/crd/bases/* charts/spark-operator-chart/crds/
|
||||||
|
|
||||||
.PHONY: verify-codegen
|
|
||||||
verify-codegen: $(LOCALBIN) ## Install code-generator commands and verify changes
|
|
||||||
$(call go-install-tool,$(LOCALBIN)/register-gen-$(CODE_GENERATOR_VERSION),k8s.io/code-generator/cmd/register-gen,$(CODE_GENERATOR_VERSION))
|
|
||||||
$(call go-install-tool,$(LOCALBIN)/client-gen-$(CODE_GENERATOR_VERSION),k8s.io/code-generator/cmd/client-gen,$(CODE_GENERATOR_VERSION))
|
|
||||||
$(call go-install-tool,$(LOCALBIN)/lister-gen-$(CODE_GENERATOR_VERSION),k8s.io/code-generator/cmd/lister-gen,$(CODE_GENERATOR_VERSION))
|
|
||||||
$(call go-install-tool,$(LOCALBIN)/informer-gen-$(CODE_GENERATOR_VERSION),k8s.io/code-generator/cmd/informer-gen,$(CODE_GENERATOR_VERSION))
|
|
||||||
./hack/verify-codegen.sh
|
|
||||||
|
|
||||||
.PHONY: go-clean
|
.PHONY: go-clean
|
||||||
go-clean: ## Clean up caches and output.
|
go-clean: ## Clean up caches and output.
|
||||||
@echo "cleaning up caches and output"
|
@echo "cleaning up caches and output"
|
||||||
|
@ -172,26 +160,37 @@ e2e-test: envtest ## Run the e2e tests against a Kind k8s instance that is spun
|
||||||
##@ Build
|
##@ Build
|
||||||
|
|
||||||
override LDFLAGS += \
|
override LDFLAGS += \
|
||||||
-X ${MODULE_PATH}.version=${GIT_VERSION} \
|
-X ${REPO}.version=${GIT_VERSION} \
|
||||||
-X ${MODULE_PATH}.buildDate=${BUILD_DATE} \
|
-X ${REPO}.buildDate=${BUILD_DATE} \
|
||||||
-X ${MODULE_PATH}.gitCommit=${GIT_COMMIT} \
|
-X ${REPO}.gitCommit=${GIT_COMMIT} \
|
||||||
-X ${MODULE_PATH}.gitTreeState=${GIT_TREE_STATE} \
|
-X ${REPO}.gitTreeState=${GIT_TREE_STATE} \
|
||||||
-extldflags "-static"
|
-extldflags "-static"
|
||||||
|
|
||||||
.PHONY: build-operator
|
.PHONY: build-operator
|
||||||
build-operator: ## Build Spark operator.
|
build-operator: ## Build Spark operator.
|
||||||
echo "Building spark-operator binary..."
|
echo "Building spark-operator binary..."
|
||||||
CGO_ENABLED=0 go build -o $(SPARK_OPERATOR) -ldflags '${LDFLAGS}' cmd/operator/main.go
|
go build -o $(SPARK_OPERATOR) -ldflags '${LDFLAGS}' cmd/main.go
|
||||||
|
|
||||||
|
.PHONY: build-sparkctl
|
||||||
|
build-sparkctl: ## Build sparkctl binary.
|
||||||
|
echo "Building sparkctl binary..."
|
||||||
|
CGO_ENABLED=0 go build -o $(SPARKCTL) -buildvcs=false sparkctl/main.go
|
||||||
|
|
||||||
|
.PHONY: install-sparkctl
|
||||||
|
install-sparkctl: build-sparkctl ## Install sparkctl binary.
|
||||||
|
echo "Installing sparkctl binary to /usr/local/bin..."; \
|
||||||
|
sudo cp $(SPARKCTL) /usr/local/bin
|
||||||
|
|
||||||
.PHONY: clean
|
.PHONY: clean
|
||||||
clean: ## Clean binaries.
|
clean: ## Clean spark-operator and sparktcl binaries.
|
||||||
rm -f $(SPARK_OPERATOR)
|
rm -f $(SPARK_OPERATOR)
|
||||||
|
rm -f $(SPARKCTL)
|
||||||
|
|
||||||
.PHONY: build-api-docs
|
.PHONY: build-api-docs
|
||||||
build-api-docs: gen-crd-api-reference-docs ## Build api documentation.
|
build-api-docs: gen-crd-api-reference-docs ## Build api documentaion.
|
||||||
$(GEN_CRD_API_REFERENCE_DOCS) \
|
$(GEN_CRD_API_REFERENCE_DOCS) \
|
||||||
-config hack/api-docs/config.json \
|
-config hack/api-docs/config.json \
|
||||||
-api-dir github.com/kubeflow/spark-operator/v2/api/v1beta2 \
|
-api-dir github.com/kubeflow/spark-operator/api/v1beta2 \
|
||||||
-template-dir hack/api-docs/template \
|
-template-dir hack/api-docs/template \
|
||||||
-out-file docs/api-docs.md
|
-out-file docs/api-docs.md
|
||||||
|
|
||||||
|
@ -247,38 +246,33 @@ endif
|
||||||
.PHONY: kind-create-cluster
|
.PHONY: kind-create-cluster
|
||||||
kind-create-cluster: kind ## Create a kind cluster for integration tests.
|
kind-create-cluster: kind ## Create a kind cluster for integration tests.
|
||||||
if ! $(KIND) get clusters 2>/dev/null | grep -q "^$(KIND_CLUSTER_NAME)$$"; then \
|
if ! $(KIND) get clusters 2>/dev/null | grep -q "^$(KIND_CLUSTER_NAME)$$"; then \
|
||||||
$(KIND) create cluster \
|
kind create cluster --name $(KIND_CLUSTER_NAME) --config $(KIND_CONFIG_FILE) --kubeconfig $(KIND_KUBE_CONFIG) --wait=1m; \
|
||||||
--name $(KIND_CLUSTER_NAME) \
|
|
||||||
--config $(KIND_CONFIG_FILE) \
|
|
||||||
--image kindest/node:$(KIND_K8S_VERSION) \
|
|
||||||
--kubeconfig $(KIND_KUBE_CONFIG) \
|
|
||||||
--wait=1m; \
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
.PHONY: kind-load-image
|
.PHONY: kind-load-image
|
||||||
kind-load-image: kind-create-cluster docker-build ## Load the image into the kind cluster.
|
kind-load-image: kind-create-cluster docker-build ## Load the image into the kind cluster.
|
||||||
$(KIND) load docker-image --name $(KIND_CLUSTER_NAME) $(IMAGE)
|
$(KIND) load docker-image --name $(KIND_CLUSTER_NAME) $(IMAGE)
|
||||||
|
|
||||||
.PHONY: kind-delete-cluster
|
.PHONY: kind-delete-custer
|
||||||
kind-delete-cluster: kind ## Delete the created kind cluster.
|
kind-delete-custer: kind ## Delete the created kind cluster.
|
||||||
$(KIND) delete cluster --name $(KIND_CLUSTER_NAME) --kubeconfig $(KIND_KUBE_CONFIG)
|
$(KIND) delete cluster --name $(KIND_CLUSTER_NAME) --kubeconfig $(KIND_KUBE_CONFIG)
|
||||||
|
|
||||||
.PHONY: install
|
.PHONY: install
|
||||||
install-crd: manifests kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config.
|
install-crd: manifests kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config.
|
||||||
$(KUSTOMIZE) build config/crd | $(KUBECTL) create -f - 2>/dev/null || $(KUSTOMIZE) build config/crd | $(KUBECTL) replace -f -
|
$(KUSTOMIZE) build config/crd | $(KUBECTL) apply -f -
|
||||||
|
|
||||||
.PHONY: uninstall
|
.PHONY: uninstall
|
||||||
uninstall-crd: manifests kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
|
uninstall-crd: manifests kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
|
||||||
$(KUSTOMIZE) build config/crd | $(KUBECTL) delete --ignore-not-found=$(ignore-not-found) -f -
|
$(KUSTOMIZE) build config/crd | $(KUBECTL) delete --ignore-not-found=$(ignore-not-found) -f -
|
||||||
|
|
||||||
.PHONY: deploy
|
.PHONY: deploy
|
||||||
deploy: IMAGE_TAG=local
|
deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config.
|
||||||
deploy: helm manifests update-crd kind-load-image ## Deploy controller to the K8s cluster specified in ~/.kube/config.
|
cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
|
||||||
$(HELM) upgrade --install -f charts/spark-operator-chart/ci/ci-values.yaml spark-operator ./charts/spark-operator-chart/
|
$(KUSTOMIZE) build config/default | $(KUBECTL) apply -f -
|
||||||
|
|
||||||
.PHONY: undeploy
|
.PHONY: undeploy
|
||||||
undeploy: helm ## Uninstall spark-operator
|
undeploy: kustomize ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
|
||||||
$(HELM) uninstall spark-operator
|
$(KUSTOMIZE) build config/default | $(KUBECTL) delete --ignore-not-found=$(ignore-not-found) -f -
|
||||||
|
|
||||||
##@ Dependencies
|
##@ Dependencies
|
||||||
|
|
||||||
|
@ -308,7 +302,7 @@ $(ENVTEST): $(LOCALBIN)
|
||||||
.PHONY: golangci-lint
|
.PHONY: golangci-lint
|
||||||
golangci-lint: $(GOLANGCI_LINT) ## Download golangci-lint locally if necessary.
|
golangci-lint: $(GOLANGCI_LINT) ## Download golangci-lint locally if necessary.
|
||||||
$(GOLANGCI_LINT): $(LOCALBIN)
|
$(GOLANGCI_LINT): $(LOCALBIN)
|
||||||
$(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/v2/cmd/golangci-lint,${GOLANGCI_LINT_VERSION})
|
$(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/cmd/golangci-lint,${GOLANGCI_LINT_VERSION})
|
||||||
|
|
||||||
.PHONY: gen-crd-api-reference-docs
|
.PHONY: gen-crd-api-reference-docs
|
||||||
gen-crd-api-reference-docs: $(GEN_CRD_API_REFERENCE_DOCS) ## Download gen-crd-api-reference-docs locally if necessary.
|
gen-crd-api-reference-docs: $(GEN_CRD_API_REFERENCE_DOCS) ## Download gen-crd-api-reference-docs locally if necessary.
|
||||||
|
|
OWNERS (7 changes)

@@ -1,10 +1,7 @@
 approvers:
 - andreyvelich
-- ChenYi015
-- jacobsalway
 - mwielgus
-- vara-bonthu
 - yuchaoran2011
+- vara-bonthu
 reviewers:
-- ImpSy
-- nabuskey
+- ChenYi015
PROJECT (14 changes)

@@ -13,9 +13,17 @@ resources:
   namespaced: true
   controller: true
   domain: sparkoperator.k8s.io
-  kind: SparkConnect
-  path: github.com/kubeflow/spark-operator/api/v1alpha1
-  version: v1alpha1
+  kind: SparkApplication
+  path: github.com/kubeflow/spark-operator/api/v1beta1
+  version: v1beta1
+- api:
+    crdVersion: v1
+  namespaced: true
+  controller: true
+  domain: sparkoperator.k8s.io
+  kind: ScheduledSparkApplication
+  path: github.com/kubeflow/spark-operator/api/v1beta1
+  version: v1beta1
 - api:
     crdVersion: v1
   namespaced: true
README.md (56 changes)

@@ -1,39 +1,12 @@
 # Kubeflow Spark Operator

-[Integration Test](https://github.com/kubeflow/spark-operator/actions/workflows/integration.yaml)
 [Go Report Card](https://goreportcard.com/report/github.com/kubeflow/spark-operator)
-[GitHub release](https://github.com/kubeflow/spark-operator/releases)
-[OpenSSF Best Practices](https://www.bestpractices.dev/projects/10524)

 ## What is Spark Operator?

 The Kubernetes Operator for Apache Spark aims to make specifying and running [Spark](https://github.com/apache/spark) applications as easy and idiomatic as running other workloads on Kubernetes. It uses
 [Kubernetes custom resources](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/) for specifying, running, and surfacing status of Spark applications.

-## Quick Start
-
-For a more detailed guide, please refer to the [Getting Started guide](https://www.kubeflow.org/docs/components/spark-operator/getting-started/).
-
-```bash
-# Add the Helm repository
-helm repo add --force-update spark-operator https://kubeflow.github.io/spark-operator
-
-# Install the operator into the spark-operator namespace and wait for deployments to be ready
-helm install spark-operator spark-operator/spark-operator \
-	--namespace spark-operator \
-	--create-namespace \
-	--wait
-
-# Create an example application in the default namespace
-kubectl apply -f https://raw.githubusercontent.com/kubeflow/spark-operator/refs/heads/master/examples/spark-pi.yaml
-
-# Get the status of the application
-kubectl get sparkapp spark-pi
-
-# Delete the application
-kubectl delete sparkapp spark-pi
-```

 ## Overview

 For a complete reference of the custom resource definitions, please refer to the [API Definition](docs/api-docs.md). For details on its design, please refer to the [Architecture](https://www.kubeflow.org/docs/components/spark-operator/overview/#architecture). It requires Spark 2.3 and above that supports Kubernetes as a native scheduler backend.

@@ -48,6 +21,8 @@ The Kubernetes Operator for Apache Spark currently supports the following list o
 * Supports automatic application re-submission for updated `SparkApplication` objects with updated specification.
 * Supports automatic application restart with a configurable restart policy.
 * Supports automatic retries of failed submissions with optional linear back-off.
+* Supports mounting local Hadoop configuration as a Kubernetes ConfigMap automatically via `sparkctl`.
+* Supports automatically staging local application dependencies to Google Cloud Storage (GCS) via `sparkctl`.
 * Supports collecting and exporting application-level metrics and driver/executor metrics to Prometheus.

@@ -78,21 +53,18 @@ If you are running Spark operator on Google Kubernetes Engine (GKE) and want to
 The following table lists the most recent few versions of the operator.

 | Operator Version | API Version | Kubernetes Version | Base Spark Version |
-|-----------------------|-------------|--------------------|--------------------|
-| `v2.2.x` | `v1beta2` | 1.16+ | `3.5.5` |
-| `v2.1.x` | `v1beta2` | 1.16+ | `3.5.3` |
-| `v2.0.x` | `v1beta2` | 1.16+ | `3.5.2` |
+| ------------- | ------------- | ------------- | ------------- |
 | `v1beta2-1.6.x-3.5.0` | `v1beta2` | 1.16+ | `3.5.0` |
 | `v1beta2-1.5.x-3.5.0` | `v1beta2` | 1.16+ | `3.5.0` |
 | `v1beta2-1.4.x-3.5.0` | `v1beta2` | 1.16+ | `3.5.0` |
 | `v1beta2-1.3.x-3.1.1` | `v1beta2` | 1.16+ | `3.1.1` |
 | `v1beta2-1.2.3-3.1.1` | `v1beta2` | 1.13+ | `3.1.1` |
 | `v1beta2-1.2.2-3.0.0` | `v1beta2` | 1.13+ | `3.0.0` |
 | `v1beta2-1.2.1-3.0.0` | `v1beta2` | 1.13+ | `3.0.0` |
 | `v1beta2-1.2.0-3.0.0` | `v1beta2` | 1.13+ | `3.0.0` |
 | `v1beta2-1.1.x-2.4.5` | `v1beta2` | 1.13+ | `2.4.5` |
 | `v1beta2-1.0.x-2.4.4` | `v1beta2` | 1.13+ | `2.4.4` |

 ## Developer Guide
(Removed file: common v1alpha1 API types)

@@ -1,82 +0,0 @@
-/*
-Copyright 2025 The Kubeflow authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    https://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha1
-
-// DeployMode describes the type of deployment of a Spark application.
-type DeployMode string
-
-// Different types of deployments.
-const (
-	DeployModeCluster DeployMode = "cluster"
-	DeployModeClient  DeployMode = "client"
-)
-
-// DriverState tells the current state of a spark driver.
-type DriverState string
-
-// Different states a spark driver may have.
-const (
-	DriverStatePending   DriverState = "PENDING"
-	DriverStateRunning   DriverState = "RUNNING"
-	DriverStateCompleted DriverState = "COMPLETED"
-	DriverStateFailed    DriverState = "FAILED"
-	DriverStateUnknown   DriverState = "UNKNOWN"
-)
-
-// ExecutorState tells the current state of an executor.
-type ExecutorState string
-
-// Different states an executor may have.
-const (
-	ExecutorStatePending   ExecutorState = "PENDING"
-	ExecutorStateRunning   ExecutorState = "RUNNING"
-	ExecutorStateCompleted ExecutorState = "COMPLETED"
-	ExecutorStateFailed    ExecutorState = "FAILED"
-	ExecutorStateUnknown   ExecutorState = "UNKNOWN"
-)
-
-// DynamicAllocation contains configuration options for dynamic allocation.
-type DynamicAllocation struct {
-	// Enabled controls whether dynamic allocation is enabled or not.
-	// +optional
-	Enabled bool `json:"enabled,omitempty"`
-
-	// InitialExecutors is the initial number of executors to request. If .spec.executor.instances
-	// is also set, the initial number of executors is set to the bigger of that and this option.
-	// +optional
-	InitialExecutors *int32 `json:"initialExecutors,omitempty"`
-
-	// MinExecutors is the lower bound for the number of executors if dynamic allocation is enabled.
-	// +optional
-	MinExecutors *int32 `json:"minExecutors,omitempty"`
-
-	// MaxExecutors is the upper bound for the number of executors if dynamic allocation is enabled.
-	// +optional
-	MaxExecutors *int32 `json:"maxExecutors,omitempty"`
-
-	// ShuffleTrackingEnabled enables shuffle file tracking for executors, which allows dynamic allocation without
-	// the need for an external shuffle service. This option will try to keep alive executors that are storing
-	// shuffle data for active jobs. If external shuffle service is enabled, set ShuffleTrackingEnabled to false.
-	// ShuffleTrackingEnabled is true by default if dynamicAllocation.enabled is true.
-	// +optional
-	ShuffleTrackingEnabled *bool `json:"shuffleTrackingEnabled,omitempty"`
-
-	// ShuffleTrackingTimeout controls the timeout in milliseconds for executors that are holding
-	// shuffle data if shuffle tracking is enabled (true by default if dynamic allocation is enabled).
-	// +optional
-	ShuffleTrackingTimeout *int64 `json:"shuffleTrackingTimeout,omitempty"`
-}
(Removed file: SparkConnect API types in package v1alpha1)

@@ -1,185 +0,0 @@
-/*
-Copyright 2025 The Kubeflow authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    https://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha1
-
-import (
-	corev1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-func init() {
-	SchemeBuilder.Register(&SparkConnect{}, &SparkConnectList{})
-}
-
-// +kubebuilder:object:root=true
-// +kubebuilder:metadata:annotations="api-approved.kubernetes.io=https://github.com/kubeflow/spark-operator/pull/1298"
-// +kubebuilder:resource:scope=Namespaced,shortName=sparkconn,singular=sparkconnect
-// +kubebuilder:subresource:status
-// +kubebuilder:printcolumn:JSONPath=.metadata.creationTimestamp,name=Age,type=date
-
-// SparkConnect is the Schema for the sparkconnections API.
-type SparkConnect struct {
-	metav1.TypeMeta   `json:",inline"`
-	metav1.ObjectMeta `json:"metadata"`
-
-	Spec   SparkConnectSpec   `json:"spec"`
-	Status SparkConnectStatus `json:"status,omitempty"`
-}
-
-// +kubebuilder:object:root=true
-
-// SparkConnectList contains a list of SparkConnect.
-type SparkConnectList struct {
-	metav1.TypeMeta `json:",inline"`
-	metav1.ListMeta `json:"metadata,omitempty"`
-	Items           []SparkConnect `json:"items"`
-}
-
-// SparkConnectSpec defines the desired state of SparkConnect.
-type SparkConnectSpec struct {
-	// SparkVersion is the version of Spark the spark connect use.
-	SparkVersion string `json:"sparkVersion"`
-
-	// Image is the container image for the driver, executor, and init-container. Any custom container images for the
-	// driver, executor, or init-container takes precedence over this.
-	// +optional
-	Image *string `json:"image,omitempty"`
-
-	// HadoopConf carries user-specified Hadoop configuration properties as they would use the "--conf" option
-	// in spark-submit. The SparkApplication controller automatically adds prefix "spark.hadoop." to Hadoop
-	// configuration properties.
-	// +optional
-	HadoopConf map[string]string `json:"hadoopConf,omitempty"`
-
-	// SparkConf carries user-specified Spark configuration properties as they would use the "--conf" option in
-	// spark-submit.
-	// +optional
-	SparkConf map[string]string `json:"sparkConf,omitempty"`
-
-	// Server is the Spark connect server specification.
-	Server ServerSpec `json:"server"`
-
-	// Executor is the Spark executor specification.
-	Executor ExecutorSpec `json:"executor"`
-
-	// DynamicAllocation configures dynamic allocation that becomes available for the Kubernetes
-	// scheduler backend since Spark 3.0.
-	// +optional
-	DynamicAllocation *DynamicAllocation `json:"dynamicAllocation,omitempty"`
-}
-
-// ServerSpec is specification of the Spark connect server.
-type ServerSpec struct {
-	SparkPodSpec `json:",inline"`
-}
-
-// ExecutorSpec is specification of the executor.
-type ExecutorSpec struct {
-	SparkPodSpec `json:",inline"`
-
-	// Instances is the number of executor instances.
-	// +optional
-	// +kubebuilder:validation:Minimum=0
-	Instances *int32 `json:"instances,omitempty"`
-}
-
-// SparkPodSpec defines common things that can be customized for a Spark driver or executor pod.
-type SparkPodSpec struct {
-	// Cores maps to `spark.driver.cores` or `spark.executor.cores` for the driver and executors, respectively.
-	// +optional
-	// +kubebuilder:validation:Minimum=1
-	Cores *int32 `json:"cores,omitempty"`
-
-	// Memory is the amount of memory to request for the pod.
-	// +optional
-	Memory *string `json:"memory,omitempty"`
-
-	// Template is a pod template that can be used to define the driver or executor pod configurations that Spark configurations do not support.
-	// Spark version >= 3.0.0 is required.
-	// Ref: https://spark.apache.org/docs/latest/running-on-kubernetes.html#pod-template.
-	// +optional
-	// +kubebuilder:validation:Schemaless
-	// +kubebuilder:validation:Type:=object
-	// +kubebuilder:pruning:PreserveUnknownFields
-	Template *corev1.PodTemplateSpec `json:"template,omitempty"`
-}
-
-// SparkConnectStatus defines the observed state of SparkConnect.
-type SparkConnectStatus struct {
-	// Represents the latest available observations of a SparkConnect's current state.
-	// +patchMergeKey=type
-	// +patchStrategy=merge
-	// +listType=map
-	// +listMapKey=type
-	// +optional
-	Conditions []metav1.Condition `json:"conditions,omitempty" patchMergeKey:"type" patchStrategy:"merge"`
-
-	// State represents the current state of the SparkConnect.
-	State SparkConnectState `json:"state,omitempty"`
-
-	// Server represents the current state of the SparkConnect server.
-	Server SparkConnectServerStatus `json:"server,omitempty"`
-
-	// Executors represents the current state of the SparkConnect executors.
-	Executors map[string]int `json:"executors,omitempty"`
-
-	// StartTime is the time at which the SparkConnect controller started processing the SparkConnect.
-	StartTime metav1.Time `json:"startTime,omitempty"`
-
-	// LastUpdateTime is the time at which the SparkConnect controller last updated the SparkConnect.
-	LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty"`
-}
-
-// SparkConnectConditionType represents the condition types of the SparkConnect.
-type SparkConnectConditionType string
-
-// All possible condition types of the SparkConnect.
-const (
-	SparkConnectConditionServerPodReady SparkConnectConditionType = "ServerPodReady"
-)
-
-// SparkConnectConditionReason represents the reason of SparkConnect conditions.
-type SparkConnectConditionReason string
-
-// All possible reasons of SparkConnect conditions.
-const (
-	SparkConnectConditionReasonServerPodReady    SparkConnectConditionReason = "ServerPodReady"
-	SparkConnectConditionReasonServerPodNotReady SparkConnectConditionReason = "ServerPodNotReady"
-)
-
-// SparkConnectState represents the current state of the SparkConnect.
-type SparkConnectState string
-
-// All possible states of the SparkConnect.
-const (
-	SparkConnectStateNew          SparkConnectState = ""
-	SparkConnectStateProvisioning SparkConnectState = "Provisioning"
-	SparkConnectStateReady        SparkConnectState = "Ready"
-	SparkConnectStateNotReady     SparkConnectState = "NotReady"
-	SparkConnectStateFailed       SparkConnectState = "Failed"
-)
-
-type SparkConnectServerStatus struct {
-	// PodName is the name of the pod that is running the Spark Connect server.
-	PodName string `json:"podName,omitempty"`
-
-	// PodIP is the IP address of the pod that is running the Spark Connect server.
-	PodIP string `json:"podIp,omitempty"`
-
-	// ServiceName is the name of the service that is exposing the Spark Connect server.
-	ServiceName string `json:"serviceName,omitempty"`
-}
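For orientation, the following is a minimal sketch of a SparkConnect manifest exercising the fields defined in the removed types above. The name, Spark version, and sizing values are illustrative assumptions, not taken from this diff:

```yaml
# Hypothetical SparkConnect manifest; field names follow the v1alpha1 types above.
apiVersion: sparkoperator.k8s.io/v1alpha1
kind: SparkConnect
metadata:
  name: spark-connect-example  # illustrative name
spec:
  sparkVersion: "4.0.0"        # assumed version string
  server:                      # ServerSpec, an inline SparkPodSpec
    cores: 1
    memory: 1g
  executor:                    # ExecutorSpec adds the instances field
    instances: 2
    cores: 1
    memory: 1g
  dynamicAllocation:           # optional, per the DynamicAllocation struct
    enabled: true
    minExecutors: 1
    maxExecutors: 5
```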
(Removed file: generated deepcopy code for package v1alpha1)

@@ -1,281 +0,0 @@
-//go:build !ignore_autogenerated
-
-/*
-Copyright 2025 The Kubeflow authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    https://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by controller-gen. DO NOT EDIT.
-
-package v1alpha1
-
-import (
-	"k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	runtime "k8s.io/apimachinery/pkg/runtime"
-)
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *DynamicAllocation) DeepCopyInto(out *DynamicAllocation) {
-	*out = *in
-	if in.InitialExecutors != nil {
-		in, out := &in.InitialExecutors, &out.InitialExecutors
-		*out = new(int32)
-		**out = **in
-	}
-	if in.MinExecutors != nil {
-		in, out := &in.MinExecutors, &out.MinExecutors
-		*out = new(int32)
-		**out = **in
-	}
-	if in.MaxExecutors != nil {
-		in, out := &in.MaxExecutors, &out.MaxExecutors
-		*out = new(int32)
-		**out = **in
-	}
-	if in.ShuffleTrackingEnabled != nil {
-		in, out := &in.ShuffleTrackingEnabled, &out.ShuffleTrackingEnabled
-		*out = new(bool)
-		**out = **in
-	}
-	if in.ShuffleTrackingTimeout != nil {
-		in, out := &in.ShuffleTrackingTimeout, &out.ShuffleTrackingTimeout
-		*out = new(int64)
-		**out = **in
-	}
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DynamicAllocation.
-func (in *DynamicAllocation) DeepCopy() *DynamicAllocation {
-	if in == nil {
-		return nil
-	}
-	out := new(DynamicAllocation)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ExecutorSpec) DeepCopyInto(out *ExecutorSpec) {
-	*out = *in
-	in.SparkPodSpec.DeepCopyInto(&out.SparkPodSpec)
-	if in.Instances != nil {
-		in, out := &in.Instances, &out.Instances
-		*out = new(int32)
-		**out = **in
-	}
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecutorSpec.
-func (in *ExecutorSpec) DeepCopy() *ExecutorSpec {
-	if in == nil {
-		return nil
-	}
-	out := new(ExecutorSpec)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ServerSpec) DeepCopyInto(out *ServerSpec) {
-	*out = *in
-	in.SparkPodSpec.DeepCopyInto(&out.SparkPodSpec)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerSpec.
-func (in *ServerSpec) DeepCopy() *ServerSpec {
-	if in == nil {
-		return nil
-	}
-	out := new(ServerSpec)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *SparkConnect) DeepCopyInto(out *SparkConnect) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
-	in.Spec.DeepCopyInto(&out.Spec)
-	in.Status.DeepCopyInto(&out.Status)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkConnect.
-func (in *SparkConnect) DeepCopy() *SparkConnect {
-	if in == nil {
-		return nil
-	}
-	out := new(SparkConnect)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *SparkConnect) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *SparkConnectList) DeepCopyInto(out *SparkConnectList) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ListMeta.DeepCopyInto(&out.ListMeta)
-	if in.Items != nil {
-		in, out := &in.Items, &out.Items
-		*out = make([]SparkConnect, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkConnectList.
-func (in *SparkConnectList) DeepCopy() *SparkConnectList {
-	if in == nil {
-		return nil
-	}
-	out := new(SparkConnectList)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *SparkConnectList) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *SparkConnectServerStatus) DeepCopyInto(out *SparkConnectServerStatus) {
-	*out = *in
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkConnectServerStatus.
-func (in *SparkConnectServerStatus) DeepCopy() *SparkConnectServerStatus {
-	if in == nil {
-		return nil
-	}
-	out := new(SparkConnectServerStatus)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *SparkConnectSpec) DeepCopyInto(out *SparkConnectSpec) {
-	*out = *in
-	if in.Image != nil {
-		in, out := &in.Image, &out.Image
-		*out = new(string)
-		**out = **in
-	}
-	if in.HadoopConf != nil {
-		in, out := &in.HadoopConf, &out.HadoopConf
-		*out = make(map[string]string, len(*in))
-		for key, val := range *in {
-			(*out)[key] = val
-		}
-	}
-	if in.SparkConf != nil {
-		in, out := &in.SparkConf, &out.SparkConf
-		*out = make(map[string]string, len(*in))
-		for key, val := range *in {
-			(*out)[key] = val
-		}
-	}
-	in.Server.DeepCopyInto(&out.Server)
-	in.Executor.DeepCopyInto(&out.Executor)
-	if in.DynamicAllocation != nil {
-		in, out := &in.DynamicAllocation, &out.DynamicAllocation
-		*out = new(DynamicAllocation)
-		(*in).DeepCopyInto(*out)
-	}
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkConnectSpec.
-func (in *SparkConnectSpec) DeepCopy() *SparkConnectSpec {
-	if in == nil {
-		return nil
-	}
-	out := new(SparkConnectSpec)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *SparkConnectStatus) DeepCopyInto(out *SparkConnectStatus) {
-	*out = *in
-	if in.Conditions != nil {
-		in, out := &in.Conditions, &out.Conditions
-		*out = make([]metav1.Condition, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	out.Server = in.Server
-	if in.Executors != nil {
-		in, out := &in.Executors, &out.Executors
-		*out = make(map[string]int, len(*in))
-		for key, val := range *in {
-			(*out)[key] = val
-		}
-	}
-	in.StartTime.DeepCopyInto(&out.StartTime)
-	in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkConnectStatus.
-func (in *SparkConnectStatus) DeepCopy() *SparkConnectStatus {
-	if in == nil {
-		return nil
-	}
-	out := new(SparkConnectStatus)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *SparkPodSpec) DeepCopyInto(out *SparkPodSpec) {
-	*out = *in
-	if in.Cores != nil {
-		in, out := &in.Cores, &out.Cores
-		*out = new(int32)
-		**out = **in
-	}
-	if in.Memory != nil {
-		in, out := &in.Memory, &out.Memory
-		*out = new(string)
-		**out = **in
-	}
-	if in.Template != nil {
-		in, out := &in.Template, &out.Template
-		*out = new(v1.PodTemplateSpec)
-		(*in).DeepCopyInto(*out)
-	}
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkPodSpec.
-func (in *SparkPodSpec) DeepCopy() *SparkPodSpec {
-	if in == nil {
-		return nil
-	}
-	out := new(SparkPodSpec)
-	in.DeepCopyInto(out)
-	return out
-}
(Added file: SparkApplication defaulting in package v1beta1)

@@ -0,0 +1,74 @@
+/*
+Copyright 2017 Google LLC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+// SetSparkApplicationDefaults sets default values for certain fields of a SparkApplication.
+func SetSparkApplicationDefaults(app *SparkApplication) {
+	if app == nil {
+		return
+	}
+
+	if app.Spec.Mode == "" {
+		app.Spec.Mode = ClusterMode
+	}
+
+	if app.Spec.RestartPolicy.Type == "" {
+		app.Spec.RestartPolicy.Type = Never
+	}
+
+	if app.Spec.RestartPolicy.Type != Never {
+		// Default to 5 sec if the RestartPolicy is OnFailure or Always and these values aren't specified.
+		if app.Spec.RestartPolicy.OnFailureRetryInterval == nil {
+			app.Spec.RestartPolicy.OnFailureRetryInterval = new(int64)
+			*app.Spec.RestartPolicy.OnFailureRetryInterval = 5
+		}
+
+		if app.Spec.RestartPolicy.OnSubmissionFailureRetryInterval == nil {
+			app.Spec.RestartPolicy.OnSubmissionFailureRetryInterval = new(int64)
+			*app.Spec.RestartPolicy.OnSubmissionFailureRetryInterval = 5
+		}
+	}
+
+	setDriverSpecDefaults(&app.Spec.Driver, app.Spec.SparkConf)
+	setExecutorSpecDefaults(&app.Spec.Executor, app.Spec.SparkConf)
+}
+
+func setDriverSpecDefaults(spec *DriverSpec, sparkConf map[string]string) {
+	if _, exists := sparkConf["spark.driver.cores"]; !exists && spec.Cores == nil {
+		spec.Cores = new(float32)
+		*spec.Cores = 1
+	}
+	if _, exists := sparkConf["spark.driver.memory"]; !exists && spec.Memory == nil {
+		spec.Memory = new(string)
+		*spec.Memory = "1g"
+	}
+}
+
+func setExecutorSpecDefaults(spec *ExecutorSpec, sparkConf map[string]string) {
+	if _, exists := sparkConf["spark.executor.cores"]; !exists && spec.Cores == nil {
+		spec.Cores = new(float32)
+		*spec.Cores = 1
+	}
+	if _, exists := sparkConf["spark.executor.memory"]; !exists && spec.Memory == nil {
+		spec.Memory = new(string)
+		*spec.Memory = "1g"
+	}
+	if _, exists := sparkConf["spark.executor.instances"]; !exists && spec.Instances == nil {
+		spec.Instances = new(int32)
+		*spec.Instances = 1
+	}
+}
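To make the defaulting logic concrete, here is a hedged sketch of a minimal v1beta1 SparkApplication; the comments note what SetSparkApplicationDefaults would fill in. The name, main class, and file path are illustrative, not taken from this diff:

```yaml
# Hypothetical minimal manifest; defaults per SetSparkApplicationDefaults above.
apiVersion: sparkoperator.k8s.io/v1beta1
kind: SparkApplication
metadata:
  name: spark-pi  # illustrative name
spec:
  type: Scala
  sparkVersion: "2.4.0"                                      # assumed
  mainClass: org.apache.spark.examples.SparkPi               # illustrative
  mainApplicationFile: local:///opt/spark/examples/app.jar   # illustrative
  # With everything else omitted, defaulting yields:
  #   mode: cluster               (ClusterMode when unset)
  #   restartPolicy.type: Never   (retry intervals of 5s apply only to OnFailure/Always)
  #   driver:   cores: 1, memory: "1g"   (unless set here or via sparkConf)
  #   executor: cores: 1, memory: "1g", instances: 1
```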
(Modified file: package doc for the API group)

@@ -1,5 +1,5 @@
 /*
-Copyright 2025 The Kubeflow authors.
+Copyright 2017 Google LLC

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.

@@ -16,7 +16,6 @@ limitations under the License.
 // +k8s:deepcopy-gen=package,register

-// Package v1alpha1 is the v1alpha1 version of the API.
+// Package v1beta1 is the v1beta1 version of the API.
 // +groupName=sparkoperator.k8s.io
-// +versionName=v1alpha1
-package v1alpha1
+package v1beta1
(Modified file: group version registration)

@@ -1,5 +1,5 @@
 /*
-Copyright 2025 The Kubeflow authors.
+Copyright 2024 The Kubeflow authors.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.

@@ -14,10 +14,10 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */

-// Package v1alpha1 contains API Schema definitions for the v1alpha1 API group
+// Package v1beta1 contains API Schema definitions for the v1beta1 API group
 // +kubebuilder:object:generate=true
 // +groupName=sparkoperator.k8s.io
-package v1alpha1
+package v1beta1

 import (
 	"k8s.io/apimachinery/pkg/runtime/schema"

@@ -26,7 +26,7 @@ import (
 var (
 	// GroupVersion is group version used to register these objects.
-	GroupVersion = schema.GroupVersion{Group: "sparkoperator.k8s.io", Version: "v1alpha1"}
+	GroupVersion = schema.GroupVersion{Group: "sparkoperator.k8s.io", Version: "v1beta1"}

 	// SchemeBuilder is used to add go types to the GroupVersionKind scheme.
 	SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
(Modified file: scheme group/version constants)

@@ -1,5 +1,5 @@
 /*
-Copyright 2025 The Kubeflow authors.
+Copyright 2024 The Kubeflow authors.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */

-package v1alpha1
+package v1beta1

 import (
 	"k8s.io/apimachinery/pkg/runtime/schema"

@@ -22,7 +22,7 @@ import (
 const (
 	Group   = "sparkoperator.k8s.io"
-	Version = "v1alpha1"
+	Version = "v1beta1"
 )

 // SchemeGroupVersion is the group version used to register these objects.
(Added file: ScheduledSparkApplication API types in package v1beta1)

@@ -0,0 +1,104 @@
+/*
+Copyright 2024 The Kubeflow authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
+// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
+
+// +kubebuilder:skip
+
+func init() {
+	SchemeBuilder.Register(&ScheduledSparkApplication{}, &ScheduledSparkApplicationList{})
+}
+
+// ScheduledSparkApplicationSpec defines the desired state of ScheduledSparkApplication
+type ScheduledSparkApplicationSpec struct {
+	// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
+	// Important: Run "make generate" to regenerate code after modifying this file
+
+	// Schedule is a cron schedule on which the application should run.
+	Schedule string `json:"schedule"`
+	// Template is a template from which SparkApplication instances can be created.
+	Template SparkApplicationSpec `json:"template"`
+	// Suspend is a flag telling the controller to suspend subsequent runs of the application if set to true.
+	// Optional.
+	// Defaults to false.
+	Suspend *bool `json:"suspend,omitempty"`
+	// ConcurrencyPolicy is the policy governing concurrent SparkApplication runs.
+	ConcurrencyPolicy ConcurrencyPolicy `json:"concurrencyPolicy,omitempty"`
+	// SuccessfulRunHistoryLimit is the number of past successful runs of the application to keep.
+	// Optional.
+	// Defaults to 1.
+	SuccessfulRunHistoryLimit *int32 `json:"successfulRunHistoryLimit,omitempty"`
+	// FailedRunHistoryLimit is the number of past failed runs of the application to keep.
+	// Optional.
+	// Defaults to 1.
+	FailedRunHistoryLimit *int32 `json:"failedRunHistoryLimit,omitempty"`
+}
+
+// ScheduledSparkApplicationStatus defines the observed state of ScheduledSparkApplication
+type ScheduledSparkApplicationStatus struct {
+	// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
+	// Important: Run "make generate" to regenerate code after modifying this file
+
+	// LastRun is the time when the last run of the application started.
+	LastRun metav1.Time `json:"lastRun,omitempty"`
+	// NextRun is the time when the next run of the application will start.
+	NextRun metav1.Time `json:"nextRun,omitempty"`
+	// LastRunName is the name of the SparkApplication for the most recent run of the application.
+	LastRunName string `json:"lastRunName,omitempty"`
+	// PastSuccessfulRunNames keeps the names of SparkApplications for past successful runs.
+	PastSuccessfulRunNames []string `json:"pastSuccessfulRunNames,omitempty"`
+	// PastFailedRunNames keeps the names of SparkApplications for past failed runs.
+	PastFailedRunNames []string `json:"pastFailedRunNames,omitempty"`
+	// ScheduleState is the current scheduling state of the application.
+	ScheduleState ScheduleState `json:"scheduleState,omitempty"`
+	// Reason tells why the ScheduledSparkApplication is in the particular ScheduleState.
+	Reason string `json:"reason,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+
+// ScheduledSparkApplication is the Schema for the scheduledsparkapplications API
+type ScheduledSparkApplication struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec   ScheduledSparkApplicationSpec   `json:"spec,omitempty"`
+	Status ScheduledSparkApplicationStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// ScheduledSparkApplicationList contains a list of ScheduledSparkApplication
+type ScheduledSparkApplicationList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []ScheduledSparkApplication `json:"items"`
+}
+
+type ScheduleState string
+
+const (
+	FailedValidationState ScheduleState = "FailedValidation"
+	ScheduledState        ScheduleState = "Scheduled"
+)
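Based on the spec fields above, a ScheduledSparkApplication might be sketched as follows; the schedule, concurrency policy value, and template contents are assumptions for illustration:

```yaml
# Hypothetical ScheduledSparkApplication manifest; fields per the v1beta1 types above.
apiVersion: sparkoperator.k8s.io/v1beta1
kind: ScheduledSparkApplication
metadata:
  name: spark-pi-scheduled  # illustrative name
spec:
  schedule: "@every 10m"    # cron-style schedule string
  concurrencyPolicy: Allow  # assumed ConcurrencyPolicy value
  successfulRunHistoryLimit: 1
  failedRunHistoryLimit: 1
  template:                 # a SparkApplicationSpec, as defined elsewhere in v1beta1
    type: Scala
    sparkVersion: "2.4.0"   # assumed
    mode: cluster
```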
@ -0,0 +1,497 @@
/*
Copyright 2024 The Kubeflow authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    https://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// +kubebuilder:skip

package v1beta1

import (
    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.

// +kubebuilder:skip

func init() {
    SchemeBuilder.Register(&SparkApplication{}, &SparkApplicationList{})
}

// SparkApplicationSpec defines the desired state of SparkApplication
type SparkApplicationSpec struct {
    // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
    // Important: Run "make generate" to regenerate code after modifying this file

    // Type tells the type of the Spark application.
    Type SparkApplicationType `json:"type"`
    // SparkVersion is the version of Spark the application uses.
    SparkVersion string `json:"sparkVersion"`
    // Mode is the deployment mode of the Spark application.
    Mode DeployMode `json:"mode,omitempty"`
    // Image is the container image for the driver, executor, and init-container. Any custom container images for the
    // driver, executor, or init-container takes precedence over this.
    // Optional.
    Image *string `json:"image,omitempty"`
    // InitContainerImage is the image of the init-container to use. Overrides Spec.Image if set.
    // Optional.
    InitContainerImage *string `json:"initContainerImage,omitempty"`
    // ImagePullPolicy is the image pull policy for the driver, executor, and init-container.
    // Optional.
    ImagePullPolicy *string `json:"imagePullPolicy,omitempty"`
    // ImagePullSecrets is the list of image-pull secrets.
    // Optional.
    ImagePullSecrets []string `json:"imagePullSecrets,omitempty"`
    // MainClass is the fully-qualified main class of the Spark application.
    // This only applies to Java/Scala Spark applications.
    // Optional.
    MainClass *string `json:"mainClass,omitempty"`
    // MainFile is the path to a bundled JAR, Python, or R file of the application.
    // Optional.
    MainApplicationFile *string `json:"mainApplicationFile"`
    // Arguments is a list of arguments to be passed to the application.
    // Optional.
    Arguments []string `json:"arguments,omitempty"`
    // SparkConf carries user-specified Spark configuration properties as they would use the "--conf" option in
    // spark-submit.
    // Optional.
    SparkConf map[string]string `json:"sparkConf,omitempty"`
    // HadoopConf carries user-specified Hadoop configuration properties as they would use the "--conf" option
    // in spark-submit. The SparkApplication controller automatically adds prefix "spark.hadoop." to Hadoop
    // configuration properties.
    // Optional.
    HadoopConf map[string]string `json:"hadoopConf,omitempty"`
    // SparkConfigMap carries the name of the ConfigMap containing Spark configuration files such as log4j.properties.
    // The controller will add environment variable SPARK_CONF_DIR to the path where the ConfigMap is mounted to.
    // Optional.
    SparkConfigMap *string `json:"sparkConfigMap,omitempty"`
    // HadoopConfigMap carries the name of the ConfigMap containing Hadoop configuration files such as core-site.xml.
    // The controller will add environment variable HADOOP_CONF_DIR to the path where the ConfigMap is mounted to.
    // Optional.
    HadoopConfigMap *string `json:"hadoopConfigMap,omitempty"`
    // Volumes is the list of Kubernetes volumes that can be mounted by the driver and/or executors.
    // Optional.
    Volumes []corev1.Volume `json:"volumes,omitempty"`
    // Driver is the driver specification.
    Driver DriverSpec `json:"driver"`
    // Executor is the executor specification.
    Executor ExecutorSpec `json:"executor"`
    // Deps captures all possible types of dependencies of a Spark application.
    Deps Dependencies `json:"deps"`
    // RestartPolicy defines the policy on if and in which conditions the controller should restart an application.
    RestartPolicy RestartPolicy `json:"restartPolicy,omitempty"`
    // NodeSelector is the Kubernetes node selector to be added to the driver and executor pods.
    // This field is mutually exclusive with nodeSelector at podSpec level (driver or executor).
    // This field will be deprecated in future versions (at SparkApplicationSpec level).
    // Optional.
    NodeSelector map[string]string `json:"nodeSelector,omitempty"`
    // FailureRetries is the number of times to retry a failed application before giving up.
    // This is best effort and actual retry attempts can be >= the value specified.
    // Optional.
    FailureRetries *int32 `json:"failureRetries,omitempty"`
    // RetryInterval is the unit of intervals in seconds between submission retries.
    // Optional.
    RetryInterval *int64 `json:"retryInterval,omitempty"`
    // This sets the major Python version of the docker
    // image used to run the driver and executor containers. Can either be 2 or 3, default 2.
    // Optional.
    PythonVersion *string `json:"pythonVersion,omitempty"`
    // This sets the Memory Overhead Factor that will allocate memory to non-JVM memory.
    // For JVM-based jobs this value will default to 0.10, for non-JVM jobs 0.40. Value of this field will
    // be overridden by `Spec.Driver.MemoryOverhead` and `Spec.Executor.MemoryOverhead` if they are set.
    // Optional.
    MemoryOverheadFactor *string `json:"memoryOverheadFactor,omitempty"`
    // Monitoring configures how monitoring is handled.
    // Optional.
    Monitoring *MonitoringSpec `json:"monitoring,omitempty"`
    // BatchScheduler configures which batch scheduler will be used for scheduling.
    // Optional.
    BatchScheduler *string `json:"batchScheduler,omitempty"`
}

// SparkApplicationStatus defines the observed state of SparkApplication
type SparkApplicationStatus struct {
    // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
    // Important: Run "make generate" to regenerate code after modifying this file

    // SparkApplicationID is set by the spark-distribution (via the spark.app.id config) on the driver and executor pods
    SparkApplicationID string `json:"sparkApplicationId,omitempty"`
    // SubmissionID is a unique ID of the current submission of the application.
    SubmissionID string `json:"submissionID,omitempty"`
    // LastSubmissionAttemptTime is the time for the last application submission attempt.
    LastSubmissionAttemptTime metav1.Time `json:"lastSubmissionAttemptTime,omitempty"`
    // TerminationTime is the time when the application runs to completion, if it does.
    TerminationTime metav1.Time `json:"terminationTime,omitempty"`
    // DriverInfo has information about the driver.
    DriverInfo DriverInfo `json:"driverInfo"`
    // AppState tells the overall application state.
    AppState ApplicationState `json:"applicationState,omitempty"`
    // ExecutorState records the state of executors by executor Pod names.
    ExecutorState map[string]ExecutorState `json:"executorState,omitempty"`
    // ExecutionAttempts is the total number of attempts to run a submitted application to completion.
    // Incremented upon each attempted run of the application and reset upon invalidation.
    ExecutionAttempts int32 `json:"executionAttempts,omitempty"`
    // SubmissionAttempts is the total number of attempts to submit an application to run.
    // Incremented upon each attempted submission of the application and reset upon invalidation and rerun.
    SubmissionAttempts int32 `json:"submissionAttempts,omitempty"`
}
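As a quick illustration (not part of the diff), a client might read these status fields as sketched below; the helper is hypothetical and uses the ApplicationStateType constants declared further down in this file.

// isTerminal is a hypothetical helper: it reports whether the application has
// reached a terminal state, based solely on AppState from the status above.
func isTerminal(status SparkApplicationStatus) bool {
    switch status.AppState.State {
    case CompletedState, FailedState:
        return true
    default:
        return false
    }
}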
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status

// SparkApplication is the Schema for the sparkapplications API
type SparkApplication struct {
    metav1.TypeMeta   `json:",inline"`
    metav1.ObjectMeta `json:"metadata,omitempty"`

    Spec   SparkApplicationSpec   `json:"spec,omitempty"`
    Status SparkApplicationStatus `json:"status,omitempty"`
}

// +kubebuilder:object:root=true

// SparkApplicationList contains a list of SparkApplication
type SparkApplicationList struct {
    metav1.TypeMeta `json:",inline"`
    metav1.ListMeta `json:"metadata,omitempty"`
    Items []SparkApplication `json:"items"`
}

// SparkApplicationType describes the type of a Spark application.
type SparkApplicationType string

// Different types of Spark applications.
const (
    JavaApplicationType   SparkApplicationType = "Java"
    ScalaApplicationType  SparkApplicationType = "Scala"
    PythonApplicationType SparkApplicationType = "Python"
    RApplicationType      SparkApplicationType = "R"
)

// DeployMode describes the type of deployment of a Spark application.
type DeployMode string

// Different types of deployments.
const (
    ClusterMode         DeployMode = "cluster"
    ClientMode          DeployMode = "client"
    InClusterClientMode DeployMode = "in-cluster-client"
)

// RestartPolicy is the policy of if and in which conditions the controller should restart a terminated application.
// This completely defines actions to be taken on any kind of Failures during an application run.
type RestartPolicy struct {
    Type RestartPolicyType `json:"type,omitempty"`

    // FailureRetries are the number of times to retry a failed application before giving up in a particular case.
    // This is best effort and actual retry attempts can be >= the value specified due to caching.
    // These are required if RestartPolicy is OnFailure.
    OnSubmissionFailureRetries *int32 `json:"onSubmissionFailureRetries,omitempty"`
    OnFailureRetries *int32 `json:"onFailureRetries,omitempty"`

    // Interval to wait between successive retries of a failed application.
    OnSubmissionFailureRetryInterval *int64 `json:"onSubmissionFailureRetryInterval,omitempty"`
    OnFailureRetryInterval *int64 `json:"onFailureRetryInterval,omitempty"`
}

type RestartPolicyType string

const (
    Never     RestartPolicyType = "Never"
    OnFailure RestartPolicyType = "OnFailure"
    Always    RestartPolicyType = "Always"
)

type ConcurrencyPolicy string

const (
    // ConcurrencyAllow allows SparkApplications to run concurrently.
    ConcurrencyAllow ConcurrencyPolicy = "Allow"
    // ConcurrencyForbid forbids concurrent runs of SparkApplications, skipping the next run if the previous
    // one hasn't finished yet.
    ConcurrencyForbid ConcurrencyPolicy = "Forbid"
    // ConcurrencyReplace kills the currently running SparkApplication instance and replaces it with a new one.
    ConcurrencyReplace ConcurrencyPolicy = "Replace"
)

// ApplicationStateType represents the type of the current state of an application.
type ApplicationStateType string

// Different states an application may have.
const (
    NewState              ApplicationStateType = ""
    SubmittedState        ApplicationStateType = "SUBMITTED"
    RunningState          ApplicationStateType = "RUNNING"
    CompletedState        ApplicationStateType = "COMPLETED"
    FailedState           ApplicationStateType = "FAILED"
    FailedSubmissionState ApplicationStateType = "SUBMISSION_FAILED"
    PendingRerunState     ApplicationStateType = "PENDING_RERUN"
    InvalidatingState     ApplicationStateType = "INVALIDATING"
    SucceedingState       ApplicationStateType = "SUCCEEDING"
    FailingState          ApplicationStateType = "FAILING"
    UnknownState          ApplicationStateType = "UNKNOWN"
)

// ApplicationState tells the current state of the application and an error message in case of failures.
type ApplicationState struct {
    State        ApplicationStateType `json:"state"`
    ErrorMessage string               `json:"errorMessage,omitempty"`
}

// ExecutorState tells the current state of an executor.
type ExecutorState string

// Different states an executor may have.
const (
    ExecutorPendingState   ExecutorState = "PENDING"
    ExecutorRunningState   ExecutorState = "RUNNING"
    ExecutorCompletedState ExecutorState = "COMPLETED"
    ExecutorFailedState    ExecutorState = "FAILED"
    ExecutorUnknownState   ExecutorState = "UNKNOWN"
)

// Dependencies specifies all possible types of dependencies of a Spark application.
type Dependencies struct {
    // Jars is a list of JAR files the Spark application depends on.
    // Optional.
    Jars []string `json:"jars,omitempty"`
    // Files is a list of files the Spark application depends on.
    // Optional.
    Files []string `json:"files,omitempty"`
    // PyFiles is a list of Python files the Spark application depends on.
    // Optional.
    PyFiles []string `json:"pyFiles,omitempty"`
    // JarsDownloadDir is the location to download jars to in the driver and executors.
    JarsDownloadDir *string `json:"jarsDownloadDir,omitempty"`
    // FilesDownloadDir is the location to download files to in the driver and executors.
    FilesDownloadDir *string `json:"filesDownloadDir,omitempty"`
    // DownloadTimeout specifies the timeout in seconds before aborting the attempt to download
    // and unpack dependencies from remote locations into the driver and executor pods.
    DownloadTimeout *int32 `json:"downloadTimeout,omitempty"`
    // MaxSimultaneousDownloads specifies the maximum number of remote dependencies to download
    // simultaneously in a driver or executor pod.
    MaxSimultaneousDownloads *int32 `json:"maxSimultaneousDownloads,omitempty"`
}

// SparkPodSpec defines common things that can be customized for a Spark driver or executor pod.
// TODO: investigate if we should use v1.PodSpec and limit what can be set instead.
type SparkPodSpec struct {
    // Cores is the number of CPU cores to request for the pod.
    // Optional.
    Cores *float32 `json:"cores,omitempty"`
    // CoreLimit specifies a hard limit on CPU cores for the pod.
    // Optional.
    CoreLimit *string `json:"coreLimit,omitempty"`
    // Memory is the amount of memory to request for the pod.
    // Optional.
    Memory *string `json:"memory,omitempty"`
    // MemoryOverhead is the amount of off-heap memory to allocate in cluster mode, in MiB unless otherwise specified.
    // Optional.
    MemoryOverhead *string `json:"memoryOverhead,omitempty"`
    // GPU specifies GPU requirement for the pod.
    // Optional.
    GPU *GPUSpec `json:"gpu,omitempty"`
    // Image is the container image to use. Overrides Spec.Image if set.
    // Optional.
    Image *string `json:"image,omitempty"`
    // ConfigMaps carries information of other ConfigMaps to add to the pod.
    // Optional.
    ConfigMaps []NamePath `json:"configMaps,omitempty"`
    // Secrets carries information of secrets to add to the pod.
    // Optional.
    Secrets []SecretInfo `json:"secrets,omitempty"`
    // EnvVars carries the environment variables to add to the pod.
    // Optional.
    EnvVars map[string]string `json:"envVars,omitempty"`
    // EnvSecretKeyRefs holds a mapping from environment variable names to SecretKeyRefs.
    // Optional.
    EnvSecretKeyRefs map[string]NameKey `json:"envSecretKeyRefs,omitempty"`
    // Labels are the Kubernetes labels to be added to the pod.
    // Optional.
    Labels map[string]string `json:"labels,omitempty"`
    // Annotations are the Kubernetes annotations to be added to the pod.
    // Optional.
    Annotations map[string]string `json:"annotations,omitempty"`
    // VolumeMounts specifies the volumes listed in ".spec.volumes" to mount into the main container's filesystem.
    // Optional.
    VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty"`
    // Affinity specifies the affinity/anti-affinity settings for the pod.
    // Optional.
    Affinity *corev1.Affinity `json:"affinity,omitempty"`
    // Tolerations specifies the tolerations listed in ".spec.tolerations" to be applied to the pod.
    // Optional.
    Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
    // SecurityContext specifies the PodSecurityContext to apply.
    // Optional.
    SecurityContext *corev1.PodSecurityContext `json:"securityContext,omitempty"`
    // SchedulerName specifies the scheduler that will be used for scheduling.
    // Optional.
    SchedulerName *string `json:"schedulerName,omitempty"`
    // Sidecars is a list of sidecar containers that run alongside the main Spark container.
    // Optional.
    Sidecars []corev1.Container `json:"sidecars,omitempty"`
    // HostNetwork indicates whether to request host networking for the pod or not.
    // Optional.
    HostNetwork *bool `json:"hostNetwork,omitempty"`
    // NodeSelector is the Kubernetes node selector to be added to the driver and executor pods.
    // This field is mutually exclusive with nodeSelector at SparkApplication level (which will be deprecated).
    // Optional.
    NodeSelector map[string]string `json:"nodeSelector,omitempty"`
    // DNSConfig specifies the DNS settings for the pod, following the Kubernetes specifications.
    // Optional.
    DNSConfig *corev1.PodDNSConfig `json:"dnsConfig,omitempty"`
}

// DriverSpec is specification of the driver.
type DriverSpec struct {
    SparkPodSpec `json:",inline"`
    // PodName is the name of the driver pod that the user creates. This is used for the
    // in-cluster client mode in which the user creates a client pod where the driver of
    // the user application runs. It's an error to set this field if Mode is not
    // in-cluster-client.
    // Optional.
    PodName *string `json:"podName,omitempty"`
    // ServiceAccount is the name of the Kubernetes service account used by the driver pod
    // when requesting executor pods from the API server.
    ServiceAccount *string `json:"serviceAccount,omitempty"`
    // JavaOptions is a string of extra JVM options to pass to the driver. For instance,
    // GC settings or other logging.
    JavaOptions *string `json:"javaOptions,omitempty"`
}

// ExecutorSpec is specification of the executor.
type ExecutorSpec struct {
    SparkPodSpec `json:",inline"`
    // Instances is the number of executor instances.
    // Optional.
    Instances *int32 `json:"instances,omitempty"`
    // CoreRequest is the physical CPU core request for the executors.
    // Optional.
    CoreRequest *string `json:"coreRequest,omitempty"`
    // JavaOptions is a string of extra JVM options to pass to the executors. For instance,
    // GC settings or other logging.
    JavaOptions *string `json:"javaOptions,omitempty"`
}

// NamePath is a pair of a name and a path to which the named objects should be mounted to.
type NamePath struct {
    Name string `json:"name"`
    Path string `json:"path"`
}

// SecretType tells the type of a secret.
type SecretType string

// An enumeration of secret types supported.
const (
    // GCPServiceAccountSecret is for secrets from a GCP service account JSON key file that needs
    // the environment variable GOOGLE_APPLICATION_CREDENTIALS.
    GCPServiceAccountSecret SecretType = "GCPServiceAccount"
    // HadoopDelegationTokenSecret is for secrets from a Hadoop delegation token that needs the
    // environment variable HADOOP_TOKEN_FILE_LOCATION.
    HadoopDelegationTokenSecret SecretType = "HadoopDelegationToken"
    // GenericType is for secrets that need no special handling.
    GenericType SecretType = "Generic"
)

// DriverInfo captures information about the driver.
type DriverInfo struct {
    WebUIServiceName string `json:"webUIServiceName,omitempty"`
    // UI Details for the UI created via ClusterIP service accessible from within the cluster.
    WebUIPort    int32  `json:"webUIPort,omitempty"`
    WebUIAddress string `json:"webUIAddress,omitempty"`
    // Ingress Details if an ingress for the UI was created.
    WebUIIngressName    string `json:"webUIIngressName,omitempty"`
    WebUIIngressAddress string `json:"webUIIngressAddress,omitempty"`
    PodName             string `json:"podName,omitempty"`
}

// SecretInfo captures information of a secret.
type SecretInfo struct {
    Name string     `json:"name"`
    Path string     `json:"path"`
    Type SecretType `json:"secretType"`
}

// NameKey represents the name and key of a SecretKeyRef.
type NameKey struct {
    Name string `json:"name"`
    Key  string `json:"key"`
}

// MonitoringSpec defines the monitoring specification.
type MonitoringSpec struct {
    // ExposeDriverMetrics specifies whether to expose metrics on the driver.
    ExposeDriverMetrics bool `json:"exposeDriverMetrics"`
    // ExposeExecutorMetrics specifies whether to expose metrics on the executors.
    ExposeExecutorMetrics bool `json:"exposeExecutorMetrics"`
    // MetricsProperties is the content of a custom metrics.properties for configuring the Spark metric system.
    // Optional.
    // If not specified, the content in spark-docker/conf/metrics.properties will be used.
    MetricsProperties *string `json:"metricsProperties,omitempty"`
    // Prometheus is for configuring the Prometheus JMX exporter.
    // Optional.
    Prometheus *PrometheusSpec `json:"prometheus,omitempty"`
}

// PrometheusSpec defines the Prometheus specification when Prometheus is to be used for
// collecting and exposing metrics.
type PrometheusSpec struct {
    // JmxExporterJar is the path to the Prometheus JMX exporter jar in the container.
    JmxExporterJar string `json:"jmxExporterJar"`
    // Port is the port of the HTTP server run by the Prometheus JMX exporter.
    // Optional.
    // If not specified, 8090 will be used as the default.
    Port *int32 `json:"port"`
    // ConfigFile is the path to the custom Prometheus configuration file provided in the Spark image.
    // ConfigFile takes precedence over Configuration, which is shown below.
    ConfigFile *string `json:"configFile,omitempty"`
    // Configuration is the content of the Prometheus configuration needed by the Prometheus JMX exporter.
    // Optional.
    // If not specified, the content in spark-docker/conf/prometheus.yaml will be used.
    // Configuration has no effect if ConfigFile is set.
    Configuration *string `json:"configuration,omitempty"`
}

type GPUSpec struct {
    // Name is the GPU resource name, such as nvidia.com/gpu or amd.com/gpu.
    Name string `json:"name"`
    // Quantity is the number of GPUs to request for driver or executor.
    Quantity int64 `json:"quantity"`
}

// PrometheusMonitoringEnabled returns whether Prometheus monitoring is enabled.
func (s *SparkApplication) PrometheusMonitoringEnabled() bool {
    return s.Spec.Monitoring != nil && s.Spec.Monitoring.Prometheus != nil
}

// HasPrometheusConfigFile returns whether Prometheus monitoring uses a configuration file in the container.
func (s *SparkApplication) HasPrometheusConfigFile() bool {
    return s.PrometheusMonitoringEnabled() &&
        s.Spec.Monitoring.Prometheus.ConfigFile != nil &&
        *s.Spec.Monitoring.Prometheus.ConfigFile != ""
}

// ExposeDriverMetrics returns whether driver metrics should be exposed.
func (s *SparkApplication) ExposeDriverMetrics() bool {
    return s.Spec.Monitoring != nil && s.Spec.Monitoring.ExposeDriverMetrics
}

// ExposeExecutorMetrics returns whether executor metrics should be exposed.
func (s *SparkApplication) ExposeExecutorMetrics() bool {
    return s.Spec.Monitoring != nil && s.Spec.Monitoring.ExposeExecutorMetrics
}
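To make the shape of these types concrete, here is a minimal construction sketch (not part of the diff). The application name, image, main class, and jar path are placeholder values; metav1 is the apimachinery meta/v1 package imported at the top of this file, and the pointer helpers are local to the sketch.

func strPtr(s string) *string { return &s }
func int32Ptr(i int32) *int32 { return &i }

// newSparkPi is illustrative only; every concrete value below is a placeholder.
func newSparkPi() *SparkApplication {
    return &SparkApplication{
        ObjectMeta: metav1.ObjectMeta{Name: "spark-pi", Namespace: "default"},
        Spec: SparkApplicationSpec{
            Type:                ScalaApplicationType,
            Mode:                ClusterMode,
            SparkVersion:        "3.5.0",
            Image:               strPtr("spark:3.5.0"),
            MainClass:           strPtr("org.apache.spark.examples.SparkPi"),
            MainApplicationFile: strPtr("local:///opt/spark/examples/jars/spark-examples.jar"),
            RestartPolicy:       RestartPolicy{Type: Never},
            Driver:              DriverSpec{SparkPodSpec: SparkPodSpec{Memory: strPtr("512m")}},
            Executor:            ExecutorSpec{SparkPodSpec: SparkPodSpec{Memory: strPtr("512m")}, Instances: int32Ptr(2)},
        },
    }
}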
@ -0,0 +1,778 @@
//go:build !ignore_autogenerated

/*
Copyright 2024 The Kubeflow authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    https://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by controller-gen. DO NOT EDIT.

package v1beta1

import (
    "k8s.io/api/core/v1"
    runtime "k8s.io/apimachinery/pkg/runtime"
)

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ApplicationState) DeepCopyInto(out *ApplicationState) {
    *out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationState.
func (in *ApplicationState) DeepCopy() *ApplicationState {
    if in == nil {
        return nil
    }
    out := new(ApplicationState)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Dependencies) DeepCopyInto(out *Dependencies) {
    *out = *in
    if in.Jars != nil {
        in, out := &in.Jars, &out.Jars
        *out = make([]string, len(*in))
        copy(*out, *in)
    }
    if in.Files != nil {
        in, out := &in.Files, &out.Files
        *out = make([]string, len(*in))
        copy(*out, *in)
    }
    if in.PyFiles != nil {
        in, out := &in.PyFiles, &out.PyFiles
        *out = make([]string, len(*in))
        copy(*out, *in)
    }
    if in.JarsDownloadDir != nil {
        in, out := &in.JarsDownloadDir, &out.JarsDownloadDir
        *out = new(string)
        **out = **in
    }
    if in.FilesDownloadDir != nil {
        in, out := &in.FilesDownloadDir, &out.FilesDownloadDir
        *out = new(string)
        **out = **in
    }
    if in.DownloadTimeout != nil {
        in, out := &in.DownloadTimeout, &out.DownloadTimeout
        *out = new(int32)
        **out = **in
    }
    if in.MaxSimultaneousDownloads != nil {
        in, out := &in.MaxSimultaneousDownloads, &out.MaxSimultaneousDownloads
        *out = new(int32)
        **out = **in
    }
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Dependencies.
func (in *Dependencies) DeepCopy() *Dependencies {
    if in == nil {
        return nil
    }
    out := new(Dependencies)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DriverInfo) DeepCopyInto(out *DriverInfo) {
    *out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DriverInfo.
func (in *DriverInfo) DeepCopy() *DriverInfo {
    if in == nil {
        return nil
    }
    out := new(DriverInfo)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DriverSpec) DeepCopyInto(out *DriverSpec) {
    *out = *in
    in.SparkPodSpec.DeepCopyInto(&out.SparkPodSpec)
    if in.PodName != nil {
        in, out := &in.PodName, &out.PodName
        *out = new(string)
        **out = **in
    }
    if in.ServiceAccount != nil {
        in, out := &in.ServiceAccount, &out.ServiceAccount
        *out = new(string)
        **out = **in
    }
    if in.JavaOptions != nil {
        in, out := &in.JavaOptions, &out.JavaOptions
        *out = new(string)
        **out = **in
    }
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DriverSpec.
func (in *DriverSpec) DeepCopy() *DriverSpec {
    if in == nil {
        return nil
    }
    out := new(DriverSpec)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExecutorSpec) DeepCopyInto(out *ExecutorSpec) {
    *out = *in
    in.SparkPodSpec.DeepCopyInto(&out.SparkPodSpec)
    if in.Instances != nil {
        in, out := &in.Instances, &out.Instances
        *out = new(int32)
        **out = **in
    }
    if in.CoreRequest != nil {
        in, out := &in.CoreRequest, &out.CoreRequest
        *out = new(string)
        **out = **in
    }
    if in.JavaOptions != nil {
        in, out := &in.JavaOptions, &out.JavaOptions
        *out = new(string)
        **out = **in
    }
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecutorSpec.
func (in *ExecutorSpec) DeepCopy() *ExecutorSpec {
    if in == nil {
        return nil
    }
    out := new(ExecutorSpec)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GPUSpec) DeepCopyInto(out *GPUSpec) {
    *out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GPUSpec.
func (in *GPUSpec) DeepCopy() *GPUSpec {
    if in == nil {
        return nil
    }
    out := new(GPUSpec)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MonitoringSpec) DeepCopyInto(out *MonitoringSpec) {
    *out = *in
    if in.MetricsProperties != nil {
        in, out := &in.MetricsProperties, &out.MetricsProperties
        *out = new(string)
        **out = **in
    }
    if in.Prometheus != nil {
        in, out := &in.Prometheus, &out.Prometheus
        *out = new(PrometheusSpec)
        (*in).DeepCopyInto(*out)
    }
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringSpec.
func (in *MonitoringSpec) DeepCopy() *MonitoringSpec {
    if in == nil {
        return nil
    }
    out := new(MonitoringSpec)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NameKey) DeepCopyInto(out *NameKey) {
    *out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NameKey.
func (in *NameKey) DeepCopy() *NameKey {
    if in == nil {
        return nil
    }
    out := new(NameKey)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NamePath) DeepCopyInto(out *NamePath) {
    *out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamePath.
func (in *NamePath) DeepCopy() *NamePath {
    if in == nil {
        return nil
    }
    out := new(NamePath)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PrometheusSpec) DeepCopyInto(out *PrometheusSpec) {
    *out = *in
    if in.Port != nil {
        in, out := &in.Port, &out.Port
        *out = new(int32)
        **out = **in
    }
    if in.ConfigFile != nil {
        in, out := &in.ConfigFile, &out.ConfigFile
        *out = new(string)
        **out = **in
    }
    if in.Configuration != nil {
        in, out := &in.Configuration, &out.Configuration
        *out = new(string)
        **out = **in
    }
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusSpec.
func (in *PrometheusSpec) DeepCopy() *PrometheusSpec {
    if in == nil {
        return nil
    }
    out := new(PrometheusSpec)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RestartPolicy) DeepCopyInto(out *RestartPolicy) {
    *out = *in
    if in.OnSubmissionFailureRetries != nil {
        in, out := &in.OnSubmissionFailureRetries, &out.OnSubmissionFailureRetries
        *out = new(int32)
        **out = **in
    }
    if in.OnFailureRetries != nil {
        in, out := &in.OnFailureRetries, &out.OnFailureRetries
        *out = new(int32)
        **out = **in
    }
    if in.OnSubmissionFailureRetryInterval != nil {
        in, out := &in.OnSubmissionFailureRetryInterval, &out.OnSubmissionFailureRetryInterval
        *out = new(int64)
        **out = **in
    }
    if in.OnFailureRetryInterval != nil {
        in, out := &in.OnFailureRetryInterval, &out.OnFailureRetryInterval
        *out = new(int64)
        **out = **in
    }
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestartPolicy.
func (in *RestartPolicy) DeepCopy() *RestartPolicy {
    if in == nil {
        return nil
    }
    out := new(RestartPolicy)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScheduledSparkApplication) DeepCopyInto(out *ScheduledSparkApplication) {
    *out = *in
    out.TypeMeta = in.TypeMeta
    in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
    in.Spec.DeepCopyInto(&out.Spec)
    in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledSparkApplication.
func (in *ScheduledSparkApplication) DeepCopy() *ScheduledSparkApplication {
    if in == nil {
        return nil
    }
    out := new(ScheduledSparkApplication)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ScheduledSparkApplication) DeepCopyObject() runtime.Object {
    if c := in.DeepCopy(); c != nil {
        return c
    }
    return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScheduledSparkApplicationList) DeepCopyInto(out *ScheduledSparkApplicationList) {
    *out = *in
    out.TypeMeta = in.TypeMeta
    in.ListMeta.DeepCopyInto(&out.ListMeta)
    if in.Items != nil {
        in, out := &in.Items, &out.Items
        *out = make([]ScheduledSparkApplication, len(*in))
        for i := range *in {
            (*in)[i].DeepCopyInto(&(*out)[i])
        }
    }
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledSparkApplicationList.
func (in *ScheduledSparkApplicationList) DeepCopy() *ScheduledSparkApplicationList {
    if in == nil {
        return nil
    }
    out := new(ScheduledSparkApplicationList)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ScheduledSparkApplicationList) DeepCopyObject() runtime.Object {
    if c := in.DeepCopy(); c != nil {
        return c
    }
    return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScheduledSparkApplicationSpec) DeepCopyInto(out *ScheduledSparkApplicationSpec) {
    *out = *in
    in.Template.DeepCopyInto(&out.Template)
    if in.Suspend != nil {
        in, out := &in.Suspend, &out.Suspend
        *out = new(bool)
        **out = **in
    }
    if in.SuccessfulRunHistoryLimit != nil {
        in, out := &in.SuccessfulRunHistoryLimit, &out.SuccessfulRunHistoryLimit
        *out = new(int32)
        **out = **in
    }
    if in.FailedRunHistoryLimit != nil {
        in, out := &in.FailedRunHistoryLimit, &out.FailedRunHistoryLimit
        *out = new(int32)
        **out = **in
    }
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledSparkApplicationSpec.
func (in *ScheduledSparkApplicationSpec) DeepCopy() *ScheduledSparkApplicationSpec {
    if in == nil {
        return nil
    }
    out := new(ScheduledSparkApplicationSpec)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScheduledSparkApplicationStatus) DeepCopyInto(out *ScheduledSparkApplicationStatus) {
    *out = *in
    in.LastRun.DeepCopyInto(&out.LastRun)
    in.NextRun.DeepCopyInto(&out.NextRun)
    if in.PastSuccessfulRunNames != nil {
        in, out := &in.PastSuccessfulRunNames, &out.PastSuccessfulRunNames
        *out = make([]string, len(*in))
        copy(*out, *in)
    }
    if in.PastFailedRunNames != nil {
        in, out := &in.PastFailedRunNames, &out.PastFailedRunNames
        *out = make([]string, len(*in))
        copy(*out, *in)
    }
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledSparkApplicationStatus.
func (in *ScheduledSparkApplicationStatus) DeepCopy() *ScheduledSparkApplicationStatus {
    if in == nil {
        return nil
    }
    out := new(ScheduledSparkApplicationStatus)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SecretInfo) DeepCopyInto(out *SecretInfo) {
    *out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretInfo.
func (in *SecretInfo) DeepCopy() *SecretInfo {
    if in == nil {
        return nil
    }
    out := new(SecretInfo)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SparkApplication) DeepCopyInto(out *SparkApplication) {
    *out = *in
    out.TypeMeta = in.TypeMeta
    in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
    in.Spec.DeepCopyInto(&out.Spec)
    in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkApplication.
func (in *SparkApplication) DeepCopy() *SparkApplication {
    if in == nil {
        return nil
    }
    out := new(SparkApplication)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *SparkApplication) DeepCopyObject() runtime.Object {
    if c := in.DeepCopy(); c != nil {
        return c
    }
    return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SparkApplicationList) DeepCopyInto(out *SparkApplicationList) {
    *out = *in
    out.TypeMeta = in.TypeMeta
    in.ListMeta.DeepCopyInto(&out.ListMeta)
    if in.Items != nil {
        in, out := &in.Items, &out.Items
        *out = make([]SparkApplication, len(*in))
        for i := range *in {
            (*in)[i].DeepCopyInto(&(*out)[i])
        }
    }
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkApplicationList.
func (in *SparkApplicationList) DeepCopy() *SparkApplicationList {
    if in == nil {
        return nil
    }
    out := new(SparkApplicationList)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *SparkApplicationList) DeepCopyObject() runtime.Object {
    if c := in.DeepCopy(); c != nil {
        return c
    }
    return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SparkApplicationSpec) DeepCopyInto(out *SparkApplicationSpec) {
    *out = *in
    if in.Image != nil {
        in, out := &in.Image, &out.Image
        *out = new(string)
        **out = **in
    }
    if in.InitContainerImage != nil {
        in, out := &in.InitContainerImage, &out.InitContainerImage
        *out = new(string)
        **out = **in
    }
    if in.ImagePullPolicy != nil {
        in, out := &in.ImagePullPolicy, &out.ImagePullPolicy
        *out = new(string)
        **out = **in
    }
    if in.ImagePullSecrets != nil {
        in, out := &in.ImagePullSecrets, &out.ImagePullSecrets
        *out = make([]string, len(*in))
        copy(*out, *in)
    }
    if in.MainClass != nil {
        in, out := &in.MainClass, &out.MainClass
        *out = new(string)
        **out = **in
    }
    if in.MainApplicationFile != nil {
        in, out := &in.MainApplicationFile, &out.MainApplicationFile
        *out = new(string)
        **out = **in
    }
    if in.Arguments != nil {
        in, out := &in.Arguments, &out.Arguments
        *out = make([]string, len(*in))
        copy(*out, *in)
    }
    if in.SparkConf != nil {
        in, out := &in.SparkConf, &out.SparkConf
        *out = make(map[string]string, len(*in))
        for key, val := range *in {
            (*out)[key] = val
        }
    }
    if in.HadoopConf != nil {
        in, out := &in.HadoopConf, &out.HadoopConf
        *out = make(map[string]string, len(*in))
        for key, val := range *in {
            (*out)[key] = val
        }
    }
    if in.SparkConfigMap != nil {
        in, out := &in.SparkConfigMap, &out.SparkConfigMap
        *out = new(string)
        **out = **in
    }
    if in.HadoopConfigMap != nil {
        in, out := &in.HadoopConfigMap, &out.HadoopConfigMap
        *out = new(string)
        **out = **in
    }
    if in.Volumes != nil {
        in, out := &in.Volumes, &out.Volumes
        *out = make([]v1.Volume, len(*in))
        for i := range *in {
            (*in)[i].DeepCopyInto(&(*out)[i])
        }
    }
    in.Driver.DeepCopyInto(&out.Driver)
    in.Executor.DeepCopyInto(&out.Executor)
    in.Deps.DeepCopyInto(&out.Deps)
    in.RestartPolicy.DeepCopyInto(&out.RestartPolicy)
    if in.NodeSelector != nil {
        in, out := &in.NodeSelector, &out.NodeSelector
        *out = make(map[string]string, len(*in))
        for key, val := range *in {
            (*out)[key] = val
        }
    }
    if in.FailureRetries != nil {
        in, out := &in.FailureRetries, &out.FailureRetries
        *out = new(int32)
        **out = **in
    }
    if in.RetryInterval != nil {
        in, out := &in.RetryInterval, &out.RetryInterval
        *out = new(int64)
        **out = **in
    }
    if in.PythonVersion != nil {
        in, out := &in.PythonVersion, &out.PythonVersion
        *out = new(string)
        **out = **in
    }
    if in.MemoryOverheadFactor != nil {
        in, out := &in.MemoryOverheadFactor, &out.MemoryOverheadFactor
        *out = new(string)
        **out = **in
    }
    if in.Monitoring != nil {
        in, out := &in.Monitoring, &out.Monitoring
        *out = new(MonitoringSpec)
        (*in).DeepCopyInto(*out)
    }
    if in.BatchScheduler != nil {
        in, out := &in.BatchScheduler, &out.BatchScheduler
        *out = new(string)
        **out = **in
    }
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkApplicationSpec.
func (in *SparkApplicationSpec) DeepCopy() *SparkApplicationSpec {
    if in == nil {
        return nil
    }
    out := new(SparkApplicationSpec)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SparkApplicationStatus) DeepCopyInto(out *SparkApplicationStatus) {
    *out = *in
    in.LastSubmissionAttemptTime.DeepCopyInto(&out.LastSubmissionAttemptTime)
    in.TerminationTime.DeepCopyInto(&out.TerminationTime)
    out.DriverInfo = in.DriverInfo
    out.AppState = in.AppState
    if in.ExecutorState != nil {
        in, out := &in.ExecutorState, &out.ExecutorState
        *out = make(map[string]ExecutorState, len(*in))
        for key, val := range *in {
            (*out)[key] = val
        }
    }
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkApplicationStatus.
func (in *SparkApplicationStatus) DeepCopy() *SparkApplicationStatus {
    if in == nil {
        return nil
    }
    out := new(SparkApplicationStatus)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SparkPodSpec) DeepCopyInto(out *SparkPodSpec) {
    *out = *in
    if in.Cores != nil {
        in, out := &in.Cores, &out.Cores
        *out = new(float32)
        **out = **in
    }
    if in.CoreLimit != nil {
        in, out := &in.CoreLimit, &out.CoreLimit
        *out = new(string)
        **out = **in
    }
    if in.Memory != nil {
        in, out := &in.Memory, &out.Memory
        *out = new(string)
        **out = **in
    }
    if in.MemoryOverhead != nil {
        in, out := &in.MemoryOverhead, &out.MemoryOverhead
        *out = new(string)
        **out = **in
    }
    if in.GPU != nil {
        in, out := &in.GPU, &out.GPU
        *out = new(GPUSpec)
        **out = **in
    }
    if in.Image != nil {
        in, out := &in.Image, &out.Image
        *out = new(string)
        **out = **in
    }
    if in.ConfigMaps != nil {
        in, out := &in.ConfigMaps, &out.ConfigMaps
        *out = make([]NamePath, len(*in))
        copy(*out, *in)
    }
    if in.Secrets != nil {
        in, out := &in.Secrets, &out.Secrets
        *out = make([]SecretInfo, len(*in))
        copy(*out, *in)
    }
    if in.EnvVars != nil {
        in, out := &in.EnvVars, &out.EnvVars
        *out = make(map[string]string, len(*in))
        for key, val := range *in {
            (*out)[key] = val
        }
    }
    if in.EnvSecretKeyRefs != nil {
        in, out := &in.EnvSecretKeyRefs, &out.EnvSecretKeyRefs
        *out = make(map[string]NameKey, len(*in))
        for key, val := range *in {
            (*out)[key] = val
        }
    }
    if in.Labels != nil {
        in, out := &in.Labels, &out.Labels
        *out = make(map[string]string, len(*in))
        for key, val := range *in {
            (*out)[key] = val
        }
    }
    if in.Annotations != nil {
        in, out := &in.Annotations, &out.Annotations
        *out = make(map[string]string, len(*in))
        for key, val := range *in {
            (*out)[key] = val
        }
    }
    if in.VolumeMounts != nil {
        in, out := &in.VolumeMounts, &out.VolumeMounts
        *out = make([]v1.VolumeMount, len(*in))
        for i := range *in {
            (*in)[i].DeepCopyInto(&(*out)[i])
        }
    }
    if in.Affinity != nil {
        in, out := &in.Affinity, &out.Affinity
        *out = new(v1.Affinity)
        (*in).DeepCopyInto(*out)
    }
    if in.Tolerations != nil {
        in, out := &in.Tolerations, &out.Tolerations
        *out = make([]v1.Toleration, len(*in))
        for i := range *in {
            (*in)[i].DeepCopyInto(&(*out)[i])
        }
    }
    if in.SecurityContext != nil {
        in, out := &in.SecurityContext, &out.SecurityContext
        *out = new(v1.PodSecurityContext)
        (*in).DeepCopyInto(*out)
    }
    if in.SchedulerName != nil {
        in, out := &in.SchedulerName, &out.SchedulerName
        *out = new(string)
        **out = **in
    }
    if in.Sidecars != nil {
        in, out := &in.Sidecars, &out.Sidecars
        *out = make([]v1.Container, len(*in))
        for i := range *in {
            (*in)[i].DeepCopyInto(&(*out)[i])
        }
    }
    if in.HostNetwork != nil {
        in, out := &in.HostNetwork, &out.HostNetwork
        *out = new(bool)
        **out = **in
    }
    if in.NodeSelector != nil {
        in, out := &in.NodeSelector, &out.NodeSelector
        *out = make(map[string]string, len(*in))
        for key, val := range *in {
            (*out)[key] = val
        }
    }
    if in.DNSConfig != nil {
        in, out := &in.DNSConfig, &out.DNSConfig
        *out = new(v1.PodDNSConfig)
        (*in).DeepCopyInto(*out)
    }
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkPodSpec.
func (in *SparkPodSpec) DeepCopy() *SparkPodSpec {
    if in == nil {
        return nil
    }
    out := new(SparkPodSpec)
    in.DeepCopyInto(out)
    return out
}
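A brief note on why this generated file matters, with a hedged usage sketch (not part of the diff): controllers deep-copy objects before mutating them so that shared caches stay consistent.

// Hypothetical usage of the generated DeepCopy: mutations on the copy do not
// leak back into the original, e.g. an object served from an informer cache.
func withVerboseFlag(orig *SparkApplication) *SparkApplication {
    cp := orig.DeepCopy()
    cp.Spec.Arguments = append(cp.Spec.Arguments, "--verbose") // placeholder argument
    if cp.Spec.SparkConf == nil {
        cp.Spec.SparkConf = map[string]string{}
    }
    cp.Spec.SparkConf["spark.ui.enabled"] = "false" // illustrative setting
    return cp // orig is left untouched
}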
@ -34,12 +34,6 @@ type ScheduledSparkApplicationSpec struct {
|
||||||
|
|
||||||
// Schedule is a cron schedule on which the application should run.
|
// Schedule is a cron schedule on which the application should run.
|
||||||
Schedule string `json:"schedule"`
|
Schedule string `json:"schedule"`
|
||||||
// TimeZone is the time zone in which the cron schedule will be interpreted in.
|
|
||||||
// This value is passed to time.LoadLocation, so it must be either "Local", "UTC",
|
|
||||||
// or a valid IANA location name e.g. "America/New_York".
|
|
||||||
// +optional
|
|
||||||
// Defaults to "Local".
|
|
||||||
TimeZone string `json:"timeZone,omitempty"`
|
|
||||||
// Template is a template from which SparkApplication instances can be created.
|
// Template is a template from which SparkApplication instances can be created.
|
||||||
Template SparkApplicationSpec `json:"template"`
|
Template SparkApplicationSpec `json:"template"`
|
||||||
// Suspend is a flag telling the controller to suspend subsequent runs of the application if set to true.
|
// Suspend is a flag telling the controller to suspend subsequent runs of the application if set to true.
|
||||||
|
@@ -86,25 +80,21 @@ type ScheduledSparkApplicationStatus struct {
 // +kubebuilder:resource:scope=Namespaced,shortName=scheduledsparkapp,singular=scheduledsparkapplication
 // +kubebuilder:subresource:status
 // +kubebuilder:printcolumn:JSONPath=.spec.schedule,name=Schedule,type=string
-// +kubebuilder:printcolumn:JSONPath=.spec.timeZone,name=TimeZone,type=string
 // +kubebuilder:printcolumn:JSONPath=.spec.suspend,name=Suspend,type=string
 // +kubebuilder:printcolumn:JSONPath=.status.lastRun,name=Last Run,type=date
 // +kubebuilder:printcolumn:JSONPath=.status.lastRunName,name=Last Run Name,type=string
 // +kubebuilder:printcolumn:JSONPath=.metadata.creationTimestamp,name=Age,type=date
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-// +genclient
 
 // ScheduledSparkApplication is the Schema for the scheduledsparkapplications API.
 type ScheduledSparkApplication struct {
     metav1.TypeMeta `json:",inline"`
-    metav1.ObjectMeta `json:"metadata"`
+    metav1.ObjectMeta `json:"metadata,omitempty"`
 
-    Spec ScheduledSparkApplicationSpec `json:"spec"`
+    Spec ScheduledSparkApplicationSpec `json:"spec,omitempty"`
     Status ScheduledSparkApplicationStatus `json:"status,omitempty"`
 }
 
 // +kubebuilder:object:root=true
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
 
 // ScheduledSparkApplicationList contains a list of ScheduledSparkApplication.
 type ScheduledSparkApplicationList struct {
@@ -62,6 +62,7 @@ type SparkApplicationSpec struct {
     // +optional
     MainClass *string `json:"mainClass,omitempty"`
     // MainFile is the path to a bundled JAR, Python, or R file of the application.
+    // +optional
     MainApplicationFile *string `json:"mainApplicationFile"`
     // Arguments is a list of arguments to be passed to the application.
     // +optional
@@ -182,20 +183,17 @@ type SparkApplicationStatus struct {
 // +kubebuilder:printcolumn:JSONPath=.status.lastSubmissionAttemptTime,name=Start,type=string
 // +kubebuilder:printcolumn:JSONPath=.status.terminationTime,name=Finish,type=string
 // +kubebuilder:printcolumn:JSONPath=.metadata.creationTimestamp,name=Age,type=date
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-// +genclient
 
 // SparkApplication is the Schema for the sparkapplications API
 type SparkApplication struct {
     metav1.TypeMeta `json:",inline"`
-    metav1.ObjectMeta `json:"metadata"`
+    metav1.ObjectMeta `json:"metadata,omitempty"`
 
-    Spec SparkApplicationSpec `json:"spec"`
+    Spec SparkApplicationSpec `json:"spec,omitempty"`
     Status SparkApplicationStatus `json:"status,omitempty"`
 }
 
 // +kubebuilder:object:root=true
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
 
 // SparkApplicationList contains a list of SparkApplication
 type SparkApplicationList struct {
@@ -282,15 +280,15 @@ type SparkUIConfiguration struct {
     // ServicePort allows configuring the port at service level that might be different from the targetPort.
     // TargetPort should be the same as the one defined in spark.ui.port
     // +optional
-    ServicePort *int32 `json:"servicePort,omitempty"`
+    ServicePort *int32 `json:"servicePort"`
     // ServicePortName allows configuring the name of the service port.
     // This may be useful for sidecar proxies like Envoy injected by Istio which require specific ports names to treat traffic as proper HTTP.
     // Defaults to spark-driver-ui-port.
     // +optional
-    ServicePortName *string `json:"servicePortName,omitempty"`
+    ServicePortName *string `json:"servicePortName"`
     // ServiceType allows configuring the type of the service. Defaults to ClusterIP.
     // +optional
-    ServiceType *corev1.ServiceType `json:"serviceType,omitempty"`
+    ServiceType *corev1.ServiceType `json:"serviceType"`
     // ServiceAnnotations is a map of key,value pairs of annotations that might be added to the service object.
     // +optional
     ServiceAnnotations map[string]string `json:"serviceAnnotations,omitempty"`
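These `SparkUIConfiguration` fields are set per application under `spec.sparkUIOptions`; a minimal sketch with illustrative values (the annotation key is hypothetical):

```yaml
spec:
  sparkUIOptions:
    servicePort: 4040                       # should match spark.ui.port
    servicePortName: spark-driver-ui-port   # default; Envoy/Istio needs an http-style port name
    serviceType: ClusterIP                  # defaults to ClusterIP
    serviceAnnotations:
      example.com/team: data-platform       # hypothetical annotation
```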
@@ -314,7 +312,7 @@ type DriverIngressConfiguration struct {
     ServicePortName *string `json:"servicePortName"`
     // ServiceType allows configuring the type of the service. Defaults to ClusterIP.
     // +optional
-    ServiceType *corev1.ServiceType `json:"serviceType,omitempty"`
+    ServiceType *corev1.ServiceType `json:"serviceType"`
     // ServiceAnnotations is a map of key,value pairs of annotations that might be added to the service object.
     // +optional
     ServiceAnnotations map[string]string `json:"serviceAnnotations,omitempty"`
@@ -404,22 +402,11 @@ type Dependencies struct {
     // given with the "packages" option.
     // +optional
     Repositories []string `json:"repositories,omitempty"`
-    // Archives is a list of archives to be extracted into the working directory of each executor.
-    // +optional
-    Archives []string `json:"archives,omitempty"`
 }
 
 // SparkPodSpec defines common things that can be customized for a Spark driver or executor pod.
 // TODO: investigate if we should use v1.PodSpec and limit what can be set instead.
 type SparkPodSpec struct {
-    // Template is a pod template that can be used to define the driver or executor pod configurations that Spark configurations do not support.
-    // Spark version >= 3.0.0 is required.
-    // Ref: https://spark.apache.org/docs/latest/running-on-kubernetes.html#pod-template.
-    // +optional
-    // +kubebuilder:validation:Schemaless
-    // +kubebuilder:validation:Type:=object
-    // +kubebuilder:pruning:PreserveUnknownFields
-    Template *corev1.PodTemplateSpec `json:"template,omitempty"`
     // Cores maps to `spark.driver.cores` or `spark.executor.cores` for the driver and executors, respectively.
     // +optional
     // +kubebuilder:validation:Minimum=1
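To make the removed fields concrete: `Archives` lives under `spec.deps`, and the pod template override lives on the driver/executor `SparkPodSpec`. A sketch of a manifest that used both (URLs and bucket names are hypothetical):

```yaml
spec:
  deps:
    repositories:
    - https://repo.example.com/maven2     # hypothetical repository
    archives:
    - s3a://my-bucket/python-env.tar.gz   # extracted into each executor's working directory
  driver:
    template:                             # requires Spark >= 3.0.0
      spec:
        containers:
        - name: spark-kubernetes-driver
          terminationMessagePolicy: FallbackToLogsOnError
```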
@@ -430,9 +417,6 @@ type SparkPodSpec struct {
     // Memory is the amount of memory to request for the pod.
     // +optional
     Memory *string `json:"memory,omitempty"`
-    // MemoryLimit overrides the memory limit of the pod.
-    // +optional
-    MemoryLimit *string `json:"memoryLimit,omitempty"`
     // MemoryOverhead is the amount of off-heap memory to allocate in cluster mode, in MiB unless otherwise specified.
     // +optional
     MemoryOverhead *string `json:"memoryOverhead,omitempty"`
@@ -706,12 +690,6 @@ type DynamicAllocation struct {
     // MaxExecutors is the upper bound for the number of executors if dynamic allocation is enabled.
     // +optional
     MaxExecutors *int32 `json:"maxExecutors,omitempty"`
-    // ShuffleTrackingEnabled enables shuffle file tracking for executors, which allows dynamic allocation without
-    // the need for an external shuffle service. This option will try to keep alive executors that are storing
-    // shuffle data for active jobs. If external shuffle service is enabled, set ShuffleTrackingEnabled to false.
-    // ShuffleTrackingEnabled is true by default if dynamicAllocation.enabled is true.
-    // +optional
-    ShuffleTrackingEnabled *bool `json:"shuffleTrackingEnabled,omitempty"`
     // ShuffleTrackingTimeout controls the timeout in milliseconds for executors that are holding
     // shuffle data if shuffle tracking is enabled (true by default if dynamic allocation is enabled).
     // +optional
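The shuffle-tracking knobs map one-to-one onto `spec.dynamicAllocation`; a minimal sketch with illustrative values:

```yaml
spec:
  dynamicAllocation:
    enabled: true
    initialExecutors: 2
    minExecutors: 1
    maxExecutors: 10
    shuffleTrackingEnabled: true     # the field removed on the right-hand side
    shuffleTrackingTimeout: 3600000  # milliseconds
```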
@@ -1,7 +1,7 @@
 //go:build !ignore_autogenerated
 
 /*
-Copyright 2025 The Kubeflow authors.
+Copyright 2024 The Kubeflow authors.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -106,11 +106,6 @@ func (in *Dependencies) DeepCopyInto(out *Dependencies) {
         *out = make([]string, len(*in))
         copy(*out, *in)
     }
-    if in.Archives != nil {
-        in, out := &in.Archives, &out.Archives
-        *out = make([]string, len(*in))
-        copy(*out, *in)
-    }
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Dependencies.
@@ -279,11 +274,6 @@ func (in *DynamicAllocation) DeepCopyInto(out *DynamicAllocation) {
         *out = new(int32)
         **out = **in
     }
-    if in.ShuffleTrackingEnabled != nil {
-        in, out := &in.ShuffleTrackingEnabled, &out.ShuffleTrackingEnabled
-        *out = new(bool)
-        **out = **in
-    }
     if in.ShuffleTrackingTimeout != nil {
         in, out := &in.ShuffleTrackingTimeout, &out.ShuffleTrackingTimeout
         *out = new(int64)
@@ -881,11 +871,6 @@ func (in *SparkApplicationStatus) DeepCopy() *SparkApplicationStatus {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *SparkPodSpec) DeepCopyInto(out *SparkPodSpec) {
     *out = *in
-    if in.Template != nil {
-        in, out := &in.Template, &out.Template
-        *out = new(v1.PodTemplateSpec)
-        (*in).DeepCopyInto(*out)
-    }
     if in.Cores != nil {
         in, out := &in.Cores, &out.Cores
         *out = new(int32)
@@ -901,11 +886,6 @@ func (in *SparkPodSpec) DeepCopyInto(out *SparkPodSpec) {
         *out = new(string)
         **out = **in
     }
-    if in.MemoryLimit != nil {
-        in, out := &in.MemoryLimit, &out.MemoryLimit
-        *out = new(string)
-        **out = **in
-    }
     if in.MemoryOverhead != nil {
         in, out := &in.MemoryOverhead, &out.MemoryOverhead
         *out = new(string)
@@ -20,9 +20,9 @@ name: spark-operator
 
 description: A Helm chart for Spark on Kubernetes operator.
 
-version: 2.2.1
+version: 2.0.2
 
-appVersion: 2.2.1
+appVersion: 2.0.2
 
 keywords:
 - apache spark
@@ -1,6 +1,6 @@
 # spark-operator
 
-![Version: 2.2.1](https://img.shields.io/badge/Version-2.2.1-informational?style=flat-square) ![AppVersion: 2.2.1](https://img.shields.io/badge/AppVersion-2.2.1-informational?style=flat-square)
+![Version: 2.0.2](https://img.shields.io/badge/Version-2.0.2-informational?style=flat-square) ![AppVersion: 2.0.2](https://img.shields.io/badge/AppVersion-2.0.2-informational?style=flat-square)
 
 A Helm chart for Spark on Kubernetes operator.
 
@@ -78,47 +78,39 @@ See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall) for command docum
 | nameOverride | string | `""` | String to partially override release name. |
 | fullnameOverride | string | `""` | String to fully override release name. |
 | commonLabels | object | `{}` | Common labels to add to the resources. |
-| image.registry | string | `"ghcr.io"` | Image registry. |
-| image.repository | string | `"kubeflow/spark-operator/controller"` | Image repository. |
+| image.registry | string | `"docker.io"` | Image registry. |
+| image.repository | string | `"kubeflow/spark-operator"` | Image repository. |
 | image.tag | string | If not set, the chart appVersion will be used. | Image tag. |
 | image.pullPolicy | string | `"IfNotPresent"` | Image pull policy. |
 | image.pullSecrets | list | `[]` | Image pull secrets for private image registry. |
 | controller.replicas | int | `1` | Number of replicas of controller. |
-| controller.leaderElection.enable | bool | `true` | Specifies whether to enable leader election for controller. |
 | controller.workers | int | `10` | Reconcile concurrency, higher values might increase memory usage. |
 | controller.logLevel | string | `"info"` | Configure the verbosity of logging, can be one of `debug`, `info`, `error`. |
-| controller.logEncoder | string | `"console"` | Configure the encoder of logging, can be one of `console` or `json`. |
-| controller.driverPodCreationGracePeriod | string | `"10s"` | Grace period after a successful spark-submit when driver pod not found errors will be retried. Useful if the driver pod can take some time to be created. |
-| controller.maxTrackedExecutorPerApp | int | `1000` | Specifies the maximum number of Executor pods that can be tracked by the controller per SparkApplication. |
 | controller.uiService.enable | bool | `true` | Specifies whether to create service for Spark web UI. |
 | controller.uiIngress.enable | bool | `false` | Specifies whether to create ingress for Spark web UI. `controller.uiService.enable` must be `true` to enable ingress. |
 | controller.uiIngress.urlFormat | string | `""` | Ingress URL format. Required if `controller.uiIngress.enable` is true. |
-| controller.uiIngress.ingressClassName | string | `""` | Optionally set the ingressClassName. |
-| controller.uiIngress.tls | list | `[]` | Optionally set default TLS configuration for the Spark UI's ingress. `ingressTLS` in the SparkApplication spec overrides this. |
-| controller.uiIngress.annotations | object | `{}` | Optionally set default ingress annotations for the Spark UI's ingress. `ingressAnnotations` in the SparkApplication spec overrides this. |
 | controller.batchScheduler.enable | bool | `false` | Specifies whether to enable batch scheduler for spark jobs scheduling. If enabled, users can specify batch scheduler name in spark application. |
 | controller.batchScheduler.kubeSchedulerNames | list | `[]` | Specifies a list of kube-scheduler names for scheduling Spark pods. |
 | controller.batchScheduler.default | string | `""` | Default batch scheduler to be used if not specified by the user. If specified, this value must be either "volcano" or "yunikorn". Specifying any other value will cause the controller to error on startup. |
 | controller.serviceAccount.create | bool | `true` | Specifies whether to create a service account for the controller. |
 | controller.serviceAccount.name | string | `""` | Optional name for the controller service account. |
 | controller.serviceAccount.annotations | object | `{}` | Extra annotations for the controller service account. |
-| controller.serviceAccount.automountServiceAccountToken | bool | `true` | Auto-mount service account token to the controller pods. |
 | controller.rbac.create | bool | `true` | Specifies whether to create RBAC resources for the controller. |
 | controller.rbac.annotations | object | `{}` | Extra annotations for the controller RBAC resources. |
 | controller.labels | object | `{}` | Extra labels for controller pods. |
 | controller.annotations | object | `{}` | Extra annotations for controller pods. |
-| controller.volumes | list | `[{"emptyDir":{"sizeLimit":"1Gi"},"name":"tmp"}]` | Volumes for controller pods. |
+| controller.volumes | list | `[]` | Volumes for controller pods. |
 | controller.nodeSelector | object | `{}` | Node selector for controller pods. |
 | controller.affinity | object | `{}` | Affinity for controller pods. |
 | controller.tolerations | list | `[]` | List of node taints to tolerate for controller pods. |
 | controller.priorityClassName | string | `""` | Priority class for controller pods. |
-| controller.podSecurityContext | object | `{"fsGroup":185}` | Security context for controller pods. |
+| controller.podSecurityContext | object | `{}` | Security context for controller pods. |
 | controller.topologySpreadConstraints | list | `[]` | Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in. Ref: [Pod Topology Spread Constraints](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/). The labelSelector field in topology spread constraint will be set to the selector labels for controller pods if not specified. |
 | controller.env | list | `[]` | Environment variables for controller containers. |
 | controller.envFrom | list | `[]` | Environment variable sources for controller containers. |
-| controller.volumeMounts | list | `[{"mountPath":"/tmp","name":"tmp","readOnly":false}]` | Volume mounts for controller containers. |
+| controller.volumeMounts | list | `[]` | Volume mounts for controller containers. |
 | controller.resources | object | `{}` | Pod resource requests and limits for controller containers. Note, that each job submission will spawn a JVM within the controller pods using "/usr/local/openjdk-11/bin/java -Xmx128m". Kubernetes may kill these Java processes at will to enforce resource limits. When that happens, you will see the following error: 'failed to run spark-submit for SparkApplication [...]: signal: killed' - when this happens, you may want to increase memory limits. |
-| controller.securityContext | object | `{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"privileged":false,"readOnlyRootFilesystem":true,"runAsNonRoot":true,"seccompProfile":{"type":"RuntimeDefault"}}` | Security context for controller containers. |
+| controller.securityContext | object | `{}` | Security context for controller containers. |
 | controller.sidecars | list | `[]` | Sidecar containers for controller pods. |
 | controller.podDisruptionBudget.enable | bool | `false` | Specifies whether to create pod disruption budget for controller. Ref: [Specifying a Disruption Budget for your Application](https://kubernetes.io/docs/tasks/run-application/configure-pdb/) |
 | controller.podDisruptionBudget.minAvailable | int | `1` | The number of pods that must be available. Require `controller.replicas` to be greater than 1 |
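Since the default registry and repository differ between the two sides of this compare, deployments that must keep pulling the same controller image should pin it in values rather than rely on chart defaults; a sketch built from the table above:

```yaml
# values.yaml -- pin the controller image regardless of chart defaults
image:
  registry: ghcr.io
  repository: kubeflow/spark-operator/controller
  tag: "2.2.1"   # falls back to the chart appVersion when unset
  pullPolicy: IfNotPresent
```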
@@ -131,9 +123,7 @@ See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall) for command docum
 | controller.workqueueRateLimiter.maxDelay.duration | string | `"6h"` | Specifies the maximum delay duration for the workqueue rate limiter. |
 | webhook.enable | bool | `true` | Specifies whether to enable webhook. |
 | webhook.replicas | int | `1` | Number of replicas of webhook server. |
-| webhook.leaderElection.enable | bool | `true` | Specifies whether to enable leader election for webhook. |
 | webhook.logLevel | string | `"info"` | Configure the verbosity of logging, can be one of `debug`, `info`, `error`. |
-| webhook.logEncoder | string | `"console"` | Configure the encoder of logging, can be one of `console` or `json`. |
 | webhook.port | int | `9443` | Specifies webhook port. |
 | webhook.portName | string | `"webhook"` | Specifies webhook service port name. |
 | webhook.failurePolicy | string | `"Fail"` | Specifies how unrecognized errors are handled. Available options are `Ignore` or `Fail`. |
@@ -142,31 +132,29 @@ See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall) for command docum
 | webhook.serviceAccount.create | bool | `true` | Specifies whether to create a service account for the webhook. |
 | webhook.serviceAccount.name | string | `""` | Optional name for the webhook service account. |
 | webhook.serviceAccount.annotations | object | `{}` | Extra annotations for the webhook service account. |
-| webhook.serviceAccount.automountServiceAccountToken | bool | `true` | Auto-mount service account token to the webhook pods. |
 | webhook.rbac.create | bool | `true` | Specifies whether to create RBAC resources for the webhook. |
 | webhook.rbac.annotations | object | `{}` | Extra annotations for the webhook RBAC resources. |
 | webhook.labels | object | `{}` | Extra labels for webhook pods. |
 | webhook.annotations | object | `{}` | Extra annotations for webhook pods. |
 | webhook.sidecars | list | `[]` | Sidecar containers for webhook pods. |
-| webhook.volumes | list | `[{"emptyDir":{"sizeLimit":"500Mi"},"name":"serving-certs"}]` | Volumes for webhook pods. |
+| webhook.volumes | list | `[]` | Volumes for webhook pods. |
 | webhook.nodeSelector | object | `{}` | Node selector for webhook pods. |
 | webhook.affinity | object | `{}` | Affinity for webhook pods. |
 | webhook.tolerations | list | `[]` | List of node taints to tolerate for webhook pods. |
 | webhook.priorityClassName | string | `""` | Priority class for webhook pods. |
-| webhook.podSecurityContext | object | `{"fsGroup":185}` | Security context for webhook pods. |
+| webhook.podSecurityContext | object | `{}` | Security context for webhook pods. |
 | webhook.topologySpreadConstraints | list | `[]` | Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in. Ref: [Pod Topology Spread Constraints](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/). The labelSelector field in topology spread constraint will be set to the selector labels for webhook pods if not specified. |
 | webhook.env | list | `[]` | Environment variables for webhook containers. |
 | webhook.envFrom | list | `[]` | Environment variable sources for webhook containers. |
-| webhook.volumeMounts | list | `[{"mountPath":"/etc/k8s-webhook-server/serving-certs","name":"serving-certs","readOnly":false,"subPath":"serving-certs"}]` | Volume mounts for webhook containers. |
+| webhook.volumeMounts | list | `[]` | Volume mounts for webhook containers. |
 | webhook.resources | object | `{}` | Pod resource requests and limits for webhook pods. |
-| webhook.securityContext | object | `{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"privileged":false,"readOnlyRootFilesystem":true,"runAsNonRoot":true,"seccompProfile":{"type":"RuntimeDefault"}}` | Security context for webhook containers. |
+| webhook.securityContext | object | `{}` | Security context for webhook containers. |
 | webhook.podDisruptionBudget.enable | bool | `false` | Specifies whether to create pod disruption budget for webhook. Ref: [Specifying a Disruption Budget for your Application](https://kubernetes.io/docs/tasks/run-application/configure-pdb/) |
 | webhook.podDisruptionBudget.minAvailable | int | `1` | The number of pods that must be available. Require `webhook.replicas` to be greater than 1 |
 | spark.jobNamespaces | list | `["default"]` | List of namespaces where to run spark jobs. If empty string is included, all namespaces will be allowed. Make sure the namespaces have already existed. |
 | spark.serviceAccount.create | bool | `true` | Specifies whether to create a service account for spark applications. |
 | spark.serviceAccount.name | string | `""` | Optional name for the spark service account. |
 | spark.serviceAccount.annotations | object | `{}` | Optional annotations for the spark service account. |
-| spark.serviceAccount.automountServiceAccountToken | bool | `true` | Auto-mount service account token to the spark applications pods. |
 | spark.rbac.create | bool | `true` | Specifies whether to create RBAC resources for spark applications. |
 | spark.rbac.annotations | object | `{}` | Optional annotations for the spark application RBAC resources. |
 | prometheus.metrics.enable | bool | `true` | Specifies whether to enable prometheus metrics scraping. |
@@ -174,15 +162,10 @@ See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall) for command docum
 | prometheus.metrics.portName | string | `"metrics"` | Metrics port name. |
 | prometheus.metrics.endpoint | string | `"/metrics"` | Metrics serving endpoint. |
 | prometheus.metrics.prefix | string | `""` | Metrics prefix, will be added to all exported metrics. |
-| prometheus.metrics.jobStartLatencyBuckets | string | `"30,60,90,120,150,180,210,240,270,300"` | Job Start Latency histogram buckets. Specified in seconds. |
 | prometheus.podMonitor.create | bool | `false` | Specifies whether to create pod monitor. Note that prometheus metrics should be enabled as well. |
 | prometheus.podMonitor.labels | object | `{}` | Pod monitor labels |
 | prometheus.podMonitor.jobLabel | string | `"spark-operator-podmonitor"` | The label to use to retrieve the job name from |
 | prometheus.podMonitor.podMetricsEndpoint | object | `{"interval":"5s","scheme":"http"}` | Prometheus metrics endpoint properties. `metrics.portName` will be used as a port |
-| certManager.enable | bool | `false` | Specifies whether to use [cert-manager](https://cert-manager.io) to generate certificate for webhook. `webhook.enable` must be set to `true` to enable cert-manager. |
-| certManager.issuerRef | object | A self-signed issuer will be created and used if not specified. | The reference to the issuer. |
-| certManager.duration | string | `2160h` (90 days) will be used if not specified. | The duration of the certificate validity (e.g. `2160h`). See [cert-manager.io/v1.Certificate](https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.Certificate). |
-| certManager.renewBefore | string | 1/3 of issued certificate’s lifetime. | The duration before the certificate expiration to renew the certificate (e.g. `720h`). See [cert-manager.io/v1.Certificate](https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.Certificate). |
 
 ## Maintainers
 
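The removed `certManager.*` rows correspond to a values block like the following sketch (the issuer name is hypothetical; omitting `issuerRef` falls back to a generated self-signed issuer):

```yaml
certManager:
  enable: true        # requires webhook.enable=true
  issuerRef:
    group: cert-manager.io
    kind: ClusterIssuer
    name: my-issuer   # hypothetical issuer
  duration: 2160h     # certificate validity; 90 days is the default
  renewBefore: 720h   # defaults to 1/3 of the certificate's lifetime
```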
@@ -2,4 +2,6 @@ kind: Cluster
 apiVersion: kind.x-k8s.io/v1alpha4
 nodes:
 - role: control-plane
+  image: kindest/node:v1.29.2
 - role: worker
+  image: kindest/node:v1.29.2
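Combined with the hunk's context, the resulting kind config pins the node image for both roles:

```yaml
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
  image: kindest/node:v1.29.2
- role: worker
  image: kindest/node:v1.29.2
```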
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -1,272 +0,0 @@
----
-apiVersion: apiextensions.k8s.io/v1
-kind: CustomResourceDefinition
-metadata:
-  annotations:
-    api-approved.kubernetes.io: https://github.com/kubeflow/spark-operator/pull/1298
-    controller-gen.kubebuilder.io/version: v0.17.1
-  name: sparkconnects.sparkoperator.k8s.io
-spec:
-  group: sparkoperator.k8s.io
-  names:
-    kind: SparkConnect
-    listKind: SparkConnectList
-    plural: sparkconnects
-    shortNames:
-    - sparkconn
-    singular: sparkconnect
-  scope: Namespaced
-  versions:
-  - additionalPrinterColumns:
-    - jsonPath: .metadata.creationTimestamp
-      name: Age
-      type: date
-    name: v1alpha1
-    schema:
-      openAPIV3Schema:
-        description: SparkConnect is the Schema for the sparkconnections API.
-        properties:
-          apiVersion:
-            description: |-
-              APIVersion defines the versioned schema of this representation of an object.
-              Servers should convert recognized schemas to the latest internal value, and
-              may reject unrecognized values.
-              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
-            type: string
-          kind:
-            description: |-
-              Kind is a string value representing the REST resource this object represents.
-              Servers may infer this from the endpoint the client submits requests to.
-              Cannot be updated.
-              In CamelCase.
-              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
-            type: string
-          metadata:
-            type: object
-          spec:
-            description: SparkConnectSpec defines the desired state of SparkConnect.
-            properties:
-              dynamicAllocation:
-                description: |-
-                  DynamicAllocation configures dynamic allocation that becomes available for the Kubernetes
-                  scheduler backend since Spark 3.0.
-                properties:
-                  enabled:
-                    description: Enabled controls whether dynamic allocation is enabled
-                      or not.
-                    type: boolean
-                  initialExecutors:
-                    description: |-
-                      InitialExecutors is the initial number of executors to request. If .spec.executor.instances
-                      is also set, the initial number of executors is set to the bigger of that and this option.
-                    format: int32
-                    type: integer
-                  maxExecutors:
-                    description: MaxExecutors is the upper bound for the number of
-                      executors if dynamic allocation is enabled.
-                    format: int32
-                    type: integer
-                  minExecutors:
-                    description: MinExecutors is the lower bound for the number of
-                      executors if dynamic allocation is enabled.
-                    format: int32
-                    type: integer
-                  shuffleTrackingEnabled:
-                    description: |-
-                      ShuffleTrackingEnabled enables shuffle file tracking for executors, which allows dynamic allocation without
-                      the need for an external shuffle service. This option will try to keep alive executors that are storing
-                      shuffle data for active jobs. If external shuffle service is enabled, set ShuffleTrackingEnabled to false.
-                      ShuffleTrackingEnabled is true by default if dynamicAllocation.enabled is true.
-                    type: boolean
-                  shuffleTrackingTimeout:
-                    description: |-
-                      ShuffleTrackingTimeout controls the timeout in milliseconds for executors that are holding
-                      shuffle data if shuffle tracking is enabled (true by default if dynamic allocation is enabled).
-                    format: int64
-                    type: integer
-                type: object
-              executor:
-                description: Executor is the Spark executor specification.
-                properties:
-                  cores:
-                    description: Cores maps to `spark.driver.cores` or `spark.executor.cores`
-                      for the driver and executors, respectively.
-                    format: int32
-                    minimum: 1
-                    type: integer
-                  instances:
-                    description: Instances is the number of executor instances.
-                    format: int32
-                    minimum: 0
-                    type: integer
-                  memory:
-                    description: Memory is the amount of memory to request for the
-                      pod.
-                    type: string
-                  template:
-                    description: |-
-                      Template is a pod template that can be used to define the driver or executor pod configurations that Spark configurations do not support.
-                      Spark version >= 3.0.0 is required.
-                      Ref: https://spark.apache.org/docs/latest/running-on-kubernetes.html#pod-template.
-                    type: object
-                    x-kubernetes-preserve-unknown-fields: true
-                type: object
-              hadoopConf:
-                additionalProperties:
-                  type: string
-                description: |-
-                  HadoopConf carries user-specified Hadoop configuration properties as they would use the "--conf" option
-                  in spark-submit. The SparkApplication controller automatically adds prefix "spark.hadoop." to Hadoop
-                  configuration properties.
-                type: object
-              image:
-                description: |-
-                  Image is the container image for the driver, executor, and init-container. Any custom container images for the
-                  driver, executor, or init-container takes precedence over this.
-                type: string
-              server:
-                description: Server is the Spark connect server specification.
-                properties:
-                  cores:
-                    description: Cores maps to `spark.driver.cores` or `spark.executor.cores`
-                      for the driver and executors, respectively.
-                    format: int32
-                    minimum: 1
-                    type: integer
-                  memory:
-                    description: Memory is the amount of memory to request for the
-                      pod.
-                    type: string
-                  template:
-                    description: |-
-                      Template is a pod template that can be used to define the driver or executor pod configurations that Spark configurations do not support.
-                      Spark version >= 3.0.0 is required.
-                      Ref: https://spark.apache.org/docs/latest/running-on-kubernetes.html#pod-template.
-                    type: object
-                    x-kubernetes-preserve-unknown-fields: true
-                type: object
-              sparkConf:
-                additionalProperties:
-                  type: string
-                description: |-
-                  SparkConf carries user-specified Spark configuration properties as they would use the "--conf" option in
-                  spark-submit.
-                type: object
-              sparkVersion:
-                description: SparkVersion is the version of Spark the spark connect
-                  use.
-                type: string
-            required:
-            - executor
-            - server
-            - sparkVersion
-            type: object
-          status:
-            description: SparkConnectStatus defines the observed state of SparkConnect.
-            properties:
-              conditions:
-                description: Represents the latest available observations of a SparkConnect's
-                  current state.
-                items:
-                  description: Condition contains details for one aspect of the current
-                    state of this API Resource.
-                  properties:
-                    lastTransitionTime:
-                      description: |-
-                        lastTransitionTime is the last time the condition transitioned from one status to another.
-                        This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
-                      format: date-time
-                      type: string
-                    message:
-                      description: |-
-                        message is a human readable message indicating details about the transition.
-                        This may be an empty string.
-                      maxLength: 32768
-                      type: string
-                    observedGeneration:
-                      description: |-
-                        observedGeneration represents the .metadata.generation that the condition was set based upon.
-                        For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
-                        with respect to the current state of the instance.
-                      format: int64
-                      minimum: 0
-                      type: integer
-                    reason:
-                      description: |-
-                        reason contains a programmatic identifier indicating the reason for the condition's last transition.
-                        Producers of specific condition types may define expected values and meanings for this field,
-                        and whether the values are considered a guaranteed API.
-                        The value should be a CamelCase string.
-                        This field may not be empty.
-                      maxLength: 1024
-                      minLength: 1
-                      pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
-                      type: string
-                    status:
-                      description: status of the condition, one of True, False, Unknown.
-                      enum:
-                      - "True"
-                      - "False"
-                      - Unknown
-                      type: string
-                    type:
-                      description: type of condition in CamelCase or in foo.example.com/CamelCase.
-                      maxLength: 316
-                      pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
-                      type: string
-                  required:
-                  - lastTransitionTime
-                  - message
-                  - reason
-                  - status
-                  - type
-                  type: object
-                type: array
-                x-kubernetes-list-map-keys:
-                - type
-                x-kubernetes-list-type: map
-              executors:
-                additionalProperties:
-                  type: integer
-                description: Executors represents the current state of the SparkConnect
-                  executors.
-                type: object
-              lastUpdateTime:
-                description: LastUpdateTime is the time at which the SparkConnect
-                  controller last updated the SparkConnect.
-                format: date-time
-                type: string
-              server:
-                description: Server represents the current state of the SparkConnect
-                  server.
-                properties:
-                  podIp:
-                    description: PodIP is the IP address of the pod that is running
-                      the Spark Connect server.
-                    type: string
-                  podName:
-                    description: PodName is the name of the pod that is running the
-                      Spark Connect server.
-                    type: string
-                  serviceName:
-                    description: ServiceName is the name of the service that is exposing
-                      the Spark Connect server.
-                    type: string
-                type: object
-              startTime:
-                description: StartTime is the time at which the SparkConnect controller
-                  started processing the SparkConnect.
-                format: date-time
-                type: string
-              state:
-                description: State represents the current state of the SparkConnect.
-                type: string
-            type: object
-        required:
-        - metadata
-        - spec
-        type: object
-    served: true
-    storage: true
-    subresources:
-      status: {}
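Going by the schema above, `executor`, `server`, and `sparkVersion` are the required spec fields, so a minimal SparkConnect manifest would look roughly like this sketch (the name, image, and version are illustrative, not from this diff):

```yaml
apiVersion: sparkoperator.k8s.io/v1alpha1
kind: SparkConnect
metadata:
  name: spark-connect-example   # hypothetical name
spec:
  sparkVersion: "4.0.0"         # illustrative Spark version
  image: spark:4.0.0            # optional; shared by server and executors
  server:
    cores: 1
    memory: 512m
  executor:
    instances: 2
    cores: 1
    memory: 512m
```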
@@ -74,5 +74,5 @@ app.kubernetes.io/instance: {{ .Release.Name }}
 Spark Operator image
 */}}
 {{- define "spark-operator.image" -}}
-{{ printf "%s/%s:%s" .Values.image.registry .Values.image.repository (.Values.image.tag | default .Chart.AppVersion | toString) }}
+{{ printf "%s/%s:%s" .Values.image.registry .Values.image.repository (.Values.image.tag | default .Chart.AppVersion) }}
 {{- end -}}
@@ -1,29 +0,0 @@
-{{- /*
-Copyright 2025 The Kubeflow authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    https://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/ -}}
-
-{{/*
-Create the name of the webhook certificate issuer.
-*/}}
-{{- define "spark-operator.certManager.issuer.name" -}}
-{{ include "spark-operator.name" . }}-self-signed-issuer
-{{- end -}}
-
-{{/*
-Create the name of the certificate to be used by webhook.
-*/}}
-{{- define "spark-operator.certManager.certificate.name" -}}
-{{ include "spark-operator.name" . }}-certificate
-{{- end -}}
@@ -1,56 +0,0 @@
-{{- /*
-Copyright 2025 The Kubeflow authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    https://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/ -}}
-
-{{- if .Values.webhook.enable }}
-{{- if .Values.certManager.enable }}
-{{- if not (.Capabilities.APIVersions.Has "cert-manager.io/v1/Certificate") }}
-{{- fail "The cluster does not support the required API version `cert-manager.io/v1` for `Certificate`." }}
-{{- end }}
-apiVersion: cert-manager.io/v1
-kind: Certificate
-metadata:
-  name: {{ include "spark-operator.certManager.certificate.name" . }}
-  namespace: {{ .Release.Namespace }}
-  labels:
-    {{- include "spark-operator.labels" . | nindent 4 }}
-spec:
-  secretName: {{ include "spark-operator.webhook.secretName" . }}
-  issuerRef:
-    {{- if not .Values.certManager.issuerRef }}
-    group: cert-manager.io
-    kind: Issuer
-    name: {{ include "spark-operator.certManager.issuer.name" . }}
-    {{- else }}
-    {{- toYaml .Values.certManager.issuerRef | nindent 4 }}
-    {{- end }}
-  commonName: {{ include "spark-operator.webhook.serviceName" . }}.{{ .Release.Namespace }}.svc
-  dnsNames:
-  - {{ include "spark-operator.webhook.serviceName" . }}.{{ .Release.Namespace }}.svc
-  - {{ include "spark-operator.webhook.serviceName" . }}.{{ .Release.Namespace }}.svc.cluster.local
-  subject:
-    organizationalUnits:
-    - spark-operator
-  usages:
-  - server auth
-  - client auth
-  {{- with .Values.certManager.duration }}
-  duration: {{ . }}
-  {{- end }}
-  {{- with .Values.certManager.renewBefore }}
-  renewBefore: {{ . }}
-  {{- end }}
-{{- end }}
-{{- end }}
@@ -1,34 +0,0 @@
-{{- /*
-Copyright 2025 The Kubeflow authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    https://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/ -}}
-
-{{- if .Values.webhook.enable }}
-{{- if .Values.certManager.enable }}
-{{- if not .Values.certManager.issuerRef }}
-{{- if not (.Capabilities.APIVersions.Has "cert-manager.io/v1/Issuer") }}
-{{- fail "The cluster does not support the required API version `cert-manager.io/v1` for `Issuer`." }}
-{{- end }}
-apiVersion: cert-manager.io/v1
-kind: Issuer
-metadata:
-  name: {{ include "spark-operator.certManager.issuer.name" . }}
-  namespace: {{ .Release.Namespace }}
-  labels:
-    {{- include "spark-operator.labels" . | nindent 4 }}
-spec:
-  selfSigned: {}
-{{- end }}
-{{- end }}
-{{- end }}
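Both cert-manager templates are driven purely by chart values. A minimal values override that exercises the non-default path might look like the sketch below; the `ClusterIssuer` named `selfsigned` is only an example, mirroring the commented-out defaults in values.yaml further down.

    webhook:
      enable: true          # required; certManager is a no-op without the webhook
    certManager:
      enable: true
      issuerRef:
        group: cert-manager.io
        kind: ClusterIssuer
        name: selfsigned    # example issuer; omit issuerRef entirely to get the self-signed Issuer above
      duration: 2160h       # optional; 90 days
      renewBefore: 720h     # optional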
@@ -157,14 +157,6 @@ Create the role policy rules for the controller in every Spark job namespace
   - update
   - patch
   - delete
-- apiGroups:
-  - ""
-  resources:
-  - events
-  verbs:
-  - create
-  - update
-  - patch
 - apiGroups:
   - extensions
   - networking.k8s.io
@@ -172,17 +164,13 @@ Create the role policy rules for the controller in every Spark job namespace
   resources:
   - ingresses
   verbs:
   - get
-  - list
-  - watch
   - create
-  - update
   - delete
 - apiGroups:
   - sparkoperator.k8s.io
   resources:
   - sparkapplications
   - scheduledsparkapplications
-  - sparkconnects
   verbs:
   - get
   - list
@@ -198,7 +186,6 @@ Create the role policy rules for the controller in every Spark job namespace
   - sparkapplications/finalizers
   - scheduledsparkapplications/status
   - scheduledsparkapplications/finalizers
-  - sparkconnects/status
   verbs:
   - get
   - update
@@ -56,9 +56,6 @@ spec:
         {{- with .Values.controller.logLevel }}
         - --zap-log-level={{ . }}
         {{- end }}
-        {{- with .Values.controller.logEncoder }}
-        - --zap-encoder={{ . }}
-        {{- end }}
         {{- with .Values.spark.jobNamespaces }}
         {{- if has "" . }}
         - --namespaces=""
@@ -67,20 +64,13 @@ spec:
         {{- end }}
         {{- end }}
         - --controller-threads={{ .Values.controller.workers }}
-        - --enable-ui-service={{ .Values.controller.uiService.enable }}
+        {{- with .Values.controller.uiService.enable }}
+        - --enable-ui-service=true
+        {{- end }}
         {{- if .Values.controller.uiIngress.enable }}
         {{- with .Values.controller.uiIngress.urlFormat }}
         - --ingress-url-format={{ . }}
         {{- end }}
-        {{- with .Values.controller.uiIngress.ingressClassName }}
-        - --ingress-class-name={{ . }}
-        {{- end }}
-        {{- with .Values.controller.uiIngress.tls }}
-        - --ingress-tls={{ . | toJson }}
-        {{- end }}
-        {{- with .Values.controller.uiIngress.annotations }}
-        - --ingress-annotations={{ . | toJson }}
-        {{- end }}
         {{- end }}
         {{- if .Values.controller.batchScheduler.enable }}
         - --enable-batch-scheduler=true
@@ -97,15 +87,10 @@ spec:
         - --metrics-endpoint={{ .Values.prometheus.metrics.endpoint }}
         - --metrics-prefix={{ .Values.prometheus.metrics.prefix }}
         - --metrics-labels=app_type
-        - --metrics-job-start-latency-buckets={{ .Values.prometheus.metrics.jobStartLatencyBuckets }}
         {{- end }}
-        {{ if .Values.controller.leaderElection.enable }}
         - --leader-election=true
         - --leader-election-lock-name={{ include "spark-operator.controller.leaderElectionName" . }}
         - --leader-election-lock-namespace={{ .Release.Namespace }}
-        {{- else -}}
-        - --leader-election=false
-        {{- end }}
         {{- if .Values.controller.pprof.enable }}
         - --pprof-bind-address=:{{ .Values.controller.pprof.port }}
         {{- end }}
@@ -114,12 +99,6 @@ spec:
         {{- if .Values.controller.workqueueRateLimiter.maxDelay.enable }}
         - --workqueue-ratelimiter-max-delay={{ .Values.controller.workqueueRateLimiter.maxDelay.duration }}
         {{- end }}
-        {{- if .Values.controller.driverPodCreationGracePeriod }}
-        - --driver-pod-creation-grace-period={{ .Values.controller.driverPodCreationGracePeriod }}
-        {{- end }}
-        {{- if .Values.controller.maxTrackedExecutorPerApp }}
-        - --max-tracked-executor-per-app={{ .Values.controller.maxTrackedExecutorPerApp }}
-        {{- end }}
         {{- if or .Values.prometheus.metrics.enable .Values.controller.pprof.enable }}
         ports:
         {{- if .Values.controller.pprof.enable }}
@@ -188,7 +167,6 @@ spec:
        priorityClassName: {{ . }}
       {{- end }}
       serviceAccountName: {{ include "spark-operator.controller.serviceAccountName" . }}
-      automountServiceAccountToken: {{ .Values.controller.serviceAccount.automountServiceAccountToken }}
       {{- with .Values.controller.podSecurityContext }}
       securityContext:
         {{- toYaml . | nindent 8 }}
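The ingress flags removed above are fed from `controller.uiIngress.*`, with `toJson` turning list and map values into JSON strings on the command line. A sketch of the mapping, using the same example values as the chart tests further down:

    controller:
      uiService:
        enable: true
      uiIngress:
        enable: true
        ingressClassName: nginx
        tls:
        - hosts:
          - "*.test.com"
          secretName: test-secret
        annotations:
          kubernetes.io/ingress.class: nginx
    # renders (per the deployment template above) as:
    #   --ingress-class-name=nginx
    #   --ingress-tls=[{"hosts":["*.test.com"],"secretName":"test-secret"}]
    #   --ingress-annotations={"kubernetes.io/ingress.class":"nginx"}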
@@ -33,6 +33,14 @@ rules:
   - nodes
   verbs:
   - get
+- apiGroups:
+  - ""
+  resources:
+  - events
+  verbs:
+  - create
+  - update
+  - patch
 - apiGroups:
   - apiextensions.k8s.io
   resources:
@@ -77,7 +85,6 @@ metadata:
     {{- toYaml . | nindent 4 }}
   {{- end }}
 rules:
-{{- if .Values.controller.leaderElection.enable }}
 - apiGroups:
   - coordination.k8s.io
   resources:
@@ -93,7 +100,6 @@ rules:
   verbs:
   - get
   - update
-{{- end }}
 {{- if has .Release.Namespace .Values.spark.jobNamespaces }}
 {{ include "spark-operator.controller.policyRules" . }}
 {{- end }}
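The RBAC hunks here and in the helper further up are two halves of one move: permission to publish Events shifts between the per-job-namespace policy rules and the operator's own role, so on either side the controller can still record Kubernetes Events for the applications it manages. The rule itself is identical:

    - apiGroups:
      - ""
      resources:
      - events
      verbs:
      - create
      - update
      - patch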
@@ -17,7 +17,6 @@ limitations under the License.
 {{- if .Values.controller.serviceAccount.create }}
 apiVersion: v1
 kind: ServiceAccount
-automountServiceAccountToken: {{ .Values.controller.serviceAccount.automountServiceAccountToken }}
 metadata:
   name: {{ include "spark-operator.controller.serviceAccountName" . }}
   namespace: {{ .Release.Namespace }}

@@ -21,7 +21,6 @@ limitations under the License.
 ---
 apiVersion: v1
 kind: ServiceAccount
-automountServiceAccountToken: {{ $.Values.spark.serviceAccount.automountServiceAccountToken }}
 metadata:
   name: {{ include "spark-operator.spark.serviceAccountName" $ }}
   namespace: {{ $jobNamespace }}

@@ -83,6 +83,7 @@ Create the name of the secret to be used by webhook
 {{ include "spark-operator.webhook.name" . }}-certs
 {{- end -}}
 
+
 {{/*
 Create the name of the service to be used by webhook
 */}}
@@ -50,9 +50,6 @@ spec:
         {{- with .Values.webhook.logLevel }}
         - --zap-log-level={{ . }}
         {{- end }}
-        {{- with .Values.webhook.logEncoder }}
-        - --zap-encoder={{ . }}
-        {{- end }}
         {{- with .Values.spark.jobNamespaces }}
         {{- if has "" . }}
         - --namespaces=""
@@ -70,9 +67,6 @@ spec:
         {{- with .Values.webhook.resourceQuotaEnforcement.enable }}
         - --enable-resource-quota-enforcement=true
         {{- end }}
-        {{- if .Values.certManager.enable }}
-        - --enable-cert-manager=true
-        {{- end }}
         {{- if .Values.prometheus.metrics.enable }}
         - --enable-metrics=true
         - --metrics-bind-address=:{{ .Values.prometheus.metrics.port }}
@@ -80,13 +74,9 @@ spec:
         - --metrics-prefix={{ .Values.prometheus.metrics.prefix }}
         - --metrics-labels=app_type
         {{- end }}
-        {{ if .Values.webhook.leaderElection.enable }}
         - --leader-election=true
         - --leader-election-lock-name={{ include "spark-operator.webhook.leaderElectionName" . }}
         - --leader-election-lock-namespace={{ .Release.Namespace }}
-        {{- else -}}
-        - --leader-election=false
-        {{- end }}
         ports:
         - name: {{ .Values.webhook.portName | quote }}
           containerPort: {{ .Values.webhook.port }}
@@ -104,7 +94,7 @@ spec:
         {{- end }}
         {{- with .Values.webhook.volumeMounts }}
         volumeMounts:
-        {{- toYaml . | nindent 8 }}
+        {{- toYaml . | nindent 10 }}
         {{- end }}
         {{- with .Values.webhook.resources }}
         resources:
@@ -133,7 +123,7 @@ spec:
       {{- end }}
       {{- with .Values.webhook.volumes }}
       volumes:
-      {{- toYaml . | nindent 6 }}
+      {{- toYaml . | nindent 8 }}
       {{- end }}
       {{- with .Values.webhook.nodeSelector }}
       nodeSelector:
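The `nindent` bumps in the last two hunks are pure indentation fixes: on the `+` side the rendered entries must sit two spaces deeper to line up under `volumeMounts:` and `volumes:` in that template's layout. Rendered with the serving-certs entries from the default values further down, the result is shaped roughly like this (a sketch; surrounding fields elided):

    volumeMounts:            # list items emitted by {{- toYaml . | nindent 10 }}
      - name: serving-certs
        mountPath: /etc/k8s-webhook-server/serving-certs
        subPath: serving-certs
        readOnly: false
    volumes:                 # list items emitted by {{- toYaml . | nindent 8 }}
      - name: serving-certs
        emptyDir:
          sizeLimit: 500Mi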
@@ -151,7 +141,6 @@ spec:
        priorityClassName: {{ . }}
       {{- end }}
       serviceAccountName: {{ include "spark-operator.webhook.serviceAccountName" . }}
-      automountServiceAccountToken: {{ .Values.webhook.serviceAccount.automountServiceAccountToken }}
       {{- with .Values.webhook.podSecurityContext }}
       securityContext:
         {{- toYaml . | nindent 8 }}
@@ -21,10 +21,6 @@ metadata:
   name: {{ include "spark-operator.webhook.name" . }}
   labels:
     {{- include "spark-operator.webhook.labels" . | nindent 4 }}
-  {{- if .Values.certManager.enable }}
-  annotations:
-    cert-manager.io/inject-ca-from: {{ .Release.Namespace }}/{{ include "spark-operator.certManager.certificate.name" . }}
-  {{- end }}
 webhooks:
 - name: mutate--v1-pod.sparkoperator.k8s.io
   admissionReviewVersions: ["v1"]
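The annotation being removed is what lets cert-manager's CA injector populate the webhook's `caBundle`; without it the chart has to wire the CA in itself. When enabled, it renders along these lines; the object name is an assumed rendering of the `spark-operator.webhook.name` helper:

    apiVersion: admissionregistration.k8s.io/v1
    kind: MutatingWebhookConfiguration
    metadata:
      name: spark-operator-webhook          # assumed name
      annotations:
        cert-manager.io/inject-ca-from: spark-operator/spark-operator-certificate
    webhooks:
    - name: mutate--v1-pod.sparkoperator.k8s.io
      admissionReviewVersions: ["v1"]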
@@ -107,7 +107,6 @@ rules:
   verbs:
   - get
   - update
-{{- if .Values.webhook.leaderElection.enable }}
 - apiGroups:
   - coordination.k8s.io
   resources:
@@ -123,7 +122,6 @@ rules:
   verbs:
   - get
   - update
-{{- end }}
 {{- if has .Release.Namespace .Values.spark.jobNamespaces }}
 {{ include "spark-operator.webhook.policyRules" . }}
 {{- end }}

@@ -18,7 +18,6 @@ limitations under the License.
 {{- if .Values.webhook.serviceAccount.create -}}
 apiVersion: v1
 kind: ServiceAccount
-automountServiceAccountToken: {{ .Values.webhook.serviceAccount.automountServiceAccountToken }}
 metadata:
   name: {{ include "spark-operator.webhook.serviceAccountName" . }}
   namespace: {{ .Release.Namespace }}

@@ -21,10 +21,6 @@ metadata:
   name: {{ include "spark-operator.webhook.name" . }}
   labels:
     {{- include "spark-operator.webhook.labels" . | nindent 4 }}
-  {{- if .Values.certManager.enable }}
-  annotations:
-    cert-manager.io/inject-ca-from: {{ .Release.Namespace }}/{{ include "spark-operator.certManager.certificate.name" . }}
-  {{- end }}
 webhooks:
 - name: validate-sparkoperator-k8s-io-v1beta2-sparkapplication.sparkoperator.k8s.io
   admissionReviewVersions: ["v1"]
@@ -1,134 +0,0 @@
-#
-# Copyright 2025 The Kubeflow authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     https://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-suite: Test CertManager Certificate
-
-templates:
-  - certmanager/certificate.yaml
-
-release:
-  name: spark-operator
-  namespace: spark-operator
-
-tests:
-  - it: Should not create Certificate if `webhook.enable` is `false`
-    capabilities:
-      apiVersions:
-        - cert-manager.io/v1/Certificate
-    set:
-      webhook:
-        enable: false
-      certManager:
-        enable: true
-    asserts:
-      - hasDocuments:
-          count: 0
-
-  - it: Should not create Certificate if `certManager.enable` is `false`
-    capabilities:
-      apiVersions:
-        - cert-manager.io/v1/Certificate
-    set:
-      webhook:
-        enable: true
-      certManager:
-        enable: false
-    asserts:
-      - hasDocuments:
-          count: 0
-
-  - it: Should create Certificate if `webhook.enable` is `true` and `certManager.enable` is `true`
-    capabilities:
-      apiVersions:
-        - cert-manager.io/v1/Certificate
-    set:
-      webhook:
-        enable: true
-      certManager:
-        enable: true
-    asserts:
-      - containsDocument:
-          apiVersion: cert-manager.io/v1
-          kind: Certificate
-          name: spark-operator-certificate
-          namespace: spark-operator
-
-  - it: Should fail if the cluster does not support `cert-manager.io/v1/Certificate`
-    set:
-      webhook:
-        enable: true
-      certManager:
-        enable: true
-    asserts:
-      - failedTemplate:
-          errorMessage: "The cluster does not support the required API version `cert-manager.io/v1` for `Certificate`."
-
-  - it: Should use self signed issuer if `certManager.issuerRef` is not set
-    capabilities:
-      apiVersions:
-        - cert-manager.io/v1/Certificate
-    set:
-      webhook:
-        enable: true
-      certManager:
-        enable: true
-        issuerRef:
-          group: cert-manager.io
-          kind: Issuer
-          name: test-issuer
-    asserts:
-      - equal:
-          path: spec.issuerRef
-          value:
-            group: cert-manager.io
-            kind: Issuer
-            name: test-issuer
-
-  - it: Should use the specified issuer if `certManager.issuerRef` is set
-    capabilities:
-      apiVersions:
-        - cert-manager.io/v1/Certificate
-    set:
-      webhook:
-        enable: true
-      certManager:
-        enable: true
-        issuerRef:
-          group: cert-manager.io
-          kind: Issuer
-          name: test-issuer
-    asserts:
-      - equal:
-          path: spec.issuerRef
-          value:
-            group: cert-manager.io
-            kind: Issuer
-            name: test-issuer
-
-  - it: Should use the specified duration if `certManager.duration` is set
-    capabilities:
-      apiVersions:
-        - cert-manager.io/v1/Certificate
-    set:
-      webhook:
-        enable: true
-      certManager:
-        enable: true
-        duration: 8760h
-    asserts:
-      - equal:
-          path: spec.duration
-          value: 8760h
@@ -1,95 +0,0 @@
-#
-# Copyright 2025 The Kubeflow authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     https://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-suite: Test CertManager Issuer
-
-templates:
-  - certmanager/issuer.yaml
-
-release:
-  name: spark-operator
-  namespace: spark-operator
-
-tests:
-  - it: Should not create Issuer if `webhook.enable` is `false`
-    capabilities:
-      apiVersions:
-        - cert-manager.io/v1/Issuer
-    set:
-      webhook:
-        enable: false
-      certManager:
-        enable: true
-    asserts:
-      - hasDocuments:
-          count: 0
-
-  - it: Should not create Issuer if `certManager.enable` is `false`
-    capabilities:
-      apiVersions:
-        - cert-manager.io/v1/Issuer
-    set:
-      webhook:
-        enable: true
-      certManager:
-        enable: false
-    asserts:
-      - hasDocuments:
-          count: 0
-
-  - it: Should not create Issuer if `certManager.issuerRef` is set
-    capabilities:
-      apiVersions:
-        - cert-manager.io/v1/Issuer
-    set:
-      webhook:
-        enable: true
-      certManager:
-        enable: true
-        issuerRef:
-          group: cert-manager.io
-          kind: Issuer
-          name: test-issuer
-    asserts:
-      - hasDocuments:
-          count: 0
-
-  - it: Should fail if the cluster does not support `cert-manager.io/v1/Issuer`
-    set:
-      webhook:
-        enable: true
-      certManager:
-        enable: true
-    asserts:
-      - failedTemplate:
-          errorMessage: "The cluster does not support the required API version `cert-manager.io/v1` for `Issuer`."
-
-  - it: Should create Issuer if `webhook.enable` is `true` and `certManager.enable` is `true`
-    capabilities:
-      apiVersions:
-        - cert-manager.io/v1/Issuer
-    set:
-      webhook:
-        enable: true
-      certManager:
-        enable: true
-        issuerRef: null
-    asserts:
-      - containsDocument:
-          apiVersion: cert-manager.io/v1
-          kind: Issuer
-          name: spark-operator-self-signed-issuer
-          namespace: spark-operator
@@ -171,50 +171,6 @@ tests:
           path: spec.template.spec.containers[?(@.name=="spark-operator-controller")].args
           content: --ingress-url-format={{$appName}}.example.com/{{$appNamespace}}/{{$appName}}
 
-  - it: Should contain `--ingress-class-name` arg if `controller.uiIngress.enable` is set to `true` and `controller.uiIngress.ingressClassName` is set
-    set:
-      controller:
-        uiService:
-          enable: true
-        uiIngress:
-          enable: true
-          ingressClassName: nginx
-    asserts:
-      - contains:
-          path: spec.template.spec.containers[?(@.name=="spark-operator-controller")].args
-          content: --ingress-class-name=nginx
-
-  - it: Should contain `--ingress-tls` arg if `controller.uiIngress.enable` is set to `true` and `controller.uiIngress.tls` is set
-    set:
-      controller:
-        uiService:
-          enable: true
-        uiIngress:
-          enable: true
-          tls:
-            - hosts:
-                - "*.test.com"
-              secretName: test-secret
-    asserts:
-      - contains:
-          path: spec.template.spec.containers[?(@.name=="spark-operator-controller")].args
-          content: '--ingress-tls=[{"hosts":["*.test.com"],"secretName":"test-secret"}]'
-
-  - it: Should contain `--ingress-annotations` arg if `controller.uiIngress.enable` is set to `true` and `controller.uiIngress.annotations` is set
-    set:
-      controller:
-        uiService:
-          enable: true
-        uiIngress:
-          enable: true
-          annotations:
-            cert-manager.io/cluster-issuer: "letsencrypt"
-            kubernetes.io/ingress.class: nginx
-    asserts:
-      - contains:
-          path: spec.template.spec.containers[?(@.name=="spark-operator-controller")].args
-          content: '--ingress-annotations={"cert-manager.io/cluster-issuer":"letsencrypt","kubernetes.io/ingress.class":"nginx"}'
-
   - it: Should contain `--enable-batch-scheduler` arg if `controller.batchScheduler.enable` is `true`
     set:
       controller:
@@ -245,7 +201,6 @@ tests:
         portName: test-port
         endpoint: /test-endpoint
         prefix: test-prefix
-        jobStartLatencyBuckets: "180,360,420,690"
     asserts:
       - contains:
           path: spec.template.spec.containers[?(@.name=="spark-operator-controller")].args
@@ -262,9 +217,6 @@ tests:
       - contains:
           path: spec.template.spec.containers[?(@.name=="spark-operator-controller")].args
          content: --metrics-labels=app_type
-      - contains:
-          path: spec.template.spec.containers[?(@.name=="spark-operator-controller")].args
-          content: --metrics-job-start-latency-buckets=180,360,420,690
 
   - it: Should enable leader election by default
     asserts:
@@ -278,16 +230,6 @@ tests:
           path: spec.template.spec.containers[?(@.name=="spark-operator-controller")].args
           content: --leader-election-lock-namespace=spark-operator
 
-  - it: Should disable leader election if `controller.leaderElection.enable` is set to `false`
-    set:
-      controller:
-        leaderElection:
-          enable: false
-    asserts:
-      - contains:
-          path: spec.template.spec.containers[?(@.name=="spark-operator-controller")].args
-          content: --leader-election=false
-
   - it: Should add metric ports if `prometheus.metrics.enable` is true
     set:
       prometheus:
@@ -400,43 +342,16 @@ tests:
     set:
       controller:
         securityContext:
-          readOnlyRootFilesystem: true
           runAsUser: 1000
           runAsGroup: 2000
           fsGroup: 3000
-          allowPrivilegeEscalation: false
-          capabilities:
-            drop:
-            - ALL
-          runAsNonRoot: true
-          privileged: false
     asserts:
       - equal:
-          path: spec.template.spec.containers[0].securityContext.readOnlyRootFilesystem
-          value: true
-      - equal:
-          path: spec.template.spec.containers[0].securityContext.runAsUser
-          value: 1000
-      - equal:
-          path: spec.template.spec.containers[0].securityContext.runAsGroup
-          value: 2000
-      - equal:
-          path: spec.template.spec.containers[0].securityContext.fsGroup
-          value: 3000
-      - equal:
-          path: spec.template.spec.containers[0].securityContext.allowPrivilegeEscalation
-          value: false
-      - equal:
-          path: spec.template.spec.containers[0].securityContext.capabilities
+          path: spec.template.spec.containers[0].securityContext
           value:
-            drop:
-              - ALL
-      - equal:
-          path: spec.template.spec.containers[0].securityContext.runAsNonRoot
-          value: true
-      - equal:
-          path: spec.template.spec.containers[0].securityContext.privileged
-          value: false
+            runAsUser: 1000
+            runAsGroup: 2000
+            fsGroup: 3000
 
   - it: Should add sidecars if `controller.sidecars` is set
     set:
@@ -702,28 +617,10 @@ tests:
   - it: Should contain `--workqueue-ratelimiter-max-delay` arg if `controller.workqueueRateLimiter.maxDelay.enable` is set to `true`
     set:
       controller:
        maxDelay:
           enable: false
           duration: 1h
     asserts:
       - notContains:
           path: spec.template.spec.containers[?(@.name=="spark-operator-controller")].args
           content: --workqueue-ratelimiter-max-delay=1h
-
-  - it: Should contain `driver-pod-creation-grace-period` arg if `controller.driverPodCreationGracePeriod` is set
-    set:
-      controller:
-        driverPodCreationGracePeriod: 30s
-    asserts:
-      - contains:
-          path: spec.template.spec.containers[?(@.name=="spark-operator-controller")].args
-          content: --driver-pod-creation-grace-period=30s
-
-  - it: Should contain `--max-tracked-executor-per-app` arg if `controller.maxTrackedExecutorPerApp` is set
-    set:
-      controller:
-        maxTrackedExecutorPerApp: 123
-    asserts:
-      - contains:
-          path: spec.template.spec.containers[?(@.name=="spark-operator-controller")].args
-          content: --max-tracked-executor-per-app=123
@@ -173,16 +173,6 @@ tests:
           path: spec.template.spec.containers[?(@.name=="spark-operator-webhook")].args
           content: --leader-election-lock-namespace=spark-operator
 
-  - it: Should disable leader election if `webhook.leaderElection.enable` is set to `false`
-    set:
-      webhook:
-        leaderElection:
-          enable: false
-    asserts:
-      - contains:
-          path: spec.template.spec.containers[?(@.name=="spark-operator-webhook")].args
-          content: --leader-election=false
-
   - it: Should add webhook port
     set:
       webhook:
@@ -309,20 +299,10 @@ tests:
     set:
      webhook:
         securityContext:
-          readOnlyRootFilesystem: true
           runAsUser: 1000
           runAsGroup: 2000
           fsGroup: 3000
-          allowPrivilegeEscalation: false
-          capabilities:
-            drop:
-            - ALL
-          runAsNonRoot: true
-          privileged: false
     asserts:
-      - equal:
-          path: spec.template.spec.containers[0].securityContext.readOnlyRootFilesystem
-          value: true
       - equal:
           path: spec.template.spec.containers[0].securityContext.runAsUser
           value: 1000
@@ -332,20 +312,6 @@ tests:
       - equal:
           path: spec.template.spec.containers[0].securityContext.fsGroup
           value: 3000
-      - equal:
-          path: spec.template.spec.containers[0].securityContext.allowPrivilegeEscalation
-          value: false
-      - equal:
-          path: spec.template.spec.containers[0].securityContext.capabilities
-          value:
-            drop:
-              - ALL
-      - equal:
-          path: spec.template.spec.containers[0].securityContext.runAsNonRoot
-          value: true
-      - equal:
-          path: spec.template.spec.containers[0].securityContext.privileged
-          value: false
 
   - it: Should add sidecars if `webhook.sidecars` is set
     set:
@@ -29,9 +29,9 @@ commonLabels: {}
 
 image:
   # -- Image registry.
-  registry: ghcr.io
+  registry: docker.io
   # -- Image repository.
-  repository: kubeflow/spark-operator/controller
+  repository: kubeflow/spark-operator
   # -- Image tag.
   # @default -- If not set, the chart appVersion will be used.
   tag: ""
@@ -45,25 +45,12 @@ controller:
   # -- Number of replicas of controller.
   replicas: 1
 
-  leaderElection:
-    # -- Specifies whether to enable leader election for controller.
-    enable: true
-
   # -- Reconcile concurrency, higher values might increase memory usage.
   workers: 10
 
   # -- Configure the verbosity of logging, can be one of `debug`, `info`, `error`.
   logLevel: info
 
-  # -- Configure the encoder of logging, can be one of `console` or `json`.
-  logEncoder: console
-
-  # -- Grace period after a successful spark-submit when driver pod not found errors will be retried. Useful if the driver pod can take some time to be created.
-  driverPodCreationGracePeriod: 10s
-
-  # -- Specifies the maximum number of Executor pods that can be tracked by the controller per SparkApplication.
-  maxTrackedExecutorPerApp: 1000
-
   uiService:
     # -- Specifies whether to create service for Spark web UI.
     enable: true
@@ -75,17 +62,6 @@ controller:
     # -- Ingress URL format.
     # Required if `controller.uiIngress.enable` is true.
     urlFormat: ""
-    # -- Optionally set the ingressClassName.
-    ingressClassName: ""
-    # -- Optionally set default TLS configuration for the Spark UI's ingress. `ingressTLS` in the SparkApplication spec overrides this.
-    tls: []
-    # - hosts:
-    #     - "*.example.com"
-    #   secretName: "example-secret"
-    # -- Optionally set default ingress annotations for the Spark UI's ingress. `ingressAnnotations` in the SparkApplication spec overrides this.
-    annotations: {}
-    # key1: value1
-    # key2: value2
 
   batchScheduler:
     # -- Specifies whether to enable batch scheduler for spark jobs scheduling.
@@ -106,8 +82,6 @@ controller:
     name: ""
     # -- Extra annotations for the controller service account.
     annotations: {}
-    # -- Auto-mount service account token to the controller pods.
-    automountServiceAccountToken: true
 
   rbac:
     # -- Specifies whether to create RBAC resources for the controller.
@@ -126,11 +100,7 @@ controller:
   #   key2: value2
 
   # -- Volumes for controller pods.
-  volumes:
-    # Create a tmp directory to write Spark artifacts to for deployed Spark apps.
-    - name: tmp
-      emptyDir:
-        sizeLimit: 1Gi
+  volumes: []
 
   # -- Node selector for controller pods.
   nodeSelector: {}
@@ -145,8 +115,10 @@ controller:
   priorityClassName: ""
 
   # -- Security context for controller pods.
-  podSecurityContext:
-    fsGroup: 185
+  podSecurityContext: {}
+  # runAsUser: 1000
+  # runAsGroup: 2000
+  # fsGroup: 3000
 
   # -- Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in.
   # Ref: [Pod Topology Spread Constraints](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/).
@@ -166,11 +138,7 @@ controller:
   envFrom: []
 
   # -- Volume mounts for controller containers.
-  volumeMounts:
-    # Mount a tmp directory to write Spark artifacts to for deployed Spark apps.
-    - name: tmp
-      mountPath: "/tmp"
-      readOnly: false
+  volumeMounts: []
 
   # -- Pod resource requests and limits for controller containers.
   # Note, that each job submission will spawn a JVM within the controller pods using "/usr/local/openjdk-11/bin/java -Xmx128m".
@@ -185,16 +153,10 @@ controller:
   #   memory: 300Mi
 
   # -- Security context for controller containers.
-  securityContext:
-    readOnlyRootFilesystem: true
-    privileged: false
-    allowPrivilegeEscalation: false
-    runAsNonRoot: true
-    capabilities:
-      drop:
-      - ALL
-    seccompProfile:
-      type: RuntimeDefault
+  securityContext: {}
+  # runAsUser: 1000
+  # runAsGroup: 2000
+  # fsGroup: 3000
 
   # -- Sidecar containers for controller pods.
   sidecars: []
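Relaxing the pod and container security contexts to `{}` loosens the out-of-the-box posture; clusters enforcing the restricted Pod Security Standard will want the old defaults back. The removed settings can be restored verbatim through a values override:

    controller:
      podSecurityContext:
        fsGroup: 185
      securityContext:
        readOnlyRootFilesystem: true
        privileged: false
        allowPrivilegeEscalation: false
        runAsNonRoot: true
        capabilities:
          drop:
          - ALL
        seccompProfile:
          type: RuntimeDefault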
@@ -236,16 +198,9 @@ webhook:
   # -- Number of replicas of webhook server.
   replicas: 1
 
-  leaderElection:
-    # -- Specifies whether to enable leader election for webhook.
-    enable: true
-
   # -- Configure the verbosity of logging, can be one of `debug`, `info`, `error`.
   logLevel: info
 
-  # -- Configure the encoder of logging, can be one of `console` or `json`.
-  logEncoder: console
-
   # -- Specifies webhook port.
   port: 9443
 
@@ -270,8 +225,6 @@ webhook:
     name: ""
     # -- Extra annotations for the webhook service account.
     annotations: {}
-    # -- Auto-mount service account token to the webhook pods.
-    automountServiceAccountToken: true
 
   rbac:
     # -- Specifies whether to create RBAC resources for the webhook.
@@ -293,11 +246,7 @@ webhook:
   sidecars: []
 
   # -- Volumes for webhook pods.
-  volumes:
-    # Create a dir for the webhook to generate its certificates in.
-    - name: serving-certs
-      emptyDir:
-        sizeLimit: 500Mi
+  volumes: []
 
   # -- Node selector for webhook pods.
   nodeSelector: {}
@@ -312,8 +261,10 @@ webhook:
   priorityClassName: ""
 
   # -- Security context for webhook pods.
-  podSecurityContext:
-    fsGroup: 185
+  podSecurityContext: {}
+  # runAsUser: 1000
+  # runAsGroup: 2000
+  # fsGroup: 3000
 
   # -- Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in.
   # Ref: [Pod Topology Spread Constraints](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/).
@@ -333,13 +284,7 @@ webhook:
   envFrom: []
 
   # -- Volume mounts for webhook containers.
-  volumeMounts:
-    # Mount a dir for the webhook to generate its certificates in.
-    - name: serving-certs
-      mountPath: /etc/k8s-webhook-server/serving-certs
-      subPath: serving-certs
-      readOnly: false
+  volumeMounts: []
 
 
   # -- Pod resource requests and limits for webhook pods.
   resources: {}
@@ -351,16 +296,10 @@ webhook:
   #   memory: 300Mi
 
   # -- Security context for webhook containers.
-  securityContext:
-    readOnlyRootFilesystem: true
-    privileged: false
-    allowPrivilegeEscalation: false
-    runAsNonRoot: true
-    capabilities:
-      drop:
-      - ALL
-    seccompProfile:
-      type: RuntimeDefault
+  securityContext: {}
+  # runAsUser: 1000
+  # runAsGroup: 2000
+  # fsGroup: 3000
 
 # Pod disruption budget for webhook to avoid service degradation.
 podDisruptionBudget:
@@ -385,8 +324,6 @@ spark:
     name: ""
     # -- Optional annotations for the spark service account.
     annotations: {}
-    # -- Auto-mount service account token to the spark applications pods.
-    automountServiceAccountToken: true
 
   rbac:
     # -- Specifies whether to create RBAC resources for spark applications.
@@ -406,8 +343,6 @@ prometheus:
     endpoint: /metrics
     # -- Metrics prefix, will be added to all exported metrics.
     prefix: ""
-    # -- Job Start Latency histogram buckets. Specified in seconds.
-    jobStartLatencyBuckets: "30,60,90,120,150,180,210,240,270,300"
 
   # Prometheus pod monitor for controller pods
   podMonitor:
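With the buckets knob gone, `--metrics-job-start-latency-buckets` is no longer tunable from values. For reference, a metrics section using the defaults shown on the `-` side; `enable` is the toggle the deployment templates gate their metrics args on:

    prometheus:
      metrics:
        enable: true
        endpoint: /metrics
        prefix: ""
        jobStartLatencyBuckets: "30,60,90,120,150,180,210,240,270,300"   # seconds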
@@ -422,22 +357,3 @@ prometheus:
     podMetricsEndpoint:
       scheme: http
       interval: 5s
-
-certManager:
-  # -- Specifies whether to use [cert-manager](https://cert-manager.io) to generate certificate for webhook.
-  # `webhook.enable` must be set to `true` to enable cert-manager.
-  enable: false
-  # -- The reference to the issuer.
-  # @default -- A self-signed issuer will be created and used if not specified.
-  issuerRef: {}
-  #   group: cert-manager.io
-  #   kind: ClusterIssuer
-  #   name: selfsigned
-  # -- The duration of the certificate validity (e.g. `2160h`).
-  # See [cert-manager.io/v1.Certificate](https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.Certificate).
-  # @default -- `2160h` (90 days) will be used if not specified.
-  duration: ""
-  # -- The duration before the certificate expiration to renew the certificate (e.g. `720h`).
-  # See [cert-manager.io/v1.Certificate](https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.Certificate).
-  # @default -- 1/3 of issued certificate’s lifetime.
-  renewBefore: ""
@@ -1,11 +1,11 @@
 /*
-Copyright 2025 The Kubeflow authors.
+Copyright 2024 The Kubeflow authors.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
 
-    https://www.apache.org/licenses/LICENSE-2.0
+    http://www.apache.org/licenses/LICENSE-2.0
 
 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,8 +14,18 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package v1alpha1
+package main
 
-// SetSparkConnectDefaults sets default values for certain fields of a SparkConnect.
-func SetSparkConnectDefaults(conn *SparkConnect) {
+import (
+	"fmt"
+	"os"
+
+	"github.com/kubeflow/spark-operator/cmd/operator"
+)
+
+func main() {
+	if err := operator.NewCommand().Execute(); err != nil {
+		fmt.Fprintf(os.Stderr, "%v\n", err)
+		os.Exit(1)
+	}
 }
|
@ -18,9 +18,7 @@ package controller
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"crypto/tls"
|
"crypto/tls"
|
||||||
"encoding/json"
|
|
||||||
"flag"
|
"flag"
|
||||||
"fmt"
|
|
||||||
"os"
|
"os"
|
||||||
"slices"
|
"slices"
|
||||||
"time"
|
"time"
|
||||||
|
@ -35,7 +33,6 @@ import (
|
||||||
"go.uber.org/zap/zapcore"
|
"go.uber.org/zap/zapcore"
|
||||||
"golang.org/x/time/rate"
|
"golang.org/x/time/rate"
|
||||||
corev1 "k8s.io/api/core/v1"
|
corev1 "k8s.io/api/core/v1"
|
||||||
networkingv1 "k8s.io/api/networking/v1"
|
|
||||||
"k8s.io/apimachinery/pkg/labels"
|
"k8s.io/apimachinery/pkg/labels"
|
||||||
"k8s.io/apimachinery/pkg/runtime"
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||||
|
@ -52,19 +49,18 @@ import (
|
||||||
ctrlwebhook "sigs.k8s.io/controller-runtime/pkg/webhook"
|
ctrlwebhook "sigs.k8s.io/controller-runtime/pkg/webhook"
|
||||||
schedulingv1alpha1 "sigs.k8s.io/scheduler-plugins/apis/scheduling/v1alpha1"
|
schedulingv1alpha1 "sigs.k8s.io/scheduler-plugins/apis/scheduling/v1alpha1"
|
||||||
|
|
||||||
sparkoperator "github.com/kubeflow/spark-operator/v2"
|
sparkoperator "github.com/kubeflow/spark-operator"
|
||||||
"github.com/kubeflow/spark-operator/v2/api/v1alpha1"
|
"github.com/kubeflow/spark-operator/api/v1beta1"
|
||||||
"github.com/kubeflow/spark-operator/v2/api/v1beta2"
|
"github.com/kubeflow/spark-operator/api/v1beta2"
|
||||||
"github.com/kubeflow/spark-operator/v2/internal/controller/scheduledsparkapplication"
|
"github.com/kubeflow/spark-operator/internal/controller/scheduledsparkapplication"
|
||||||
"github.com/kubeflow/spark-operator/v2/internal/controller/sparkapplication"
|
"github.com/kubeflow/spark-operator/internal/controller/sparkapplication"
|
||||||
"github.com/kubeflow/spark-operator/v2/internal/controller/sparkconnect"
|
"github.com/kubeflow/spark-operator/internal/metrics"
|
||||||
"github.com/kubeflow/spark-operator/v2/internal/metrics"
|
"github.com/kubeflow/spark-operator/internal/scheduler"
|
||||||
"github.com/kubeflow/spark-operator/v2/internal/scheduler"
|
"github.com/kubeflow/spark-operator/internal/scheduler/kubescheduler"
|
||||||
"github.com/kubeflow/spark-operator/v2/internal/scheduler/kubescheduler"
|
"github.com/kubeflow/spark-operator/internal/scheduler/volcano"
|
||||||
"github.com/kubeflow/spark-operator/v2/internal/scheduler/volcano"
|
"github.com/kubeflow/spark-operator/internal/scheduler/yunikorn"
|
||||||
"github.com/kubeflow/spark-operator/v2/internal/scheduler/yunikorn"
|
"github.com/kubeflow/spark-operator/pkg/common"
|
||||||
"github.com/kubeflow/spark-operator/v2/pkg/common"
|
"github.com/kubeflow/spark-operator/pkg/util"
|
||||||
"github.com/kubeflow/spark-operator/v2/pkg/util"
|
|
||||||
// +kubebuilder:scaffold:imports
|
// +kubebuilder:scaffold:imports
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -77,9 +73,8 @@ var (
|
||||||
namespaces []string
|
namespaces []string
|
||||||
|
|
||||||
// Controller
|
// Controller
|
||||||
controllerThreads int
|
controllerThreads int
|
||||||
cacheSyncTimeout time.Duration
|
cacheSyncTimeout time.Duration
|
||||||
maxTrackedExecutorPerApp int
|
|
||||||
|
|
||||||
//WorkQueue
|
//WorkQueue
|
||||||
workqueueRateLimiterBucketQPS int
|
workqueueRateLimiterBucketQPS int
|
||||||
|
@ -92,11 +87,9 @@ var (
|
||||||
defaultBatchScheduler string
|
defaultBatchScheduler string
|
||||||
|
|
||||||
// Spark web UI service and ingress
|
// Spark web UI service and ingress
|
||||||
enableUIService bool
|
enableUIService bool
|
||||||
ingressClassName string
|
ingressClassName string
|
||||||
ingressURLFormat string
|
ingressURLFormat string
|
||||||
ingressTLS []networkingv1.IngressTLS
|
|
||||||
ingressAnnotations map[string]string
|
|
||||||
|
|
||||||
// Leader election
|
// Leader election
|
||||||
enableLeaderElection bool
|
enableLeaderElection bool
|
||||||
|
@ -106,8 +99,6 @@ var (
|
||||||
leaderElectionRenewDeadline time.Duration
|
leaderElectionRenewDeadline time.Duration
|
||||||
leaderElectionRetryPeriod time.Duration
|
leaderElectionRetryPeriod time.Duration
|
||||||
|
|
||||||
driverPodCreationGracePeriod time.Duration
|
|
||||||
|
|
||||||
// Metrics
|
// Metrics
|
||||||
enableMetrics bool
|
enableMetrics bool
|
||||||
metricsBindAddress string
|
metricsBindAddress string
|
||||||
|
@ -128,31 +119,17 @@ func init() {
|
||||||
utilruntime.Must(clientgoscheme.AddToScheme(scheme))
|
utilruntime.Must(clientgoscheme.AddToScheme(scheme))
|
||||||
utilruntime.Must(schedulingv1alpha1.AddToScheme(scheme))
|
utilruntime.Must(schedulingv1alpha1.AddToScheme(scheme))
|
||||||
|
|
||||||
utilruntime.Must(v1alpha1.AddToScheme(scheme))
|
utilruntime.Must(v1beta1.AddToScheme(scheme))
|
||||||
utilruntime.Must(v1beta2.AddToScheme(scheme))
|
utilruntime.Must(v1beta2.AddToScheme(scheme))
|
||||||
// +kubebuilder:scaffold:scheme
|
// +kubebuilder:scaffold:scheme
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewStartCommand() *cobra.Command {
|
func NewStartCommand() *cobra.Command {
|
||||||
var ingressTLSstring string
|
|
||||||
var ingressAnnotationsString string
|
|
||||||
var command = &cobra.Command{
|
var command = &cobra.Command{
|
||||||
Use: "start",
|
Use: "start",
|
||||||
Short: "Start controller and webhook",
|
Short: "Start controller and webhook",
|
||||||
PreRunE: func(_ *cobra.Command, args []string) error {
|
PreRun: func(_ *cobra.Command, args []string) {
|
||||||
development = viper.GetBool("development")
|
development = viper.GetBool("development")
|
||||||
|
|
||||||
if ingressTLSstring != "" {
|
|
||||||
if err := json.Unmarshal([]byte(ingressTLSstring), &ingressTLS); err != nil {
|
|
||||||
return fmt.Errorf("failed parsing ingress-tls JSON string from CLI: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if ingressAnnotationsString != "" {
|
|
||||||
if err := json.Unmarshal([]byte(ingressAnnotationsString), &ingressAnnotations); err != nil {
|
|
||||||
return fmt.Errorf("failed parsing ingress-annotations JSON string from CLI: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
},
|
},
|
||||||
Run: func(_ *cobra.Command, args []string) {
|
Run: func(_ *cobra.Command, args []string) {
|
||||||
sparkoperator.PrintVersion(false)
|
sparkoperator.PrintVersion(false)
|
||||||
|
@ -163,7 +140,6 @@ func NewStartCommand() *cobra.Command {
|
||||||
command.Flags().IntVar(&controllerThreads, "controller-threads", 10, "Number of worker threads used by the SparkApplication controller.")
|
command.Flags().IntVar(&controllerThreads, "controller-threads", 10, "Number of worker threads used by the SparkApplication controller.")
|
||||||
command.Flags().StringSliceVar(&namespaces, "namespaces", []string{}, "The Kubernetes namespace to manage. Will manage custom resource objects of the managed CRD types for the whole cluster if unset or contains empty string.")
|
command.Flags().StringSliceVar(&namespaces, "namespaces", []string{}, "The Kubernetes namespace to manage. Will manage custom resource objects of the managed CRD types for the whole cluster if unset or contains empty string.")
|
||||||
command.Flags().DurationVar(&cacheSyncTimeout, "cache-sync-timeout", 30*time.Second, "Informer cache sync timeout.")
|
command.Flags().DurationVar(&cacheSyncTimeout, "cache-sync-timeout", 30*time.Second, "Informer cache sync timeout.")
|
||||||
command.Flags().IntVar(&maxTrackedExecutorPerApp, "max-tracked-executor-per-app", 1000, "The maximum number of tracked executors per SparkApplication.")
|
|
||||||
|
|
||||||
command.Flags().IntVar(&workqueueRateLimiterBucketQPS, "workqueue-ratelimiter-bucket-qps", 10, "QPS of the bucket rate of the workqueue.")
|
command.Flags().IntVar(&workqueueRateLimiterBucketQPS, "workqueue-ratelimiter-bucket-qps", 10, "QPS of the bucket rate of the workqueue.")
|
||||||
command.Flags().IntVar(&workqueueRateLimiterBucketSize, "workqueue-ratelimiter-bucket-size", 100, "The token bucket size of the workqueue.")
|
command.Flags().IntVar(&workqueueRateLimiterBucketSize, "workqueue-ratelimiter-bucket-size", 100, "The token bucket size of the workqueue.")
|
||||||
|
@ -176,8 +152,6 @@ func NewStartCommand() *cobra.Command {
|
||||||
command.Flags().BoolVar(&enableUIService, "enable-ui-service", true, "Enable Spark Web UI service.")
|
command.Flags().BoolVar(&enableUIService, "enable-ui-service", true, "Enable Spark Web UI service.")
|
||||||
command.Flags().StringVar(&ingressClassName, "ingress-class-name", "", "Set ingressClassName for ingress resources created.")
|
command.Flags().StringVar(&ingressClassName, "ingress-class-name", "", "Set ingressClassName for ingress resources created.")
|
||||||
command.Flags().StringVar(&ingressURLFormat, "ingress-url-format", "", "Ingress URL format.")
|
command.Flags().StringVar(&ingressURLFormat, "ingress-url-format", "", "Ingress URL format.")
|
||||||
command.Flags().StringVar(&ingressTLSstring, "ingress-tls", "", "JSON format string for the default TLS config on the Spark UI ingresses. e.g. '[{\"hosts\":[\"*.example.com\"],\"secretName\":\"example-secret\"}]'. `ingressTLS` in the SparkApplication spec will override this value.")
|
|
||||||
command.Flags().StringVar(&ingressAnnotationsString, "ingress-annotations", "", "JSON format string for the default ingress annotations for the Spark UI ingresses. e.g. '[{\"cert-manager.io/cluster-issuer\": \"letsencrypt\"}]'. `ingressAnnotations` in the SparkApplication spec will override this value.")
|
|
||||||
|
|
||||||
command.Flags().BoolVar(&enableLeaderElection, "leader-election", false, "Enable leader election for controller manager. "+
|
command.Flags().BoolVar(&enableLeaderElection, "leader-election", false, "Enable leader election for controller manager. "+
|
||||||
"Enabling this will ensure there is only one active controller manager.")
|
"Enabling this will ensure there is only one active controller manager.")
|
||||||
|
@ -187,8 +161,6 @@ func NewStartCommand() *cobra.Command {
|
||||||
command.Flags().DurationVar(&leaderElectionRenewDeadline, "leader-election-renew-deadline", 14*time.Second, "Leader election renew deadline.")
|
command.Flags().DurationVar(&leaderElectionRenewDeadline, "leader-election-renew-deadline", 14*time.Second, "Leader election renew deadline.")
|
||||||
command.Flags().DurationVar(&leaderElectionRetryPeriod, "leader-election-retry-period", 4*time.Second, "Leader election retry period.")
|
command.Flags().DurationVar(&leaderElectionRetryPeriod, "leader-election-retry-period", 4*time.Second, "Leader election retry period.")
|
||||||
|
|
||||||
command.Flags().DurationVar(&driverPodCreationGracePeriod, "driver-pod-creation-grace-period", 10*time.Second, "Grace period after a successful spark-submit when driver pod not found errors will be retried. Useful if the driver pod can take some time to be created.")
|
|
||||||
|
|
||||||
command.Flags().BoolVar(&enableMetrics, "enable-metrics", false, "Enable metrics.")
|
command.Flags().BoolVar(&enableMetrics, "enable-metrics", false, "Enable metrics.")
|
||||||
command.Flags().StringVar(&metricsBindAddress, "metrics-bind-address", "0", "The address the metric endpoint binds to. "+
|
command.Flags().StringVar(&metricsBindAddress, "metrics-bind-address", "0", "The address the metric endpoint binds to. "+
|
||||||
"Use the port :8080. If not set, it will be 0 in order to disable the metrics server")
|
"Use the port :8080. If not set, it will be 0 in order to disable the metrics server")
|
||||||
|
@ -286,8 +258,6 @@ func start() {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
sparkSubmitter := &sparkapplication.SparkSubmitter{}
|
|
||||||
|
|
||||||
// Setup controller for SparkApplication.
|
// Setup controller for SparkApplication.
|
||||||
if err = sparkapplication.NewReconciler(
|
if err = sparkapplication.NewReconciler(
|
||||||
mgr,
|
mgr,
|
||||||
|
@ -295,7 +265,6 @@ func start() {
|
||||||
mgr.GetClient(),
|
mgr.GetClient(),
|
||||||
mgr.GetEventRecorderFor("spark-application-controller"),
|
mgr.GetEventRecorderFor("spark-application-controller"),
|
||||||
registry,
|
registry,
|
||||||
sparkSubmitter,
|
|
||||||
newSparkApplicationReconcilerOptions(),
|
newSparkApplicationReconcilerOptions(),
|
||||||
).SetupWithManager(mgr, newControllerOptions()); err != nil {
|
).SetupWithManager(mgr, newControllerOptions()); err != nil {
|
||||||
logger.Error(err, "Failed to create controller", "controller", "SparkApplication")
|
logger.Error(err, "Failed to create controller", "controller", "SparkApplication")
|
||||||
|
@ -314,18 +283,6 @@ func start() {
|
||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Setup controller for SparkConnect.
|
|
||||||
if err = sparkconnect.NewReconciler(
|
|
||||||
mgr,
|
|
||||||
mgr.GetScheme(),
|
|
||||||
mgr.GetClient(),
|
|
||||||
mgr.GetEventRecorderFor("SparkConnect"),
|
|
||||||
newSparkConnectReconcilerOptions(),
|
|
||||||
).SetupWithManager(mgr, newControllerOptions()); err != nil {
|
|
||||||
logger.Error(err, "Failed to create controller", "controller", "SparkConnect")
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
// +kubebuilder:scaffold:builder
|
// +kubebuilder:scaffold:builder
|
||||||
|
|
||||||
if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
|
if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
|
||||||
|
@ -351,12 +308,19 @@ func setupLog() {
|
||||||
logzap.UseFlagOptions(&zapOptions),
|
logzap.UseFlagOptions(&zapOptions),
|
||||||
func(o *logzap.Options) {
|
func(o *logzap.Options) {
|
||||||
o.Development = development
|
o.Development = development
|
||||||
|
}, func(o *logzap.Options) {
|
||||||
o.ZapOpts = append(o.ZapOpts, zap.AddCaller())
|
o.ZapOpts = append(o.ZapOpts, zap.AddCaller())
|
||||||
o.EncoderConfigOptions = append(o.EncoderConfigOptions, func(config *zapcore.EncoderConfig) {
|
}, func(o *logzap.Options) {
|
||||||
config.EncodeLevel = zapcore.CapitalLevelEncoder
|
var config zapcore.EncoderConfig
|
||||||
config.EncodeTime = zapcore.ISO8601TimeEncoder
|
if !development {
|
||||||
config.EncodeCaller = zapcore.ShortCallerEncoder
|
config = zap.NewProductionEncoderConfig()
|
||||||
})
|
} else {
|
||||||
|
config = zap.NewDevelopmentEncoderConfig()
|
||||||
|
}
|
||||||
|
config.EncodeLevel = zapcore.CapitalColorLevelEncoder
|
||||||
|
config.EncodeTime = zapcore.ISO8601TimeEncoder
|
||||||
|
config.EncodeCaller = zapcore.ShortCallerEncoder
|
||||||
|
o.Encoder = zapcore.NewConsoleEncoder(config)
|
||||||
}),
|
}),
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
@ -398,12 +362,10 @@ func newCacheOptions() cache.Options {
|
||||||
common.LabelLaunchedBySparkOperator: "true",
|
common.LabelLaunchedBySparkOperator: "true",
|
||||||
}),
|
}),
|
||||||
},
|
},
|
||||||
&corev1.ConfigMap{}: {},
|
&corev1.ConfigMap{}: {},
|
||||||
&corev1.PersistentVolumeClaim{}: {},
|
&corev1.PersistentVolumeClaim{}: {},
|
||||||
&corev1.Service{}: {},
|
&corev1.Service{}: {},
|
||||||
&v1beta2.SparkApplication{}: {},
|
&v1beta2.SparkApplication{}: {},
|
||||||
&v1beta2.ScheduledSparkApplication{}: {},
|
|
||||||
&v1alpha1.SparkConnect{}: {},
|
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -415,7 +377,7 @@ func newControllerOptions() controller.Options {
|
||||||
options := controller.Options{
|
options := controller.Options{
|
||||||
MaxConcurrentReconciles: controllerThreads,
|
MaxConcurrentReconciles: controllerThreads,
|
||||||
CacheSyncTimeout: cacheSyncTimeout,
|
CacheSyncTimeout: cacheSyncTimeout,
|
||||||
RateLimiter: util.NewRateLimiter[ctrl.Request](workqueueRateLimiterBucketQPS, workqueueRateLimiterBucketSize, workqueueRateLimiterMaxDelay),
|
RateLimiter: util.NewRateLimiter(workqueueRateLimiterBucketQPS, workqueueRateLimiterBucketSize, workqueueRateLimiterMaxDelay),
|
||||||
}
|
}
|
||||||
return options
|
return options
|
||||||
}
|
}
|
||||||
|
@ -430,17 +392,13 @@ func newSparkApplicationReconcilerOptions() sparkapplication.Options {
|
||||||
sparkExecutorMetrics.Register()
|
sparkExecutorMetrics.Register()
|
||||||
}
|
}
|
||||||
options := sparkapplication.Options{
|
options := sparkapplication.Options{
|
||||||
Namespaces: namespaces,
|
Namespaces: namespaces,
|
||||||
EnableUIService: enableUIService,
|
EnableUIService: enableUIService,
|
||||||
IngressClassName: ingressClassName,
|
IngressClassName: ingressClassName,
|
||||||
IngressURLFormat: ingressURLFormat,
|
IngressURLFormat: ingressURLFormat,
|
||||||
IngressTLS: ingressTLS,
|
DefaultBatchScheduler: defaultBatchScheduler,
|
||||||
IngressAnnotations: ingressAnnotations,
|
SparkApplicationMetrics: sparkApplicationMetrics,
|
||||||
DefaultBatchScheduler: defaultBatchScheduler,
|
SparkExecutorMetrics: sparkExecutorMetrics,
|
||||||
DriverPodCreationGracePeriod: driverPodCreationGracePeriod,
|
|
||||||
SparkApplicationMetrics: sparkApplicationMetrics,
|
|
||||||
SparkExecutorMetrics: sparkExecutorMetrics,
|
|
||||||
MaxTrackedExecutorPerApp: maxTrackedExecutorPerApp,
|
|
||||||
}
|
}
|
||||||
if enableBatchScheduler {
|
if enableBatchScheduler {
|
||||||
options.KubeSchedulerNames = kubeSchedulerNames
|
options.KubeSchedulerNames = kubeSchedulerNames
|
||||||
|
@ -454,10 +412,3 @@ func newScheduledSparkApplicationReconcilerOptions() scheduledsparkapplication.O
|
||||||
}
|
}
|
||||||
return options
|
return options
|
||||||
}
|
}
|
||||||
|
|
||||||
func newSparkConnectReconcilerOptions() sparkconnect.Options {
|
|
||||||
options := sparkconnect.Options{
|
|
||||||
Namespaces: namespaces,
|
|
||||||
}
|
|
||||||
return options
|
|
||||||
}
|
|
||||||
|
|
|
@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at

-    http://www.apache.org/licenses/LICENSE-2.0
+    https://www.apache.org/licenses/LICENSE-2.0

 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
@ -14,17 +14,14 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */

-package main
+package operator

 import (
-	"fmt"
-	"os"
-
 	"github.com/spf13/cobra"

-	"github.com/kubeflow/spark-operator/v2/cmd/operator/controller"
-	"github.com/kubeflow/spark-operator/v2/cmd/operator/version"
-	"github.com/kubeflow/spark-operator/v2/cmd/operator/webhook"
+	"github.com/kubeflow/spark-operator/cmd/operator/controller"
+	"github.com/kubeflow/spark-operator/cmd/operator/version"
+	"github.com/kubeflow/spark-operator/cmd/operator/webhook"
 )

 func NewCommand() *cobra.Command {
@ -40,10 +37,3 @@ func NewCommand() *cobra.Command {
 	command.AddCommand(version.NewCommand())
 	return command
 }
-
-func main() {
-	if err := NewCommand().Execute(); err != nil {
-		fmt.Fprintf(os.Stderr, "%v\n", err)
-		os.Exit(1)
-	}
-}

@ -19,7 +19,7 @@ package version
 import (
 	"github.com/spf13/cobra"

-	sparkoperator "github.com/kubeflow/spark-operator/v2"
+	sparkoperator "github.com/kubeflow/spark-operator"
 )

 var (

@ -48,14 +48,15 @@ import (
 	metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
 	ctrlwebhook "sigs.k8s.io/controller-runtime/pkg/webhook"

-	sparkoperator "github.com/kubeflow/spark-operator/v2"
-	"github.com/kubeflow/spark-operator/v2/api/v1beta2"
-	"github.com/kubeflow/spark-operator/v2/internal/controller/mutatingwebhookconfiguration"
-	"github.com/kubeflow/spark-operator/v2/internal/controller/validatingwebhookconfiguration"
-	"github.com/kubeflow/spark-operator/v2/internal/webhook"
-	"github.com/kubeflow/spark-operator/v2/pkg/certificate"
-	"github.com/kubeflow/spark-operator/v2/pkg/common"
-	"github.com/kubeflow/spark-operator/v2/pkg/util"
+	sparkoperator "github.com/kubeflow/spark-operator"
+	"github.com/kubeflow/spark-operator/api/v1beta1"
+	"github.com/kubeflow/spark-operator/api/v1beta2"
+	"github.com/kubeflow/spark-operator/internal/controller/mutatingwebhookconfiguration"
+	"github.com/kubeflow/spark-operator/internal/controller/validatingwebhookconfiguration"
+	"github.com/kubeflow/spark-operator/internal/webhook"
+	"github.com/kubeflow/spark-operator/pkg/certificate"
+	"github.com/kubeflow/spark-operator/pkg/common"
+	"github.com/kubeflow/spark-operator/pkg/util"
 	// +kubebuilder:scaffold:imports
 )

@ -85,9 +86,6 @@ var (
 	webhookServiceName      string
 	webhookServiceNamespace string
-
-	// Cert Manager
-	enableCertManager bool

 	// Leader election
 	enableLeaderElection   bool
 	leaderElectionLockName string
@ -113,6 +111,7 @@ var (
 func init() {
 	utilruntime.Must(clientgoscheme.AddToScheme(scheme))

+	utilruntime.Must(v1beta1.AddToScheme(scheme))
 	utilruntime.Must(v1beta2.AddToScheme(scheme))
 	// +kubebuilder:scaffold:scheme
 }
@ -130,13 +129,11 @@ func NewStartCommand() *cobra.Command {
 		},
 	}

-	// Controller
 	command.Flags().IntVar(&controllerThreads, "controller-threads", 10, "Number of worker threads used by the SparkApplication controller.")
 	command.Flags().StringSliceVar(&namespaces, "namespaces", []string{}, "The Kubernetes namespace to manage. Will manage custom resource objects of the managed CRD types for the whole cluster if unset or contains empty string.")
 	command.Flags().StringVar(&labelSelectorFilter, "label-selector-filter", "", "A comma-separated list of key=value, or key labels to filter resources during watch and list based on the specified labels.")
 	command.Flags().DurationVar(&cacheSyncTimeout, "cache-sync-timeout", 30*time.Second, "Informer cache sync timeout.")

-	// Webhook
 	command.Flags().StringVar(&webhookCertDir, "webhook-cert-dir", "/etc/k8s-webhook-server/serving-certs", "The directory that contains the webhook server key and certificate. "+
 		"When running as nonRoot, you must create and own this directory before running this command.")
 	command.Flags().StringVar(&webhookCertName, "webhook-cert-name", "tls.crt", "The file name of webhook server certificate.")
@ -150,10 +147,6 @@ func NewStartCommand() *cobra.Command {
 	command.Flags().StringVar(&webhookServiceNamespace, "webhook-svc-namespace", "spark-webhook", "The name of the Service for the webhook server.")
 	command.Flags().BoolVar(&enableResourceQuotaEnforcement, "enable-resource-quota-enforcement", false, "Whether to enable ResourceQuota enforcement for SparkApplication resources. Requires the webhook to be enabled.")

-	// Cert Manager
-	command.Flags().BoolVar(&enableCertManager, "enable-cert-manager", false, "Enable cert-manager to manage the webhook server's TLS certificate.")
-
-	// Leader election
 	command.Flags().BoolVar(&enableLeaderElection, "leader-election", false, "Enable leader election for controller manager. "+
 		"Enabling this will ensure there is only one active controller manager.")
 	command.Flags().StringVar(&leaderElectionLockName, "leader-election-lock-name", "spark-operator-lock", "Name of the ConfigMap for leader election.")
@ -162,7 +155,6 @@ func NewStartCommand() *cobra.Command {
 	command.Flags().DurationVar(&leaderElectionRenewDeadline, "leader-election-renew-deadline", 14*time.Second, "Leader election renew deadline.")
 	command.Flags().DurationVar(&leaderElectionRetryPeriod, "leader-election-retry-period", 4*time.Second, "Leader election retry period.")

-	// Prometheus metrics
 	command.Flags().BoolVar(&enableMetrics, "enable-metrics", false, "Enable metrics.")
 	command.Flags().StringVar(&metricsBindAddress, "metrics-bind-address", "0", "The address the metric endpoint binds to. "+
 		"Use the port :8080. If not set, it will be 0 in order to disable the metrics server")
@ -206,7 +198,6 @@ func start() {
 		Port:     webhookPort,
 		CertDir:  webhookCertDir,
 		CertName: webhookCertName,
-		KeyName:  webhookKeyName,
 		TLSOpts:  tlsOptions,
 	}),
 	HealthProbeBindAddress: healthProbeBindAddress,
@ -240,7 +231,6 @@ func start() {
 		client,
 		webhookServiceName,
 		webhookServiceNamespace,
-		enableCertManager,
 	)

 	if err := wait.ExponentialBackoff(
@ -251,6 +241,7 @@ func start() {
 			Jitter:   0.1,
 		},
 		func() (bool, error) {
+			logger.Info("Syncing webhook secret", "name", webhookSecretName, "namespace", webhookSecretNamespace)
 			if err := certProvider.SyncSecret(context.TODO(), webhookSecretName, webhookSecretNamespace); err != nil {
 				if errors.IsAlreadyExists(err) || errors.IsConflict(err) {
 					return false, nil
@ -270,24 +261,22 @@ func start() {
 		os.Exit(1)
 	}

-	if !enableCertManager {
-		if err := mutatingwebhookconfiguration.NewReconciler(
-			mgr.GetClient(),
-			certProvider,
-			mutatingWebhookName,
-		).SetupWithManager(mgr, controller.Options{}); err != nil {
-			logger.Error(err, "Failed to create controller", "controller", "MutatingWebhookConfiguration")
-			os.Exit(1)
-		}
+	if err := mutatingwebhookconfiguration.NewReconciler(
+		mgr.GetClient(),
+		certProvider,
+		mutatingWebhookName,
+	).SetupWithManager(mgr, controller.Options{}); err != nil {
+		logger.Error(err, "Failed to create controller", "controller", "MutatingWebhookConfiguration")
+		os.Exit(1)
+	}

 	if err := validatingwebhookconfiguration.NewReconciler(
 		mgr.GetClient(),
 		certProvider,
 		validatingWebhookName,
 	).SetupWithManager(mgr, controller.Options{}); err != nil {
 		logger.Error(err, "Failed to create controller", "controller", "ValidatingWebhookConfiguration")
 		os.Exit(1)
-		}
 	}

 	if err := ctrl.NewWebhookManagedBy(mgr).

@ -0,0 +1,10 @@
+coverage:
+  status:
+    project:
+      default:
+        threshold: 0.1%
+    patch:
+      default:
+        target: 60%
+ignore:
+- "**/*_generated.*"
File diff suppressed because it is too large
File diff suppressed because it is too large
@ -1,272 +0,0 @@
----
-apiVersion: apiextensions.k8s.io/v1
-kind: CustomResourceDefinition
-metadata:
-  annotations:
-    api-approved.kubernetes.io: https://github.com/kubeflow/spark-operator/pull/1298
-    controller-gen.kubebuilder.io/version: v0.17.1
-  name: sparkconnects.sparkoperator.k8s.io
-spec:
-  group: sparkoperator.k8s.io
-  names:
-    kind: SparkConnect
-    listKind: SparkConnectList
-    plural: sparkconnects
-    shortNames:
-    - sparkconn
-    singular: sparkconnect
-  scope: Namespaced
-  versions:
-  - additionalPrinterColumns:
-    - jsonPath: .metadata.creationTimestamp
-      name: Age
-      type: date
-    name: v1alpha1
-    schema:
-      openAPIV3Schema:
-        description: SparkConnect is the Schema for the sparkconnections API.
-        properties:
-          apiVersion:
-            description: |-
-              APIVersion defines the versioned schema of this representation of an object.
-              Servers should convert recognized schemas to the latest internal value, and
-              may reject unrecognized values.
-              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
-            type: string
-          kind:
-            description: |-
-              Kind is a string value representing the REST resource this object represents.
-              Servers may infer this from the endpoint the client submits requests to.
-              Cannot be updated.
-              In CamelCase.
-              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
-            type: string
-          metadata:
-            type: object
-          spec:
-            description: SparkConnectSpec defines the desired state of SparkConnect.
-            properties:
-              dynamicAllocation:
-                description: |-
-                  DynamicAllocation configures dynamic allocation that becomes available for the Kubernetes
-                  scheduler backend since Spark 3.0.
-                properties:
-                  enabled:
-                    description: Enabled controls whether dynamic allocation is enabled
-                      or not.
-                    type: boolean
-                  initialExecutors:
-                    description: |-
-                      InitialExecutors is the initial number of executors to request. If .spec.executor.instances
-                      is also set, the initial number of executors is set to the bigger of that and this option.
-                    format: int32
-                    type: integer
-                  maxExecutors:
-                    description: MaxExecutors is the upper bound for the number of
-                      executors if dynamic allocation is enabled.
-                    format: int32
-                    type: integer
-                  minExecutors:
-                    description: MinExecutors is the lower bound for the number of
-                      executors if dynamic allocation is enabled.
-                    format: int32
-                    type: integer
-                  shuffleTrackingEnabled:
-                    description: |-
-                      ShuffleTrackingEnabled enables shuffle file tracking for executors, which allows dynamic allocation without
-                      the need for an external shuffle service. This option will try to keep alive executors that are storing
-                      shuffle data for active jobs. If external shuffle service is enabled, set ShuffleTrackingEnabled to false.
-                      ShuffleTrackingEnabled is true by default if dynamicAllocation.enabled is true.
-                    type: boolean
-                  shuffleTrackingTimeout:
-                    description: |-
-                      ShuffleTrackingTimeout controls the timeout in milliseconds for executors that are holding
-                      shuffle data if shuffle tracking is enabled (true by default if dynamic allocation is enabled).
-                    format: int64
-                    type: integer
-                type: object
-              executor:
-                description: Executor is the Spark executor specification.
-                properties:
-                  cores:
-                    description: Cores maps to `spark.driver.cores` or `spark.executor.cores`
-                      for the driver and executors, respectively.
-                    format: int32
-                    minimum: 1
-                    type: integer
-                  instances:
-                    description: Instances is the number of executor instances.
-                    format: int32
-                    minimum: 0
-                    type: integer
-                  memory:
-                    description: Memory is the amount of memory to request for the
-                      pod.
-                    type: string
-                  template:
-                    description: |-
-                      Template is a pod template that can be used to define the driver or executor pod configurations that Spark configurations do not support.
-                      Spark version >= 3.0.0 is required.
-                      Ref: https://spark.apache.org/docs/latest/running-on-kubernetes.html#pod-template.
-                    type: object
-                    x-kubernetes-preserve-unknown-fields: true
-                type: object
-              hadoopConf:
-                additionalProperties:
-                  type: string
-                description: |-
-                  HadoopConf carries user-specified Hadoop configuration properties as they would use the "--conf" option
-                  in spark-submit. The SparkApplication controller automatically adds prefix "spark.hadoop." to Hadoop
-                  configuration properties.
-                type: object
-              image:
-                description: |-
-                  Image is the container image for the driver, executor, and init-container. Any custom container images for the
-                  driver, executor, or init-container takes precedence over this.
-                type: string
-              server:
-                description: Server is the Spark connect server specification.
-                properties:
-                  cores:
-                    description: Cores maps to `spark.driver.cores` or `spark.executor.cores`
-                      for the driver and executors, respectively.
-                    format: int32
-                    minimum: 1
-                    type: integer
-                  memory:
-                    description: Memory is the amount of memory to request for the
-                      pod.
-                    type: string
-                  template:
-                    description: |-
-                      Template is a pod template that can be used to define the driver or executor pod configurations that Spark configurations do not support.
-                      Spark version >= 3.0.0 is required.
-                      Ref: https://spark.apache.org/docs/latest/running-on-kubernetes.html#pod-template.
-                    type: object
-                    x-kubernetes-preserve-unknown-fields: true
-                type: object
-              sparkConf:
-                additionalProperties:
-                  type: string
-                description: |-
-                  SparkConf carries user-specified Spark configuration properties as they would use the "--conf" option in
-                  spark-submit.
-                type: object
-              sparkVersion:
-                description: SparkVersion is the version of Spark the spark connect
-                  use.
-                type: string
-            required:
-            - executor
-            - server
-            - sparkVersion
-            type: object
-          status:
-            description: SparkConnectStatus defines the observed state of SparkConnect.
-            properties:
-              conditions:
-                description: Represents the latest available observations of a SparkConnect's
-                  current state.
-                items:
-                  description: Condition contains details for one aspect of the current
-                    state of this API Resource.
-                  properties:
-                    lastTransitionTime:
-                      description: |-
-                        lastTransitionTime is the last time the condition transitioned from one status to another.
-                        This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
-                      format: date-time
-                      type: string
-                    message:
-                      description: |-
-                        message is a human readable message indicating details about the transition.
-                        This may be an empty string.
-                      maxLength: 32768
-                      type: string
-                    observedGeneration:
-                      description: |-
-                        observedGeneration represents the .metadata.generation that the condition was set based upon.
-                        For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
-                        with respect to the current state of the instance.
-                      format: int64
-                      minimum: 0
-                      type: integer
-                    reason:
-                      description: |-
-                        reason contains a programmatic identifier indicating the reason for the condition's last transition.
-                        Producers of specific condition types may define expected values and meanings for this field,
-                        and whether the values are considered a guaranteed API.
-                        The value should be a CamelCase string.
-                        This field may not be empty.
-                      maxLength: 1024
-                      minLength: 1
-                      pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
-                      type: string
-                    status:
-                      description: status of the condition, one of True, False, Unknown.
-                      enum:
-                      - "True"
-                      - "False"
-                      - Unknown
-                      type: string
-                    type:
-                      description: type of condition in CamelCase or in foo.example.com/CamelCase.
-                      maxLength: 316
-                      pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
-                      type: string
-                  required:
-                  - lastTransitionTime
-                  - message
-                  - reason
-                  - status
-                  - type
-                  type: object
-                type: array
-                x-kubernetes-list-map-keys:
-                - type
-                x-kubernetes-list-type: map
-              executors:
-                additionalProperties:
-                  type: integer
-                description: Executors represents the current state of the SparkConnect
-                  executors.
-                type: object
-              lastUpdateTime:
-                description: LastUpdateTime is the time at which the SparkConnect
-                  controller last updated the SparkConnect.
-                format: date-time
-                type: string
-              server:
-                description: Server represents the current state of the SparkConnect
-                  server.
-                properties:
-                  podIp:
-                    description: PodIP is the IP address of the pod that is running
-                      the Spark Connect server.
-                    type: string
-                  podName:
-                    description: PodName is the name of the pod that is running the
-                      Spark Connect server.
-                    type: string
-                  serviceName:
-                    description: ServiceName is the name of the service that is exposing
-                      the Spark Connect server.
-                    type: string
-                type: object
-              startTime:
-                description: StartTime is the time at which the SparkConnect controller
-                  started processing the SparkConnect.
-                format: date-time
-                type: string
-              state:
-                description: State represents the current state of the SparkConnect.
-                type: string
-            type: object
-        required:
-        - metadata
-        - spec
-        type: object
-    served: true
-    storage: true
-    subresources:
-      status: {}

@ -46,9 +46,6 @@ rules:
   - create
   - delete
   - get
-  - list
-  - update
-  - watch
 - apiGroups:
   - apiextensions.k8s.io
   resources:
@ -57,6 +54,17 @@ rules:
   - get
 - apiGroups:
   - extensions
+  resources:
+  - ingresses
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
   - networking.k8s.io
   resources:
   - ingresses
@ -65,14 +73,13 @@ rules:
   - delete
   - get
   - list
+  - patch
   - update
   - watch
 - apiGroups:
   - sparkoperator.k8s.io
   resources:
   - scheduledsparkapplications
-  - sparkapplications
-  - sparkconnects
   verbs:
   - create
   - delete
@ -85,15 +92,38 @@ rules:
   - sparkoperator.k8s.io
   resources:
   - scheduledsparkapplications/finalizers
-  - sparkapplications/finalizers
   verbs:
   - update
 - apiGroups:
   - sparkoperator.k8s.io
   resources:
   - scheduledsparkapplications/status
-  - sparkapplications/status
-  - sparkconnects/status
+  verbs:
+  - get
+  - patch
+  - update
+- apiGroups:
+  - sparkoperator.k8s.io
+  resources:
+  - sparkapplications
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - sparkoperator.k8s.io
+  resources:
+  - sparkapplications/finalizers
+  verbs:
+  - update
+- apiGroups:
+  - sparkoperator.k8s.io
+  resources:
+  - sparkapplications/status
   verbs:
   - get
   - patch

@ -1,57 +0,0 @@
-#
-# Copyright 2024 The Kubeflow authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# https://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: spark-operator-spark
-  namespace: default
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: Role
-metadata:
-  namespace: default
-  name: spark-role
-rules:
-- apiGroups:
-  - ""
-  resources:
-  - pods
-  - configmaps
-  - persistentvolumeclaims
-  - services
-  verbs:
-  - get
-  - list
-  - watch
-  - create
-  - update
-  - patch
-  - delete
-  - deletecollection
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
-metadata:
-  name: spark-role-binding
-  namespace: default
-subjects:
-- kind: ServiceAccount
-  name: spark-operator-spark
-  namespace: default
-roleRef:
-  kind: Role
-  name: spark-role
-  apiGroup: rbac.authorization.k8s.io

@ -1,5 +1,7 @@
 ## Append samples of your project ##
 resources:
+- v1beta1_sparkapplication.yaml
+- v1beta1_scheduledsparkapplication.yaml
 - v1beta2_sparkapplication.yaml
 - v1beta2_scheduledsparkapplication.yaml
 # +kubebuilder:scaffold:manifestskustomizesamples

@ -0,0 +1,9 @@
+apiVersion: sparkoperator.k8s.io/v1beta1
+kind: ScheduledSparkApplication
+metadata:
+  labels:
+    app.kubernetes.io/name: spark-operator
+    app.kubernetes.io/managed-by: kustomize
+  name: scheduledsparkapplication-sample
+spec:
+  # TODO(user): Add fields here
@ -0,0 +1,23 @@
+apiVersion: sparkoperator.k8s.io/v1beta1
+kind: SparkApplication
+metadata:
+  labels:
+    app.kubernetes.io/name: spark-operator
+    app.kubernetes.io/managed-by: kustomize
+  name: sparkapplication-sample
+spec:
+  type: Scala
+  mode: cluster
+  image: spark:3.5.2
+  imagePullPolicy: IfNotPresent
+  mainClass: org.apache.spark.examples.SparkPi
+  mainApplicationFile: local:///opt/spark/examples/jars/spark-examples_2.12-3.5.2.jar
+  sparkVersion: 3.5.2
+  driver:
+    labels:
+      version: 3.5.2
+    serviceAccount: spark-operator-spark
+  executor:
+    labels:
+      version: 3.5.2
+    instances: 1
@ -11,23 +11,23 @@ spec:
   template:
     type: Scala
     mode: cluster
-    image: docker.io/library/spark:4.0.0
+    image: spark:3.5.2
     imagePullPolicy: IfNotPresent
     mainClass: org.apache.spark.examples.SparkPi
-    mainApplicationFile: local:///opt/spark/examples/jars/spark-examples.jar
-    sparkVersion: 4.0.0
+    mainApplicationFile: local:///opt/spark/examples/jars/spark-examples_2.12-3.5.2.jar
+    sparkVersion: 3.5.2
     restartPolicy:
       type: Never
     driver:
       labels:
-        version: 4.0.0
+        version: 3.5.2
       cores: 1
       coreLimit: 1200m
       memory: 512m
       serviceAccount: spark-operator-spark
     executor:
       labels:
-        version: 4.0.0
+        version: 3.5.2
       instances: 1
       cores: 1
       coreLimit: 1200m

@ -8,16 +8,16 @@ metadata:
 spec:
   type: Scala
   mode: cluster
-  image: docker.io/library/spark:4.0.0
+  image: spark:3.5.2
   imagePullPolicy: IfNotPresent
   mainClass: org.apache.spark.examples.SparkPi
-  mainApplicationFile: local:///opt/spark/examples/jars/spark-examples.jar
-  sparkVersion: 4.0.0
+  mainApplicationFile: local:///opt/spark/examples/jars/spark-examples_2.12-3.5.2.jar
+  sparkVersion: 3.5.2
   driver:
     labels:
-      version: 4.0.0
+      version: 3.5.2
     serviceAccount: spark-operator-spark
   executor:
     labels:
-      version: 4.0.0
+      version: 3.5.2
     instances: 1

docs/api-docs.md (1327 lines)
File diff suppressed because it is too large
@ -4,23 +4,21 @@
 set -ex

 # Check whether there is a passwd entry for the container UID
-myuid="$(id -u)"
-# If there is no passwd entry for the container UID, attempt to fake one
-# You can also refer to the https://github.com/docker-library/official-images/pull/13089#issuecomment-1534706523
-# It's to resolve OpenShift random UID case.
-# See also: https://github.com/docker-library/postgres/pull/448
-if ! getent passwd "$myuid" &> /dev/null; then
-  for wrapper in {/usr,}/lib{/*,}/libnss_wrapper.so; do
-    if [ -s "$wrapper" ]; then
-      NSS_WRAPPER_PASSWD="$(mktemp)"
-      NSS_WRAPPER_GROUP="$(mktemp)"
-      export LD_PRELOAD="$wrapper" NSS_WRAPPER_PASSWD NSS_WRAPPER_GROUP
-      mygid="$(id -g)"
-      printf 'spark:x:%s:%s:${SPARK_USER_NAME:-anonymous uid}:%s:/bin/false\n' "$myuid" "$mygid" "$SPARK_HOME" > "$NSS_WRAPPER_PASSWD"
-      printf 'spark:x:%s:\n' "$mygid" > "$NSS_WRAPPER_GROUP"
-      break
-    fi
-  done
-fi
+uid=$(id -u)
+gid=$(id -g)
+
+# turn off -e for getent because it will return error code in anonymous uid case
+set +e
+uidentry=$(getent passwd $uid)
+set -e
+
+# If there is no passwd entry for the container UID, attempt to create one
+if [[ -z "$uidentry" ]] ; then
+    if [[ -w /etc/passwd ]] ; then
+        echo "$uid:x:$uid:$gid:anonymous uid:$SPARK_HOME:/bin/false" >> /etc/passwd
+    else
+        echo "Container ENTRYPOINT failed to add passwd entry for anonymous UID"
+    fi
+fi

 exec /usr/bin/tini -s -- /usr/bin/spark-operator "$@"

@ -21,11 +21,11 @@ metadata:
 spec:
   type: Scala
   mode: cluster
-  image: docker.io/library/spark:4.0.0
+  image: spark:3.5.2
   imagePullPolicy: IfNotPresent
   mainClass: org.apache.spark.examples.SparkPi
-  mainApplicationFile: local:///opt/spark/examples/jars/spark-examples.jar
-  sparkVersion: 4.0.0
+  mainApplicationFile: local:///opt/spark/examples/jars/spark-examples_2.12-3.5.2.jar
+  sparkVersion: 3.5.2
   restartPolicy:
     type: Never
   volumes:
@ -39,16 +39,6 @@ spec:
     - name: config-vol
       mountPath: /opt/spark/config
     serviceAccount: spark-operator-spark
-    securityContext:
-      capabilities:
-        drop:
-        - ALL
-      runAsGroup: 185
-      runAsUser: 185
-      runAsNonRoot: true
-      allowPrivilegeEscalation: false
-      seccompProfile:
-        type: RuntimeDefault
   executor:
     instances: 1
     cores: 1
@ -56,13 +46,3 @@ spec:
     volumeMounts:
     - name: config-vol
       mountPath: /opt/spark/config
-    securityContext:
-      capabilities:
-        drop:
-        - ALL
-      runAsGroup: 185
-      runAsUser: 185
-      runAsNonRoot: true
-      allowPrivilegeEscalation: false
-      seccompProfile:
-        type: RuntimeDefault

@ -21,11 +21,11 @@ metadata:
 spec:
   type: Scala
   mode: cluster
-  image: docker.io/library/spark:4.0.0
+  image: spark:3.5.2
   imagePullPolicy: IfNotPresent
   mainClass: org.apache.spark.examples.SparkPi
-  mainApplicationFile: local:///opt/spark/examples/jars/spark-examples.jar
-  sparkVersion: 4.0.0
+  mainApplicationFile: local:///opt/spark/examples/jars/spark-examples_2.12-3.5.2.jar
+  sparkVersion: 3.5.2
   restartPolicy:
     type: Never
   driver:
@ -33,28 +33,8 @@ spec:
     coreLimit: 800m
     memory: 512m
     serviceAccount: spark-operator-spark
-    securityContext:
-      capabilities:
-        drop:
-        - ALL
-      runAsGroup: 185
-      runAsUser: 185
-      runAsNonRoot: true
-      allowPrivilegeEscalation: false
-      seccompProfile:
-        type: RuntimeDefault
   executor:
     instances: 1
     coreRequest: "1200m"
     coreLimit: 1500m
     memory: 512m
-    securityContext:
-      capabilities:
-        drop:
-        - ALL
-      runAsGroup: 185
-      runAsUser: 185
-      runAsNonRoot: true
-      allowPrivilegeEscalation: false
-      seccompProfile:
-        type: RuntimeDefault

@ -21,39 +21,19 @@ metadata:
 spec:
   type: Scala
   mode: cluster
-  image: docker.io/library/spark:4.0.0
+  image: spark:3.5.2
   imagePullPolicy: IfNotPresent
   mainClass: org.apache.spark.examples.SparkPi
-  mainApplicationFile: local:///opt/spark/examples/jars/spark-examples.jar
-  sparkVersion: 4.0.0
+  mainApplicationFile: local:///opt/spark/examples/jars/spark-examples_2.12-3.5.2.jar
+  sparkVersion: 3.5.2
   driver:
     cores: 1
     memory: 512m
     serviceAccount: spark-operator-spark
-    securityContext:
-      capabilities:
-        drop:
-        - ALL
-      runAsGroup: 185
-      runAsUser: 185
-      runAsNonRoot: true
-      allowPrivilegeEscalation: false
-      seccompProfile:
-        type: RuntimeDefault
   executor:
     instances: 1
     cores: 1
     memory: 512m
-    securityContext:
-      capabilities:
-        drop:
-        - ALL
-      runAsGroup: 185
-      runAsUser: 185
-      runAsNonRoot: true
-      allowPrivilegeEscalation: false
-      seccompProfile:
-        type: RuntimeDefault
   dynamicAllocation:
     enabled: true
     initialExecutors: 2

@ -21,37 +21,17 @@ metadata:
 spec:
   type: Scala
   mode: cluster
-  image: docker.io/library/spark:4.0.0
+  image: spark:3.5.2
   imagePullPolicy: IfNotPresent
   mainClass: org.apache.spark.examples.SparkPi
-  mainApplicationFile: local:///opt/spark/examples/jars/spark-examples.jar
-  sparkVersion: 4.0.0
+  mainApplicationFile: local:///opt/spark/examples/jars/spark-examples_2.12-3.5.2.jar
+  sparkVersion: 3.5.2
   driver:
     cores: 1
     memory: 512m
     serviceAccount: spark-operator-spark
-    securityContext:
-      capabilities:
-        drop:
-        - ALL
-      runAsGroup: 185
-      runAsUser: 185
-      runAsNonRoot: true
-      allowPrivilegeEscalation: false
-      seccompProfile:
-        type: RuntimeDefault
   executor:
     instances: 2
     cores: 1
     memory: 512m
-    securityContext:
-      capabilities:
-        drop:
-        - ALL
-      runAsGroup: 185
-      runAsUser: 185
-      runAsNonRoot: true
-      allowPrivilegeEscalation: false
-      seccompProfile:
-        type: RuntimeDefault
   batchScheduler: kube-scheduler

@ -1,217 +0,0 @@
|
||||||
#
|
|
||||||
# Copyright 2024 The Kubeflow authors.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
apiVersion: v1
|
|
||||||
kind: ConfigMap
|
|
||||||
metadata:
|
|
||||||
name: test-configmap
|
|
||||||
namespace: default
|
|
||||||
data:
|
|
||||||
KEY1: VALUE1
|
|
||||||
|
|
||||||
---
|
|
||||||
apiVersion: v1
|
|
||||||
kind: Secret
|
|
||||||
metadata:
|
|
||||||
name: test-secret
|
|
||||||
namespace: default
|
|
||||||
stringData:
|
|
||||||
KEY2: VALUE2
|
|
||||||
|
|
||||||
---
|
|
||||||
apiVersion: v1
|
|
||||||
kind: ConfigMap
|
|
||||||
metadata:
|
|
||||||
name: test-configmap-2
|
|
||||||
namespace: default
|
|
||||||
data:
|
|
||||||
KEY3: VALUE3
|
|
||||||
|
|
||||||
---
|
|
||||||
apiVersion: v1
|
|
||||||
kind: Secret
|
|
||||||
metadata:
|
|
||||||
name: test-secret-2
|
|
||||||
namespace: default
|
|
||||||
stringData:
|
|
||||||
KEY4: VALUE4
|
|
||||||
|
|
||||||
---
|
|
||||||
apiVersion: sparkoperator.k8s.io/v1beta2
|
|
||||||
kind: SparkApplication
|
|
||||||
metadata:
|
|
||||||
name: spark-pi-pod-template
|
|
||||||
namespace: default
|
|
||||||
spec:
|
|
||||||
type: Scala
|
|
||||||
mode: cluster
|
|
||||||
sparkVersion: 4.0.0
|
|
||||||
image: docker.io/library/spark:4.0.0
|
|
||||||
imagePullPolicy: IfNotPresent
|
|
||||||
mainApplicationFile: local:///opt/spark/examples/jars/spark-examples.jar
|
|
||||||
mainClass: org.apache.spark.examples.SparkPi
|
|
||||||
arguments:
|
|
||||||
- "10000"
|
|
||||||
driver:
|
|
||||||
template:
|
|
||||||
metadata:
|
|
||||||
labels:
|
|
||||||
-          spark.apache.org/version: 4.0.0
-        annotations:
-          spark.apache.org/version: 4.0.0
-      spec:
-        securityContext:
-          capabilities:
-            drop:
-            - ALL
-          runAsGroup: 185
-          runAsUser: 185
-          runAsNonRoot: true
-          allowPrivilegeEscalation: false
-          seccompProfile:
-            type: RuntimeDefault
-        containers:
-        - name: spark-kubernetes-driver
-          env:
-          - name: KEY0
-            value: VALUE0
-          - name: KEY1
-            valueFrom:
-              configMapKeyRef:
-                name: test-configmap
-                key: KEY1
-          - name: KEY2
-            valueFrom:
-              secretKeyRef:
-                name: test-secret
-                key: KEY2
-          envFrom:
-          - configMapRef:
-              name: test-configmap-2
-          - secretRef:
-              name: test-secret-2
-          ports:
-          - name: custom-port
-            containerPort: 12345
-            protocol: TCP
-          # The resources section will not work for cpu/memory requests and limits.
-          # Ref: https://spark.apache.org/docs/latest/running-on-kubernetes.html#pod-template.
-          resources:
-            requests:
-              # Please use `spec.driver.cores` instead.
-              cpu: 500m
-              # Please use `spec.driver.memory` and `spec.driver.memoryOverhead` instead.
-              memory: 512Mi
-            limits:
-              # Please use `spec.driver.coreLimit` instead.
-              cpu: 1
-              # Please use `spec.driver.memory` and `spec.driver.memoryOverhead` instead.
-              memory: 1Gi
-        nodeSelector:
-          kubernetes.io/os: linux
-        affinity:
-          podAffinity:
-            preferredDuringSchedulingIgnoredDuringExecution:
-            - weight: 1
-              podAffinityTerm:
-                labelSelector:
-                  matchLabels:
-                    spark-app-name: spark-pi-pod-template
-                topologyKey: kubernetes.io/hostname
-        tolerations:
-        - operator: Exists
-          effect: NoSchedule
-        serviceAccountName: spark-operator-spark
-    cores: 1
-    coreLimit: "1"
-    memory: 512m
-    memoryOverhead: 512m
-  executor:
-    instances: 1
-    template:
-      metadata:
-        labels:
-          spark.apache.org/version: 4.0.0
-        annotations:
-          spark.apache.org/version: 4.0.0
-      spec:
-        securityContext:
-          capabilities:
-            drop:
-            - ALL
-          runAsGroup: 185
-          runAsUser: 185
-          runAsNonRoot: true
-          allowPrivilegeEscalation: false
-          seccompProfile:
-            type: RuntimeDefault
-        containers:
-        - name: spark-kubernetes-executor
-          env:
-          - name: KEY0
-            value: VALUE0
-          - name: KEY1
-            valueFrom:
-              configMapKeyRef:
-                name: test-configmap
-                key: KEY1
-          - name: KEY2
-            valueFrom:
-              secretKeyRef:
-                name: test-secret
-                key: KEY2
-          envFrom:
-          - configMapRef:
-              name: test-configmap-2
-          - secretRef:
-              name: test-secret-2
-          volumeMounts:
-          - name: spark-local-dir-1
-            mountPath: /mnt/disk1
-          # The resources section will not work for cpu/memory requests and limits.
-          # Ref: https://spark.apache.org/docs/latest/running-on-kubernetes.html#pod-template.
-          resources:
-            requests:
-              # Please use `spec.executor.cores` instead.
-              cpu: 1
-              # Please use `spec.executor.memory` and `spec.executor.memoryOverhead` instead.
-              memory: 1Gi
-            limits:
-              # Please use `spec.executor.coreLimit` instead.
-              cpu: 1500m
-              # Please use `spec.executor.memory` and `spec.executor.memoryOverhead` instead.
-              memory: 1512Mi
-        volumes:
-        - name: spark-local-dir-1
-          emptyDir:
-            sizeLimit: 100Mi
-        nodeSelector:
-          kubernetes.io/os: linux
-        affinity:
-          podAffinity:
-            preferredDuringSchedulingIgnoredDuringExecution:
-            - weight: 1
-              podAffinityTerm:
-                labelSelector:
-                  matchLabels:
-                    spark-app-name: spark-pi-pod-template
-                topologyKey: kubernetes.io/hostname
-        tolerations:
-        - operator: Exists
-          effect: NoSchedule
-    cores: 1
-    coreLimit: 1500m
-    memory: 1g
-    memoryOverhead: 512m
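Note: as the comments inside the template above say, CPU and memory set under a pod template's `resources` are not honored by the operator; sizing comes from the SparkApplication spec itself. A minimal sketch (name and values taken from the pod-template example above) of where those knobs actually live:

```yaml
# Sketch only: sizing fields at the SparkApplication level replace the
# pod template's container resources, which are ignored for cpu/memory.
apiVersion: sparkoperator.k8s.io/v1beta2
kind: SparkApplication
metadata:
  name: spark-pi-pod-template  # name inferred from the labels above
spec:
  driver:
    cores: 1             # instead of template resources.requests.cpu
    coreLimit: "1"       # instead of template resources.limits.cpu
    memory: 512m         # instead of template resources memory
    memoryOverhead: 512m
  executor:
    instances: 1
    cores: 1
    coreLimit: 1500m
    memory: 1g
    memoryOverhead: 512m
```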
@@ -1,68 +0,0 @@
-#
-# Copyright 2024 The Kubeflow authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: sparkoperator.k8s.io/v1beta2
-kind: SparkApplication
-metadata:
-  name: spark-pi-python
-spec:
-  type: Python
-  pythonVersion: "3"
-  mode: cluster
-  image: docker.io/library/spark:4.0.0
-  imagePullPolicy: IfNotPresent
-  mainApplicationFile: local:///opt/spark/examples/src/main/python/pi.py
-  sparkVersion: 4.0.0
-  sparkConf:
-    # Expose Spark metrics for Prometheus
-    "spark.kubernetes.driver.annotation.prometheus.io/scrape": "true"
-    "spark.kubernetes.driver.annotation.prometheus.io/path": "/metrics/executors/prometheus/"
-    "spark.kubernetes.driver.annotation.prometheus.io/port": "4040"
-    "spark.kubernetes.driver.service.annotation.prometheus.io/scrape": "true"
-    "spark.kubernetes.driver.service.annotation.prometheus.io/path": "/metrics/driver/prometheus/"
-    "spark.kubernetes.driver.service.annotation.prometheus.io/port": "4040"
-    "spark.ui.prometheus.enabled": "true"
-    "spark.executor.processTreeMetrics.enabled": "true"
-    "spark.metrics.conf.*.sink.prometheusServlet.class": "org.apache.spark.metrics.sink.PrometheusServlet"
-    "spark.metrics.conf.driver.sink.prometheusServlet.path": "/metrics/driver/prometheus/"
-    "spark.metrics.conf.executor.sink.prometheusServlet.path": "/metrics/executors/prometheus/"
-  driver:
-    cores: 1
-    memory: 512m
-    serviceAccount: spark-operator-spark
-    securityContext:
-      capabilities:
-        drop:
-        - ALL
-      runAsGroup: 185
-      runAsUser: 185
-      runAsNonRoot: true
-      allowPrivilegeEscalation: false
-      seccompProfile:
-        type: RuntimeDefault
-  executor:
-    instances: 1
-    cores: 1
-    memory: 512m
-    securityContext:
-      capabilities:
-        drop:
-        - ALL
-      runAsGroup: 185
-      runAsUser: 185
-      runAsNonRoot: true
-      allowPrivilegeEscalation: false
-      seccompProfile:
-        type: RuntimeDefault
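The deleted manifest above wires Spark's PrometheusServlet sink through `spark.kubernetes.driver.annotation.*` and `spark.kubernetes.driver.service.annotation.*` keys. Each such key stamps the corresponding annotation on the driver pod or its service, so the config above would yield driver pod metadata along these lines (pod name is illustrative, not from this diff):

```yaml
# Sketch of the driver pod metadata that the sparkConf above produces.
apiVersion: v1
kind: Pod
metadata:
  name: spark-pi-python-driver  # illustrative name
  annotations:
    prometheus.io/scrape: "true"
    prometheus.io/path: /metrics/executors/prometheus/
    prometheus.io/port: "4040"
```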
@@ -22,47 +22,27 @@ metadata:
 spec:
   type: Scala
   mode: cluster
-  image: {IMAGE_REGISTRY}/{IMAGE_REPOSITORY}/docker.io/library/spark:4.0.0-gcs-prometheus
+  image: {IMAGE_REGISTRY}/{IMAGE_REPOSITORY}/spark:3.5.2-gcs-prometheus
   imagePullPolicy: Always
   mainClass: org.apache.spark.examples.SparkPi
-  mainApplicationFile: local:///opt/spark/examples/jars/spark-examples.jar
+  mainApplicationFile: local:///opt/spark/examples/jars/spark-examples_2.12-3.5.2.jar
   arguments:
   - "100000"
-  sparkVersion: 4.0.0
+  sparkVersion: 3.5.2
   restartPolicy:
     type: Never
   driver:
     cores: 1
     memory: 512m
     labels:
-      version: 4.0.0
+      version: 3.5.2
     serviceAccount: spark-operator-spark
-    securityContext:
-      capabilities:
-        drop:
-        - ALL
-      runAsGroup: 185
-      runAsUser: 185
-      runAsNonRoot: true
-      allowPrivilegeEscalation: false
-      seccompProfile:
-        type: RuntimeDefault
   executor:
     cores: 1
     instances: 1
     memory: 512m
-    securityContext:
-      capabilities:
-        drop:
-        - ALL
-      runAsGroup: 185
-      runAsUser: 185
-      runAsNonRoot: true
-      allowPrivilegeEscalation: false
-      seccompProfile:
-        type: RuntimeDefault
     labels:
-      version: 4.0.0
+      version: 3.5.2
   monitoring:
     exposeDriverMetrics: true
     exposeExecutorMetrics: true
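Both sides of this hunk keep `monitoring.exposeDriverMetrics` and `monitoring.exposeExecutorMetrics`, the operator-managed alternative to hand-written `sparkConf` metric wiring. A sketch of the fuller block, assuming the v1beta2 `monitoring.prometheus` fields and a JMX exporter jar baked into the image (jar path and port are assumed values, not taken from this diff):

```yaml
# Sketch, not from this diff: monitoring block with the optional
# Prometheus JMX exporter enabled.
monitoring:
  exposeDriverMetrics: true
  exposeExecutorMetrics: true
  prometheus:
    jmxExporterJar: /prometheus/jmx_prometheus_javaagent.jar  # assumed path
    port: 8090                                                # assumed port
```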
@@ -22,35 +22,15 @@ spec:
   type: Python
   pythonVersion: "3"
   mode: cluster
-  image: docker.io/library/spark:4.0.0
+  image: spark:3.5.2
   imagePullPolicy: IfNotPresent
   mainApplicationFile: local:///opt/spark/examples/src/main/python/pi.py
-  sparkVersion: 4.0.0
+  sparkVersion: 3.5.2
   driver:
     cores: 1
     memory: 512m
     serviceAccount: spark-operator-spark
-    securityContext:
-      capabilities:
-        drop:
-        - ALL
-      runAsGroup: 185
-      runAsUser: 185
-      runAsNonRoot: true
-      allowPrivilegeEscalation: false
-      seccompProfile:
-        type: RuntimeDefault
   executor:
     instances: 1
     cores: 1
     memory: 512m
-    securityContext:
-      capabilities:
-        drop:
-        - ALL
-      runAsGroup: 185
-      runAsUser: 185
-      runAsNonRoot: true
-      allowPrivilegeEscalation: false
-      seccompProfile:
-        type: RuntimeDefault
@@ -25,38 +25,18 @@ spec:
   template:
     type: Scala
     mode: cluster
-    image: docker.io/library/spark:4.0.0
+    image: spark:3.5.2
     imagePullPolicy: IfNotPresent
     mainClass: org.apache.spark.examples.SparkPi
-    mainApplicationFile: local:///opt/spark/examples/jars/spark-examples.jar
+    mainApplicationFile: local:///opt/spark/examples/jars/spark-examples_2.12-3.5.2.jar
-    sparkVersion: 4.0.0
+    sparkVersion: 3.5.2
     restartPolicy:
       type: Never
     driver:
       cores: 1
       memory: 512m
       serviceAccount: spark-operator-spark
-      securityContext:
-        capabilities:
-          drop:
-          - ALL
-        runAsGroup: 185
-        runAsUser: 185
-        runAsNonRoot: true
-        allowPrivilegeEscalation: false
-        seccompProfile:
-          type: RuntimeDefault
     executor:
       instances: 1
       cores: 1
       memory: 512m
-      securityContext:
-        capabilities:
-          drop:
-          - ALL
-        runAsGroup: 185
-        runAsUser: 185
-        runAsNonRoot: true
-        allowPrivilegeEscalation: false
-        seccompProfile:
-          type: RuntimeDefault
@@ -21,37 +21,17 @@ metadata:
 spec:
   type: Scala
   mode: cluster
-  image: docker.io/library/spark:4.0.0
+  image: spark:3.5.2
   imagePullPolicy: IfNotPresent
   mainClass: org.apache.spark.examples.SparkPi
-  mainApplicationFile: local:///opt/spark/examples/jars/spark-examples.jar
+  mainApplicationFile: local:///opt/spark/examples/jars/spark-examples_2.12-3.5.2.jar
-  sparkVersion: 4.0.0
+  sparkVersion: 3.5.2
   timeToLiveSeconds: 30
   driver:
     cores: 1
     memory: 512m
     serviceAccount: spark-operator-spark
-    securityContext:
-      capabilities:
-        drop:
-        - ALL
-      runAsGroup: 185
-      runAsUser: 185
-      runAsNonRoot: true
-      allowPrivilegeEscalation: false
-      seccompProfile:
-        type: RuntimeDefault
   executor:
     instances: 1
     cores: 1
     memory: 512m
-    securityContext:
-      capabilities:
-        drop:
-        - ALL
-      runAsGroup: 185
-      runAsUser: 185
-      runAsNonRoot: true
-      allowPrivilegeEscalation: false
-      seccompProfile:
-        type: RuntimeDefault
@@ -21,37 +21,17 @@ metadata:
 spec:
   type: Scala
   mode: cluster
-  image: docker.io/library/spark:4.0.0
+  image: spark:3.5.2
   imagePullPolicy: IfNotPresent
   mainClass: org.apache.spark.examples.SparkPi
-  mainApplicationFile: local:///opt/spark/examples/jars/spark-examples.jar
+  mainApplicationFile: local:///opt/spark/examples/jars/spark-examples_2.12-3.5.2.jar
-  sparkVersion: 4.0.0
+  sparkVersion: 3.5.2
   driver:
     cores: 1
     memory: 512m
     serviceAccount: spark-operator-spark
-    securityContext:
-      capabilities:
-        drop:
-        - ALL
-      runAsGroup: 185
-      runAsUser: 185
-      runAsNonRoot: true
-      allowPrivilegeEscalation: false
-      seccompProfile:
-        type: RuntimeDefault
   executor:
     instances: 2
     cores: 1
     memory: 512m
-    securityContext:
-      capabilities:
-        drop:
-        - ALL
-      runAsGroup: 185
-      runAsUser: 185
-      runAsNonRoot: true
-      allowPrivilegeEscalation: false
-      seccompProfile:
-        type: RuntimeDefault
   batchScheduler: volcano
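`batchScheduler: volcano` on both sides hands pod scheduling to Volcano; the YuniKorn example just below additionally pins a queue through `batchSchedulerOptions`. A sketch of the same pattern applied to Volcano (the queue name is illustrative, not taken from this diff):

```yaml
# Sketch: Volcano scheduling with an explicit queue; "default" is an
# illustrative queue name, mirroring the YuniKorn example below.
batchScheduler: volcano
batchSchedulerOptions:
  queue: default
```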
@@ -21,39 +21,19 @@ metadata:
 spec:
   type: Scala
   mode: cluster
-  image: docker.io/library/spark:4.0.0
+  image: spark:3.5.2
   imagePullPolicy: IfNotPresent
   mainClass: org.apache.spark.examples.SparkPi
-  mainApplicationFile: local:///opt/spark/examples/jars/spark-examples.jar
+  mainApplicationFile: local:///opt/spark/examples/jars/spark-examples_2.12-3.5.2.jar
-  sparkVersion: 4.0.0
+  sparkVersion: 3.5.2
   driver:
     cores: 1
     memory: 512m
     serviceAccount: spark-operator-spark
-    securityContext:
-      capabilities:
-        drop:
-        - ALL
-      runAsGroup: 185
-      runAsUser: 185
-      runAsNonRoot: true
-      allowPrivilegeEscalation: false
-      seccompProfile:
-        type: RuntimeDefault
   executor:
     instances: 2
     cores: 1
     memory: 512m
-    securityContext:
-      capabilities:
-        drop:
-        - ALL
-      runAsGroup: 185
-      runAsUser: 185
-      runAsNonRoot: true
-      allowPrivilegeEscalation: false
-      seccompProfile:
-        type: RuntimeDefault
   batchScheduler: yunikorn
   batchSchedulerOptions:
     queue: root.default
@@ -21,42 +21,22 @@ metadata:
 spec:
   type: Scala
   mode: cluster
-  image: docker.io/library/spark:4.0.0
+  image: spark:3.5.2
   imagePullPolicy: IfNotPresent
   mainClass: org.apache.spark.examples.SparkPi
-  mainApplicationFile: local:///opt/spark/examples/jars/spark-examples.jar
+  mainApplicationFile: local:///opt/spark/examples/jars/spark-examples_2.12-3.5.2.jar
   arguments:
   - "5000"
-  sparkVersion: 4.0.0
+  sparkVersion: 3.5.2
   driver:
     labels:
-      version: 4.0.0
+      version: 3.5.2
     cores: 1
     memory: 512m
     serviceAccount: spark-operator-spark
-    securityContext:
-      capabilities:
-        drop:
-        - ALL
-      runAsGroup: 185
-      runAsUser: 185
-      runAsNonRoot: true
-      allowPrivilegeEscalation: false
-      seccompProfile:
-        type: RuntimeDefault
   executor:
     labels:
-      version: 4.0.0
+      version: 3.5.2
     instances: 1
     cores: 1
     memory: 512m
-    securityContext:
-      capabilities:
-        drop:
-        - ALL
-      runAsGroup: 185
-      runAsUser: 185
-      runAsNonRoot: true
-      allowPrivilegeEscalation: false
-      seccompProfile:
-        type: RuntimeDefault
@@ -1,81 +0,0 @@
-#
-# Copyright 2025 The Kubeflow authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     https://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: sparkoperator.k8s.io/v1alpha1
-kind: SparkConnect
-metadata:
-  name: spark-connect
-  namespace: default
-spec:
-  sparkVersion: 4.0.0
-  server:
-    template:
-      metadata:
-        labels:
-          key1: value1
-          key2: value2
-        annotations:
-          key3: value3
-          key4: value4
-      spec:
-        containers:
-        - name: spark-kubernetes-driver
-          image: spark:4.0.0
-          imagePullPolicy: Always
-          resources:
-            requests:
-              cpu: 1
-              memory: 1Gi
-            limits:
-              cpu: 1
-              memory: 1Gi
-        serviceAccount: spark-operator-spark
-        securityContext:
-          capabilities:
-            drop:
-            - ALL
-          runAsGroup: 185
-          runAsUser: 185
-          runAsNonRoot: true
-          allowPrivilegeEscalation: false
-          seccompProfile:
-            type: RuntimeDefault
-  executor:
-    instances: 2
-    cores: 1
-    memory: 512m
-    template:
-      metadata:
-        labels:
-          key1: value1
-          key2: value2
-        annotations:
-          key3: value3
-          key4: value4
-      spec:
-        containers:
-        - name: spark-kubernetes-executor
-          image: spark:4.0.0
-          imagePullPolicy: Always
-          securityContext:
-            capabilities:
-              drop:
-              - ALL
-            runAsGroup: 185
-            runAsUser: 185
-            runAsNonRoot: true
-            allowPrivilegeEscalation: false
-            seccompProfile:
-              type: RuntimeDefault
go.mod
@@ -1,40 +1,53 @@
-module github.com/kubeflow/spark-operator/v2
+module github.com/kubeflow/spark-operator

-go 1.24.1
+go 1.23.1

-tool k8s.io/code-generator

 require (
-	github.com/go-logr/logr v1.4.3
+	cloud.google.com/go/storage v1.44.0
-	github.com/golang/glog v1.2.4
+	github.com/aws/aws-sdk-go-v2 v1.32.1
+	github.com/aws/aws-sdk-go-v2/config v1.27.42
+	github.com/aws/aws-sdk-go-v2/service/s3 v1.63.3
+	github.com/golang/glog v1.2.2
 	github.com/google/uuid v1.6.0
-	github.com/onsi/ginkgo/v2 v2.22.0
+	github.com/olekukonko/tablewriter v0.0.5
-	github.com/onsi/gomega v1.36.1
+	github.com/onsi/ginkgo/v2 v2.20.2
-	github.com/prometheus/client_golang v1.22.0
+	github.com/onsi/gomega v1.34.2
+	github.com/prometheus/client_golang v1.20.4
 	github.com/robfig/cron/v3 v3.0.1
-	github.com/spf13/cobra v1.9.1
+	github.com/spf13/cobra v1.8.1
-	github.com/spf13/viper v1.20.1
+	github.com/spf13/viper v1.19.0
-	github.com/stretchr/testify v1.10.0
+	github.com/stretchr/testify v1.9.0
 	go.uber.org/zap v1.27.0
-	golang.org/x/mod v0.25.0
+	gocloud.dev v0.39.0
-	golang.org/x/time v0.8.0
+	golang.org/x/net v0.29.0
-	helm.sh/helm/v3 v3.17.3
+	golang.org/x/time v0.7.0
-	k8s.io/api v0.32.5
+	helm.sh/helm/v3 v3.16.1
-	k8s.io/apiextensions-apiserver v0.32.5
+	k8s.io/api v0.31.0
-	k8s.io/apimachinery v0.32.5
+	k8s.io/apiextensions-apiserver v0.31.0
-	k8s.io/client-go v0.32.5
+	k8s.io/apimachinery v0.31.0
-	k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738
+	k8s.io/client-go v1.5.2
-	sigs.k8s.io/controller-runtime v0.20.4
+	k8s.io/kubernetes v1.30.2
-	sigs.k8s.io/scheduler-plugins v0.31.8
+	k8s.io/utils v0.0.0-20240711033017-18e509b52bc8
-	sigs.k8s.io/yaml v1.4.0
+	sigs.k8s.io/controller-runtime v0.17.5
-	volcano.sh/apis v1.10.0
+	sigs.k8s.io/scheduler-plugins v0.29.8
+	volcano.sh/apis v1.9.0
 )

 require (
+	cel.dev/expr v0.16.1 // indirect
+	cloud.google.com/go v0.115.1 // indirect
+	cloud.google.com/go/auth v0.9.3 // indirect
+	cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect
+	cloud.google.com/go/compute/metadata v0.5.1 // indirect
+	cloud.google.com/go/iam v1.2.1 // indirect
+	cloud.google.com/go/monitoring v1.21.0 // indirect
 	dario.cat/mergo v1.0.1 // indirect
 	github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect
 	github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
 	github.com/BurntSushi/toml v1.4.0 // indirect
+	github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.1 // indirect
+	github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1 // indirect
+	github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1 // indirect
 	github.com/MakeNowJust/heredoc v1.0.0 // indirect
 	github.com/Masterminds/goutils v1.1.1 // indirect
 	github.com/Masterminds/semver/v3 v3.3.0 // indirect
@@ -42,15 +55,34 @@ require (
 	github.com/Masterminds/squirrel v1.5.4 // indirect
 	github.com/Microsoft/hcsshim v0.12.4 // indirect
 	github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
+	github.com/aws/aws-sdk-go v1.55.5 // indirect
+	github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.5 // indirect
+	github.com/aws/aws-sdk-go-v2/credentials v1.17.40 // indirect
+	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.16 // indirect
+	github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.10 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.20 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.20 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.18 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.20 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.1 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.18 // indirect
+	github.com/aws/aws-sdk-go-v2/service/sso v1.24.1 // indirect
+	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.1 // indirect
+	github.com/aws/aws-sdk-go-v2/service/sts v1.32.1 // indirect
+	github.com/aws/smithy-go v1.22.0 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/blang/semver/v4 v4.0.0 // indirect
+	github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect
 	github.com/cespare/xxhash/v2 v2.3.0 // indirect
 	github.com/chai2010/gettext-go v1.0.3 // indirect
-	github.com/containerd/containerd v1.7.24 // indirect
+	github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 // indirect
-	github.com/containerd/errdefs v0.3.0 // indirect
+	github.com/containerd/containerd v1.7.19 // indirect
+	github.com/containerd/errdefs v0.1.0 // indirect
 	github.com/containerd/log v0.1.0 // indirect
 	github.com/containerd/platforms v0.2.1 // indirect
-	github.com/cyphar/filepath-securejoin v0.3.6 // indirect
+	github.com/cyphar/filepath-securejoin v0.3.1 // indirect
 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
 	github.com/distribution/reference v0.6.0 // indirect
 	github.com/docker/cli v27.0.3+incompatible // indirect
@@ -60,56 +92,67 @@ require (
 	github.com/docker/go-connections v0.5.0 // indirect
 	github.com/docker/go-metrics v0.0.1 // indirect
 	github.com/emicklei/go-restful/v3 v3.12.1 // indirect
+	github.com/envoyproxy/go-control-plane v0.13.0 // indirect
+	github.com/envoyproxy/protoc-gen-validate v1.1.0 // indirect
 	github.com/evanphx/json-patch v5.9.0+incompatible // indirect
-	github.com/evanphx/json-patch/v5 v5.9.11 // indirect
+	github.com/evanphx/json-patch/v5 v5.9.0 // indirect
 	github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect
 	github.com/fatih/color v1.17.0 // indirect
 	github.com/felixge/httpsnoop v1.0.4 // indirect
-	github.com/fsnotify/fsnotify v1.8.0 // indirect
+	github.com/fsnotify/fsnotify v1.7.0 // indirect
-	github.com/fxamacker/cbor/v2 v2.7.0 // indirect
 	github.com/go-errors/errors v1.5.1 // indirect
 	github.com/go-gorp/gorp/v3 v3.1.0 // indirect
+	github.com/go-logr/logr v1.4.2 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/go-logr/zapr v1.3.0 // indirect
 	github.com/go-openapi/jsonpointer v0.21.0 // indirect
 	github.com/go-openapi/jsonreference v0.21.0 // indirect
 	github.com/go-openapi/swag v0.23.0 // indirect
 	github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
-	github.com/go-viper/mapstructure/v2 v2.3.0 // indirect
 	github.com/gobwas/glob v0.2.3 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
+	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.4 // indirect
-	github.com/google/btree v1.1.3 // indirect
+	github.com/google/btree v1.1.2 // indirect
 	github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect
-	github.com/google/go-cmp v0.7.0 // indirect
+	github.com/google/go-cmp v0.6.0 // indirect
 	github.com/google/gofuzz v1.2.0 // indirect
-	github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db // indirect
+	github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5 // indirect
+	github.com/google/s2a-go v0.1.8 // indirect
 	github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
+	github.com/google/wire v0.6.0 // indirect
+	github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect
+	github.com/googleapis/gax-go/v2 v2.13.0 // indirect
 	github.com/gorilla/mux v1.8.1 // indirect
 	github.com/gorilla/websocket v1.5.3 // indirect
 	github.com/gosuri/uitable v0.0.4 // indirect
 	github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
 	github.com/hashicorp/errwrap v1.1.0 // indirect
 	github.com/hashicorp/go-multierror v1.1.1 // indirect
+	github.com/hashicorp/hcl v1.0.0 // indirect
 	github.com/huandu/xstrings v1.5.0 // indirect
+	github.com/imdario/mergo v0.3.16 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
+	github.com/jmespath/go-jmespath v0.4.0 // indirect
 	github.com/jmoiron/sqlx v1.4.0 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
-	github.com/klauspost/compress v1.18.0 // indirect
+	github.com/klauspost/compress v1.17.9 // indirect
 	github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
 	github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
 	github.com/lib/pq v1.10.9 // indirect
 	github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
+	github.com/magiconair/properties v1.8.7 // indirect
 	github.com/mailru/easyjson v0.7.7 // indirect
 	github.com/mattn/go-colorable v0.1.13 // indirect
 	github.com/mattn/go-isatty v0.0.20 // indirect
 	github.com/mattn/go-runewidth v0.0.15 // indirect
 	github.com/mitchellh/copystructure v1.2.0 // indirect
 	github.com/mitchellh/go-wordwrap v1.0.1 // indirect
+	github.com/mitchellh/mapstructure v1.5.0 // indirect
 	github.com/mitchellh/reflectwalk v1.0.2 // indirect
 	github.com/moby/locker v1.0.1 // indirect
-	github.com/moby/spdystream v0.5.0 // indirect
+	github.com/moby/spdystream v0.4.0 // indirect
 	github.com/moby/term v0.5.0 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
@@ -118,90 +161,105 @@ require (
 	github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
 	github.com/opencontainers/go-digest v1.0.0 // indirect
 	github.com/opencontainers/image-spec v1.1.0 // indirect
-	github.com/pelletier/go-toml/v2 v2.2.3 // indirect
+	github.com/pelletier/go-toml/v2 v2.2.2 // indirect
 	github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
 	github.com/pkg/errors v0.9.1 // indirect
+	github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
 	github.com/prometheus/client_model v0.6.1 // indirect
-	github.com/prometheus/common v0.62.0 // indirect
+	github.com/prometheus/common v0.55.0 // indirect
 	github.com/prometheus/procfs v0.15.1 // indirect
 	github.com/rivo/uniseg v0.4.7 // indirect
-	github.com/rubenv/sql-migrate v1.7.1 // indirect
+	github.com/rubenv/sql-migrate v1.7.0 // indirect
 	github.com/russross/blackfriday/v2 v2.1.0 // indirect
-	github.com/sagikazarmark/locafero v0.7.0 // indirect
+	github.com/sagikazarmark/locafero v0.6.0 // indirect
+	github.com/sagikazarmark/slog-shim v0.1.0 // indirect
 	github.com/shopspring/decimal v1.4.0 // indirect
 	github.com/sirupsen/logrus v1.9.3 // indirect
 	github.com/sourcegraph/conc v0.3.0 // indirect
-	github.com/spf13/afero v1.12.0 // indirect
+	github.com/spf13/afero v1.11.0 // indirect
-	github.com/spf13/cast v1.7.1 // indirect
+	github.com/spf13/cast v1.7.0 // indirect
-	github.com/spf13/pflag v1.0.6 // indirect
+	github.com/spf13/pflag v1.0.5 // indirect
 	github.com/subosito/gotenv v1.6.0 // indirect
-	github.com/x448/float16 v0.8.4 // indirect
 	github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
 	github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
 	github.com/xeipuuv/gojsonschema v1.2.0 // indirect
 	github.com/xlab/treeprint v1.2.0 // indirect
+	go.opencensus.io v0.24.0 // indirect
+	go.opentelemetry.io/contrib/detectors/gcp v1.29.0 // indirect
+	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 // indirect
 	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect
 	go.opentelemetry.io/otel v1.29.0 // indirect
 	go.opentelemetry.io/otel/metric v1.29.0 // indirect
+	go.opentelemetry.io/otel/sdk v1.29.0 // indirect
+	go.opentelemetry.io/otel/sdk/metric v1.29.0 // indirect
 	go.opentelemetry.io/otel/trace v1.29.0 // indirect
+	go.starlark.net v0.0.0-20240705175910-70002002b310 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
-	golang.org/x/crypto v0.36.0 // indirect
+	golang.org/x/crypto v0.27.0 // indirect
-	golang.org/x/net v0.38.0 // indirect
+	golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
-	golang.org/x/oauth2 v0.25.0 // indirect
+	golang.org/x/oauth2 v0.23.0 // indirect
-	golang.org/x/sync v0.12.0 // indirect
+	golang.org/x/sync v0.8.0 // indirect
-	golang.org/x/sys v0.31.0 // indirect
+	golang.org/x/sys v0.25.0 // indirect
-	golang.org/x/term v0.30.0 // indirect
+	golang.org/x/term v0.24.0 // indirect
-	golang.org/x/text v0.23.0 // indirect
+	golang.org/x/text v0.18.0 // indirect
-	golang.org/x/tools v0.26.0 // indirect
+	golang.org/x/tools v0.24.0 // indirect
+	golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 // indirect
 	gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20241223144023-3abc09e42ca8 // indirect
+	google.golang.org/api v0.197.0 // indirect
-	google.golang.org/grpc v1.67.3 // indirect
+	google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 // indirect
-	google.golang.org/protobuf v1.36.5 // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect
+	google.golang.org/grpc v1.66.2 // indirect
+	google.golang.org/grpc/stats/opentelemetry v0.0.0-20240907200651-3ffb98b2c93a // indirect
+	google.golang.org/protobuf v1.34.2 // indirect
 	gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
+	gopkg.in/ini.v1 v1.67.0 // indirect
+	gopkg.in/yaml.v2 v2.4.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
-	k8s.io/apiserver v0.32.5 // indirect
+	k8s.io/apiserver v0.31.0 // indirect
-	k8s.io/cli-runtime v0.32.5 // indirect
+	k8s.io/cli-runtime v0.31.0 // indirect
-	k8s.io/code-generator v0.32.5 // indirect
+	k8s.io/component-base v0.31.0 // indirect
-	k8s.io/component-base v0.32.5 // indirect
-	k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9 // indirect
 	k8s.io/klog/v2 v2.130.1 // indirect
-	k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect
+	k8s.io/kube-openapi v0.0.0-20240709000822-3c01b740850f // indirect
-	k8s.io/kubectl v0.32.2 // indirect
+	k8s.io/kubectl v0.31.0 // indirect
 	oras.land/oras-go v1.2.5 // indirect
-	sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
+	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
-	sigs.k8s.io/kustomize/api v0.18.0 // indirect
+	sigs.k8s.io/kustomize/api v0.17.2 // indirect
-	sigs.k8s.io/kustomize/kyaml v0.18.1 // indirect
+	sigs.k8s.io/kustomize/kyaml v0.17.1 // indirect
-	sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect
+	sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
+	sigs.k8s.io/yaml v1.4.0 // indirect
 )

 replace (
-	k8s.io/api => k8s.io/api v0.32.5
+	k8s.io/api => k8s.io/api v0.29.3
-	k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.32.5
+	k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.29.3
-	k8s.io/apimachinery => k8s.io/apimachinery v0.32.5
+	k8s.io/apimachinery => k8s.io/apimachinery v0.29.3
-	k8s.io/apiserver => k8s.io/apiserver v0.32.5
+	k8s.io/apiserver => k8s.io/apiserver v0.29.3
-	k8s.io/cli-runtime => k8s.io/cli-runtime v0.32.5
+	k8s.io/cli-runtime => k8s.io/cli-runtime v0.29.3
-	k8s.io/client-go => k8s.io/client-go v0.32.5
+	k8s.io/client-go => k8s.io/client-go v0.29.3
-	k8s.io/cloud-provider => k8s.io/cloud-provider v0.32.5
+	k8s.io/cloud-provider => k8s.io/cloud-provider v0.29.3
-	k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.32.5
+	k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.29.3
-	k8s.io/code-generator => k8s.io/code-generator v0.32.5
+	k8s.io/code-generator => k8s.io/code-generator v0.29.3
-	k8s.io/component-base => k8s.io/component-base v0.32.5
+	k8s.io/component-base => k8s.io/component-base v0.29.3
-	k8s.io/component-helpers => k8s.io/component-helpers v0.32.5
+	k8s.io/controller-manager => k8s.io/controller-manager v0.29.3
-	k8s.io/componenti-base => k8s.io/componenti-base v0.32.5
+	k8s.io/cri-api => k8s.io/cri-api v0.29.3
-	k8s.io/controller-manager => k8s.io/controller-manager v0.32.5
+	k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.29.3
-	k8s.io/cri-api => k8s.io/cri-api v0.32.5
+	k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.29.3
-	k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.32.5
+	k8s.io/endpointslice => k8s.io/endpointslice v0.29.3
-	k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.32.5
+	k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.29.3
-	k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.32.5
+	k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.29.3
-	k8s.io/kube-proxy => k8s.io/kube-proxy v0.32.5
+	k8s.io/kube-proxy => k8s.io/kube-proxy v0.29.3
-	k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.32.5
+	k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.29.3
-	k8s.io/kubectl => k8s.io/kubectl v0.32.5
+	k8s.io/kubectl => k8s.io/kubectl v0.29.3
-	k8s.io/kubelet => k8s.io/kubelet v0.32.5
+	k8s.io/kubelet => k8s.io/kubelet v0.29.3
-	k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.32.5
+	k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.29.3
-	k8s.io/metrics => k8s.io/metrics v0.32.5
+	k8s.io/metrics => k8s.io/metrics v0.29.3
-	k8s.io/mount-utils => k8s.io/mount-utils v0.32.5
+	k8s.io/mount-utils => k8s.io/mount-utils v0.29.3
-	k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.32.5
+	k8s.io/node-api => k8s.io/node-api v0.29.3
-	k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.32.5
+	k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.29.3
-	k8s.io/sample-controller => k8s.io/sample-controller v0.32.5
+	k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.29.3
+	k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.29.3
+	k8s.io/sample-controller => k8s.io/sample-controller v0.29.3
 )
go.sum
@ -1,3 +1,26 @@
|
||||||
|
cel.dev/expr v0.16.1 h1:NR0+oFYzR1CqLFhTAqg3ql59G9VfN8fKq1TCHJ6gq1g=
|
||||||
|
cel.dev/expr v0.16.1/go.mod h1:AsGA5zb3WruAEQeQng1RZdGEXmBj0jvMWh6l5SnNuC8=
|
||||||
|
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||||
|
cloud.google.com/go v0.115.1 h1:Jo0SM9cQnSkYfp44+v+NQXHpcHqlnRJk2qxh6yvxxxQ=
|
||||||
|
cloud.google.com/go v0.115.1/go.mod h1:DuujITeaufu3gL68/lOFIirVNJwQeyf5UXyi+Wbgknc=
|
||||||
|
cloud.google.com/go/auth v0.9.3 h1:VOEUIAADkkLtyfr3BLa3R8Ed/j6w1jTBmARx+wb5w5U=
|
||||||
|
cloud.google.com/go/auth v0.9.3/go.mod h1:7z6VY+7h3KUdRov5F1i8NDP5ZzWKYmEPO842BgCsmTk=
|
||||||
|
cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY=
|
||||||
|
cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc=
|
||||||
|
cloud.google.com/go/compute/metadata v0.5.1 h1:NM6oZeZNlYjiwYje+sYFjEpP0Q0zCan1bmQW/KmIrGs=
|
||||||
|
cloud.google.com/go/compute/metadata v0.5.1/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k=
|
||||||
|
cloud.google.com/go/iam v1.2.1 h1:QFct02HRb7H12J/3utj0qf5tobFh9V4vR6h9eX5EBRU=
|
||||||
|
cloud.google.com/go/iam v1.2.1/go.mod h1:3VUIJDPpwT6p/amXRC5GY8fCCh70lxPygguVtI0Z4/g=
|
||||||
|
cloud.google.com/go/logging v1.11.0 h1:v3ktVzXMV7CwHq1MBF65wcqLMA7i+z3YxbUsoK7mOKs=
|
||||||
|
cloud.google.com/go/logging v1.11.0/go.mod h1:5LDiJC/RxTt+fHc1LAt20R9TKiUTReDg6RuuFOZ67+A=
|
||||||
|
cloud.google.com/go/longrunning v0.6.1 h1:lOLTFxYpr8hcRtcwWir5ITh1PAKUD/sG2lKrTSYjyMc=
|
||||||
|
cloud.google.com/go/longrunning v0.6.1/go.mod h1:nHISoOZpBcmlwbJmiVk5oDRz0qG/ZxPynEGs1iZ79s0=
|
||||||
|
cloud.google.com/go/monitoring v1.21.0 h1:EMc0tB+d3lUewT2NzKC/hr8cSR9WsUieVywzIHetGro=
|
||||||
|
cloud.google.com/go/monitoring v1.21.0/go.mod h1:tuJ+KNDdJbetSsbSGTqnaBvbauS5kr3Q/koy3Up6r+4=
|
||||||
|
cloud.google.com/go/storage v1.44.0 h1:abBzXf4UJKMmQ04xxJf9dYM/fNl24KHoTuBjyJDX2AI=
|
||||||
|
cloud.google.com/go/storage v1.44.0/go.mod h1:wpPblkIuMP5jCB/E48Pz9zIo2S/zD8g+ITmxKkPCITE=
|
||||||
|
cloud.google.com/go/trace v1.11.0 h1:UHX6cOJm45Zw/KIbqHe4kII8PupLt/V5tscZUkeiJVI=
|
||||||
|
cloud.google.com/go/trace v1.11.0/go.mod h1:Aiemdi52635dBR7o3zuc9lLjXo3BwGaChEjCa3tJNmM=
|
||||||
dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s=
|
dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s=
|
||||||
dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
|
dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
|
||||||
filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
|
filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
|
||||||
|
@ -6,10 +29,19 @@ github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9
|
||||||
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8=
|
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8=
|
||||||
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
|
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
|
||||||
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
|
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
|
||||||
|
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||||
github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0=
|
github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0=
|
||||||
github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
|
github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
|
||||||
github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU=
|
github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU=
|
||||||
github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU=
|
github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU=
|
||||||
|
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.1 h1:pB2F2JKCj1Znmp2rwxxt1J0Fg0wezTMgWYk5Mpbi1kg=
|
||||||
|
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.1/go.mod h1:itPGVDKf9cC/ov4MdvJ2QZ0khw4bfoo9jzwTJlaxy2k=
|
||||||
|
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1 h1:UQ0AhxogsIRZDkElkblfnwjc3IaltCm2HUMvezQaL7s=
|
||||||
|
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1/go.mod h1:jyqM3eLpJ3IbIFDTKVz2rF9T/xWGW0rIriGwnz8l9Tk=
|
||||||
|
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.48.1 h1:oTX4vsorBZo/Zdum6OKPA4o7544hm6smoRv1QjpTwGo=
|
||||||
|
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.48.1/go.mod h1:0wEl7vrAD8mehJyohS9HZy+WyEOaQO2mJx86Cvh93kM=
|
||||||
|
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1 h1:8nn+rsCvTq9axyEh382S0PFLBeaFwNsT43IrPWzctRU=
|
||||||
|
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1/go.mod h1:viRWSEhtMZqz1rhwmOVKkWl6SwmVowfL9O2YR5gI2PE=
|
||||||
github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ=
|
github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ=
|
||||||
github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE=
|
github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE=
|
||||||
github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=
|
github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=
|
||||||
|
@ -32,6 +64,46 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPd
|
||||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
|
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
|
||||||
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
|
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
|
||||||
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
|
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
|
||||||
|
github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU=
|
||||||
|
github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
|
||||||
|
github.com/aws/aws-sdk-go-v2 v1.32.1 h1:8WuZ43ytA+TV6QEPT/R23mr7pWyI7bSSiEHdt9BS2Pw=
|
||||||
|
github.com/aws/aws-sdk-go-v2 v1.32.1/go.mod h1:2SK5n0a2karNTv5tbP1SjsX0uhttou00v/HpXKM1ZUo=
|
||||||
|
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.5 h1:xDAuZTn4IMm8o1LnBZvmrL8JA1io4o3YWNXgohbf20g=
|
||||||
|
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.5/go.mod h1:wYSv6iDS621sEFLfKvpPE2ugjTuGlAG7iROg0hLOkfc=
|
||||||
|
github.com/aws/aws-sdk-go-v2/config v1.27.42 h1:Zsy9coUPuOsCWkjTvHpl2/DB9bptXtv7WeNPxvFr87s=
|
||||||
|
github.com/aws/aws-sdk-go-v2/config v1.27.42/go.mod h1:FGASs+PuJM2EY+8rt8qyQKLPbbX/S5oY+6WzJ/KE7ko=
|
||||||
|
github.com/aws/aws-sdk-go-v2/credentials v1.17.40 h1:RjnlA7t0p/IamxAM7FUJ5uS13Vszh4sjVGvsx91tGro=
|
||||||
|
github.com/aws/aws-sdk-go-v2/credentials v1.17.40/go.mod h1:dgpdnSs1Bp/atS6vLlW83h9xZPP+uSPB/27dFSgC1BM=
|
||||||
|
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.16 h1:fwrer1pJeaiia0CcOfWVbZxvj9Adc7rsuaMTwPR0DIA=
|
||||||
|
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.16/go.mod h1:XyEwwp8XI4zMar7MTnJ0Sk7qY/9aN8Hp929XhuX5SF8=
|
||||||
|
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.10 h1:zeN9UtUlA6FTx0vFSayxSX32HDw73Yb6Hh2izDSFxXY=
|
||||||
|
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.10/go.mod h1:3HKuexPDcwLWPaqpW2UR/9n8N/u/3CKcGAzSs8p8u8g=
|
||||||
|
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.20 h1:OErdlGnt+hg3tTwGYAlKvFkKVUo/TXkoHcxDxuhYYU8=
|
||||||
|
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.20/go.mod h1:HsPfuL5gs+407ByRXBMgpYoyrV1sgMrzd18yMXQHJpo=
|
||||||
|
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.20 h1:822cE1CYSwY/EZnErlF46pyynuxvf1p+VydHRQW+XNs=
|
||||||
|
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.20/go.mod h1:79/Tn7H7hYC5Gjz6fbnOV4OeBpkao7E8Tv95RO72pMM=
|
||||||
|
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ=
|
||||||
|
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc=
|
||||||
|
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.18 h1:OWYvKL53l1rbsUmW7bQyJVsYU/Ii3bbAAQIIFNbM0Tk=
|
||||||
|
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.18/go.mod h1:CUx0G1v3wG6l01tUB+j7Y8kclA8NSqK4ef0YG79a4cg=
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0 h1:TToQNkvGguu209puTojY/ozlqy2d/SFNcoLIqTFi42g=
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0/go.mod h1:0jp+ltwkf+SwG2fm/PKo8t4y8pJSgOCO4D8Lz3k0aHQ=
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.20 h1:rTWjG6AvWekO2B1LHeM3ktU7MqyX9rzWQ7hgzneZW7E=
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.20/go.mod h1:RGW2DDpVc8hu6Y6yG8G5CHVmVOAn1oV8rNKOHRJyswg=
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.1 h1:5vBMBTakOvtd8aNaicswcrr9qqCYUlasuzyoU6/0g8I=
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.1/go.mod h1:WSUbDa5qdg05Q558KXx2Scb+EDvOPXT9gfET0fyrJSk=
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.18 h1:eb+tFOIl9ZsUe2259/BKPeniKuz4/02zZFH/i4Nf8Rg=
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.18/go.mod h1:GVCC2IJNJTmdlyEsSmofEy7EfJncP7DNnXDzRjJ5Keg=
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/s3 v1.63.3 h1:3zt8qqznMuAZWDTDpcwv9Xr11M/lVj2FsRR7oYBt0OA=
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/s3 v1.63.3/go.mod h1:NLTqRLe3pUNu3nTEHI6XlHLKYmc8fbHUdMxAB6+s41Q=
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/sso v1.24.1 h1:aAIr0WhAgvKrxZtkBqne87Gjmd7/lJVTFkR2l2yuhL8=
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/sso v1.24.1/go.mod h1:8XhxGMWUfikJuginPQl5SGZ0LSJuNX3TCEQmFWZwHTM=
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.1 h1:J6kIsIkgFOaU6aKjigXJoue1XEHtKIIrpSh4vKdmRTs=
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.1/go.mod h1:2V2JLP7tXOmUbL3Hd1ojq+774t2KUAEQ35//shoNEL0=
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/sts v1.32.1 h1:q76Ig4OaJzVJGNUSGO3wjSTBS94g+EhHIbpY9rPvkxs=
|
||||||
|
github.com/aws/aws-sdk-go-v2/service/sts v1.32.1/go.mod h1:664dajZ7uS7JMUMUG0R5bWbtN97KECNCVdFDdQ6Ipu8=
|
||||||
|
github.com/aws/smithy-go v1.22.0 h1:uunKnWlcoL3zO7q+gG2Pk53joueEOsnNB28QdMsmiMM=
|
||||||
|
github.com/aws/smithy-go v1.22.0/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg=
|
||||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||||
@@ -46,28 +118,35 @@ github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b h1:otBG+dV+YK+Soembj
 github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
 github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0 h1:nvj0OLI3YqYXer/kZD8Ri1aaunCxIEsOst1BVJswV0o=
 github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g=
+github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw=
 github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
 github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/chai2010/gettext-go v1.0.3 h1:9liNh8t+u26xl5ddmWLmsOsdNLwkdRTg5AG+JnTiM80=
 github.com/chai2010/gettext-go v1.0.3/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 h1:QVw89YDxXxEe+l8gU8ETbOasdwEV+avkR75ZzsVV9WI=
+github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
 github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM=
 github.com/containerd/cgroups/v3 v3.0.2 h1:f5WFqIVSgo5IZmtTT3qVBo6TzI1ON6sycSBKkymb9L0=
 github.com/containerd/cgroups/v3 v3.0.2/go.mod h1:JUgITrzdFqp42uI2ryGA+ge0ap/nxzYgkGmIcetmErE=
-github.com/containerd/containerd v1.7.24 h1:zxszGrGjrra1yYJW/6rhm9cJ1ZQ8rkKBR48brqsa7nA=
+github.com/containerd/containerd v1.7.19 h1:/xQ4XRJ0tamDkdzrrBAUy/LE5nCcxFKdBm4EcPrSMEE=
-github.com/containerd/containerd v1.7.24/go.mod h1:7QUzfURqZWCZV7RLNEn1XjUCQLEf0bkaK4GjUaZehxw=
+github.com/containerd/containerd v1.7.19/go.mod h1:h4FtNYUUMB4Phr6v+xG89RYKj9XccvbNSCKjdufCrkc=
 github.com/containerd/continuity v0.4.2 h1:v3y/4Yz5jwnvqPKJJ+7Wf93fyWoCB3F5EclWG023MDM=
 github.com/containerd/continuity v0.4.2/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ=
-github.com/containerd/errdefs v0.3.0 h1:FSZgGOeK4yuT/+DnF07/Olde/q4KBoMsaamhXxIMDp4=
+github.com/containerd/errdefs v0.1.0 h1:m0wCRBiu1WJT/Fr+iOoQHMQS/eP5myQ8lCv4Dz5ZURM=
-github.com/containerd/errdefs v0.3.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
+github.com/containerd/errdefs v0.1.0/go.mod h1:YgWiiHtLmSeBrvpw+UfPijzbLaB77mEG1WwJTDETIV0=
 github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
 github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
 github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A=
 github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw=
-github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
+github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
 github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
 github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
-github.com/cyphar/filepath-securejoin v0.3.6 h1:4d9N5ykBnSp5Xn2JkhocYDkOpURL/18CYMpo6xB9uWM=
+github.com/cyphar/filepath-securejoin v0.3.1 h1:1V7cHiaW+C+39wEfpH6XlLBQo3j/PciWFrgfCLS8XrE=
-github.com/cyphar/filepath-securejoin v0.3.6/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI=
+github.com/cyphar/filepath-securejoin v0.3.1/go.mod h1:F7i41x/9cBF7lzCrVsYs9fuzwRZm4NQsGTBdpp6mETc=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
@@ -94,10 +173,18 @@ github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1 h1:ZClxb8laGDf5arX
 github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
 github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU=
 github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/go-control-plane v0.13.0 h1:HzkeUz1Knt+3bK+8LG1bxOO/jzWZmdxpwC51i202les=
+github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM=
+github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4=
 github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls=
 github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
-github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU=
+github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg=
-github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM=
+github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
 github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f h1:Wl78ApPPB2Wvf/TIe2xdyJxTlb6obmF18d8QdkxNDu4=
 github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f/go.mod h1:OSYXu++VVOHnXeitef/D8n/6y4QV8uLHSFXX4NeXMGc=
 github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4=
@@ -108,10 +195,8 @@ github.com/foxcpp/go-mockdns v1.1.0 h1:jI0rD8M0wuYAxL7r/ynTrCQQq0BVqfB99Vgk7Dlme
 github.com/foxcpp/go-mockdns v1.1.0/go.mod h1:IhLeSFGed3mJIAXPH2aiRQB+kqz7oqu8ld2qVbOu7Wk=
 github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
 github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
-github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M=
+github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
-github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
+github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
-github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
-github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
 github.com/go-errors/errors v1.5.1 h1:ZwEMSLRCapFLflTpT7NKaAc7ukJ8ZPEjzlxt8rPN8bk=
 github.com/go-errors/errors v1.5.1/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
 github.com/go-gorp/gorp/v3 v3.1.0 h1:ItKF/Vbuj31dmV4jxA1qblpSwkl9g1typ24xoe70IGs=
@@ -120,8 +205,8 @@ github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2
 github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
+github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
-github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
 github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
 github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
 github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
@@ -137,41 +222,70 @@ github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqw
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
 github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
 github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
-github.com/go-viper/mapstructure/v2 v2.3.0 h1:27XbWsHIqhbdR5TIC911OfYvgSaW93HM+dX7970Q7jk=
-github.com/go-viper/mapstructure/v2 v2.3.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
 github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
 github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
 github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang/glog v1.2.4 h1:CNNw5U8lSiiBk7druxtSHHTsRWcxKoac6kZKm2peBBc=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/glog v1.2.4/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
+github.com/golang/glog v1.2.2 h1:1+mZ9upx1Dh6FmUTFR1naJ77miKiXgALjWOZ3NVFPmY=
+github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
 github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
 github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
+github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
 github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
 github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
 github.com/gomodule/redigo v1.8.2 h1:H5XSIre1MB5NbPYFp+i1NBbb5qN1W8Y8YAQoAYbkm8k=
 github.com/gomodule/redigo v1.8.2/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0=
-github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
+github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU=
-github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
+github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
 github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 h1:0VpGH+cDhbDtdcweoyCVsF3fhN8kejK6rFe/2FFX2nU=
 github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49/go.mod h1:BkkQ4L1KS1xMt2aWSPStnn55ChGC0DPOn2FQYj+f25M=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
 github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
-github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-replayers/grpcreplay v1.3.0 h1:1Keyy0m1sIpqstQmgz307zhiJ1pV4uIlFds5weTmxbo=
+github.com/google/go-replayers/grpcreplay v1.3.0/go.mod h1:v6NgKtkijC0d3e3RW8il6Sy5sqRVUwoQa4mHOGEy8DI=
+github.com/google/go-replayers/httpreplay v1.2.0 h1:VM1wEyyjaoU53BwrOnaf9VhAyQQEEioJvFYxYcLRKzk=
+github.com/google/go-replayers/httpreplay v1.2.0/go.mod h1:WahEFFZZ7a1P4VM1qEeHy+tME4bwyqPcwWbNlUI1Mcg=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
 github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo=
+github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc=
-github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
+github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0=
+github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5 h1:5iH8iuqE5apketRbSFBy+X1V0o+l+8NF1avt4HWl7cA=
+github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
+github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM=
+github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA=
 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
+github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
+github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
 github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/wire v0.6.0 h1:HBkoIh4BdSxoyo9PveV8giw7ZsaBOvzWKfcg/6MrVwI=
+github.com/google/wire v0.6.0/go.mod h1:F4QhpQ9EDIdJ1Mbop/NZBRB+5yrR6qg3BnctaoUk6NA=
+github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw=
+github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA=
+github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s=
+github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A=
 github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4=
 github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q=
 github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
@@ -189,10 +303,18 @@ github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+l
 github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
 github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
 github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
+github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
+github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
 github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI=
 github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
+github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
+github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
 github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
+github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
 github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o=
 github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY=
 github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
@@ -204,8 +326,8 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm
 github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
+github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
-github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
+github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
 github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
@@ -222,6 +344,8 @@ github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
 github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
 github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0=
 github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=
+github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
+github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
 github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
 github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
 github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
@@ -229,6 +353,7 @@ github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovk
 github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
 github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
 github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
 github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
 github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
 github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU=
@@ -240,16 +365,16 @@ github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa1
 github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
 github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0=
 github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0=
+github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
+github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
 github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
 github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
 github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg=
 github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
-github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU=
+github.com/moby/spdystream v0.4.0 h1:Vy79D6mHeJJjiPdFEL2yku1kl0chZpJfZcPpb16BRl8=
-github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI=
+github.com/moby/spdystream v0.4.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI=
-github.com/moby/sys/mountinfo v0.7.1 h1:/tTvQaSJRr2FshkhXiIpux6fQ2Zvc4j7tAhMTStAG2g=
+github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78=
-github.com/moby/sys/mountinfo v0.7.1/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI=
+github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI=
-github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g=
-github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28=
 github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
 github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
 github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -266,16 +391,18 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m
 github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
 github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=
 github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
-github.com/onsi/ginkgo/v2 v2.22.0 h1:Yed107/8DjTr0lKCNt7Dn8yQ6ybuDRQoMGrNFKzMfHg=
+github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
-github.com/onsi/ginkgo/v2 v2.22.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
+github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
-github.com/onsi/gomega v1.36.1 h1:bJDPBO7ibjxcbHMgSCoo4Yj18UWbKDlLwX1x9sybDcw=
+github.com/onsi/ginkgo/v2 v2.20.2 h1:7NVCeyIWROIAheY21RLS+3j2bb52W0W82tkberYytp4=
-github.com/onsi/gomega v1.36.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
+github.com/onsi/ginkgo/v2 v2.20.2/go.mod h1:K9gyxPIlb+aIvnZ8bd9Ak+YP18w3APlR+5coaZoE2ag=
+github.com/onsi/gomega v1.34.2 h1:pNCwDkzrsv7MS9kpaQvVb1aVLahQXyJ/Tv5oAZMI3i8=
+github.com/onsi/gomega v1.34.2/go.mod h1:v1xfxRgk0KIsG+QOdm7p8UosrOzPYRo60fd3B/1Dukc=
 github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
 github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
 github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
-github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M=
+github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=
-github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc=
+github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
 github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
 github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
 github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 h1:Ii+DKncOVM8Cu1Hc+ETb5K+23HdAMvESYE3ZJ5b5cMI=
@@ -283,6 +410,8 @@ github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5/go.mod h1:iIss55rK
 github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo=
+github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
@@ -291,16 +420,17 @@ github.com/poy/onpar v1.1.2/go.mod h1:6X8FLNoxyr9kkmnlqpK6LSoiOtrO6MICtWwEuWkLjz
 github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
 github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
 github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
-github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
+github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI=
-github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
+github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
 github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
 github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
 github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
-github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
+github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
-github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
+github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
 github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
@@ -311,14 +441,16 @@ github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
 github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
 github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
 github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
-github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
+github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
+github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
-github.com/rubenv/sql-migrate v1.7.1 h1:f/o0WgfO/GqNuVg+6801K/KW3WdDSupzSjDYODmiUq4=
+github.com/rubenv/sql-migrate v1.7.0 h1:HtQq1xyTN2ISmQDggnh0c9U3JlP8apWh8YO2jzlXpTI=
-github.com/rubenv/sql-migrate v1.7.1/go.mod h1:Ob2Psprc0/3ggbM6wCzyYVFFuc6FyZrb2AS+ezLDFb4=
+github.com/rubenv/sql-migrate v1.7.0/go.mod h1:S4wtDEG1CKn+0ShpTtzWhFpHHI5PvCUtiGI+C+Z2THE=
 github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/sagikazarmark/locafero v0.7.0 h1:5MqpDsTGNDhY8sGp0Aowyf0qKsPrhewaLSsFaodPcyo=
+github.com/sagikazarmark/locafero v0.6.0 h1:ON7AQg37yzcRPU69mt7gwhFEBwxI6P9T4Qu3N51bwOk=
-github.com/sagikazarmark/locafero v0.7.0/go.mod h1:2za3Cg5rMaTMoG/2Ulr9AwtFaIppKXTRYnozin4aB5k=
+github.com/sagikazarmark/locafero v0.6.0/go.mod h1:77OmuIc6VTraTXKXIs/uvUxKGUXjE1GbemJYHqdNjX0=
+github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
+github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
 github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
 github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
 github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k=
@@ -328,30 +460,34 @@ github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ
 github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
 github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
 github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
-github.com/spf13/afero v1.12.0 h1:UcOPyRBYczmFn6yvphxkn9ZEOY65cpwGKb5mL36mrqs=
+github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
-github.com/spf13/afero v1.12.0/go.mod h1:ZTlWwG4/ahT8W7T0WQ5uYmjI9duaLQGy3Q2OAl4sk/4=
+github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY=
-github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y=
+github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w=
-github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
+github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
-github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
+github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
-github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
+github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
-github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
+github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
-github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/viper v1.20.1 h1:ZMi+z/lvLyPSCoNtFCpqjy0S4kPbirhpTMwl8BkW9X4=
+github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI=
-github.com/spf13/viper v1.20.1/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4=
+github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
 github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
 github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
+github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
 github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
 github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
-github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
-github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
 github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
 github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
 github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
@@ -363,6 +499,7 @@ github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ=
 github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0=
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
 github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43 h1:+lm10QQTNSBd8DVTNGHx7o/IKu9HYDvLMffDhbyLccI=
 github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
 github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50 h1:hlE8//ciYMztlGpl/VA+Zm1AcTPHYkHJPbHqE6WJUXE=
|
@ -371,85 +508,184 @@ github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f h1
|
||||||
github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg=
|
github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg=
|
||||||
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
|
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
|
||||||
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
|
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
|
||||||
|
go.opentelemetry.io/contrib/detectors/gcp v1.29.0 h1:TiaiXB4DpGD3sdzNlYQxruQngn5Apwzi1X0DRhuGvDQ=
|
||||||
|
go.opentelemetry.io/contrib/detectors/gcp v1.29.0/go.mod h1:GW2aWZNwR2ZxDLdv8OyC2G8zkRoQBuURgV7RPQgcPoU=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8=
go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw=
go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8=
go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc=
go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8=
go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo=
go.opentelemetry.io/otel/sdk v1.29.0/go.mod h1:pM8Dx5WKnvxLCb+8lG1PRNIDxu9g9b9g59Qr7hfAAok=
go.opentelemetry.io/otel/sdk/metric v1.29.0 h1:K2CfmJohnRgvZ9UAj2/FhIf/okdWcNdBwe1m8xFXiSY=
go.opentelemetry.io/otel/sdk/metric v1.29.0/go.mod h1:6zZLdCl2fkauYoZIOn/soQIDSWFmNSRcICarHfuhNJQ=
go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4=
go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ=
go.starlark.net v0.0.0-20240705175910-70002002b310 h1:tEAOMoNmN2MqVNi0MMEWpTtPI4YNCXgxmAGtuv3mST0=
go.starlark.net v0.0.0-20240705175910-70002002b310/go.mod h1:YKMCv9b1WrfWmeqdV5MAuEHWsu5iC+fe6kYl2sQjdI8=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
gocloud.dev v0.39.0 h1:EYABYGhAalPUaMrbSKOr5lejxoxvXj99nE8XFtsDgds=
gocloud.dev v0.39.0/go.mod h1:drz+VyYNBvrMTW0KZiBAYEdl8lbNZx+OQ7oQvdrFmSQ=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A=
golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0=
golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo=
golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs=
golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y=
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34=
golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM=
golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224=
golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ=
golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24=
golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 h1:LLhsEBxRTBLuKlQxFBYUOU8xyFgXv6cOTp2HASDlsDk=
golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90=
gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw=
gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241223144023-3abc09e42ca8 h1:TqExAhdPaB60Ux47Cn0oLV07rGnxZzIsaRhQaqS666A=
google.golang.org/api v0.197.0 h1:x6CwqQLsFiA5JKAiGyGBjc2bNtHtLddhJCE2IKuhhcQ=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241223144023-3abc09e42ca8/go.mod h1:lcTa1sDdWEIHMWlITnIczmw5w60CF9ffkb8Z+DVmmjA=
google.golang.org/api v0.197.0/go.mod h1:AuOuo20GoQ331nq7DquGHlU6d+2wN2fZ8O0ta60nRNw=
google.golang.org/grpc v1.67.3 h1:OgPcDAFKHnH8X3O4WcO4XUc8GRDeKsKReqbQtiCj7N8=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/grpc v1.67.3/go.mod h1:YGaHCc6Oap+FzBJTZLBzkGSYt/cvGPFTPxkn7QfSU8s=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 h1:BulPr26Jqjnd4eYDVe+YvyR7Yc2vJGkO5/0UxD0/jZU=
google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:hL97c3SYopEHblzpxRL4lSs523++l8DYxGM1FQiYmb4=
google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 h1:hjSy6tcFQZ171igDaN5QHOw2n6vx40juYbC/x67CEhc=
google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:qpvKtACPCQhAdu3PyQgV4l3LMXZEtft7y8QcarRsp9I=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.66.2 h1:3QdXkuq3Bkh7w+ywLdLvM56cmGvQHUMZpiCzt6Rqaoo=
google.golang.org/grpc v1.66.2/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y=
google.golang.org/grpc/stats/opentelemetry v0.0.0-20240907200651-3ffb98b2c93a h1:UIpYSuWdWHSzjwcAFRLjKcPXFZVVLXGEM23W+NWqipw=
google.golang.org/grpc/stats/opentelemetry v0.0.0-20240907200651-3ffb98b2c93a/go.mod h1:9i1T9n4ZinTUZGgzENMi8MDDgbGC5mqTS75JAv6xN3A=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
@ -458,7 +694,10 @@ gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSP
gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
@ -466,49 +705,49 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o=
gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g=
helm.sh/helm/v3 v3.17.3 h1:3n5rW3D0ArjFl0p4/oWO8IbY/HKaNNwJtOQFdH2AZHg=
helm.sh/helm/v3 v3.16.1 h1:cER6tI/8PgUAsaJaQCVBUg3VI9KN4oVaZJgY60RIc0c=
helm.sh/helm/v3 v3.17.3/go.mod h1:+uJKMH/UiMzZQOALR3XUf3BLIoczI2RKKD6bMhPh4G8=
helm.sh/helm/v3 v3.16.1/go.mod h1:r+xBHHP20qJeEqtvBXMf7W35QDJnzY/eiEBzt+TfHps=
k8s.io/api v0.32.5 h1:uqjjsYo1kTJr5NIcoIaP9F+TgXgADH7nKQx91FDAhtk=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
k8s.io/api v0.32.5/go.mod h1:bXXFU3fGCZ/eFMZvfHZC69PeGbXEL4zzjuPVzOxHF64=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
k8s.io/apiextensions-apiserver v0.32.5 h1:o0aKvmzIIs8Uk54pidk32pxET+Pg2ULnh9WI1PuKTwE=
k8s.io/api v0.29.3 h1:2ORfZ7+bGC3YJqGpV0KSDDEVf8hdGQ6A03/50vj8pmw=
k8s.io/apiextensions-apiserver v0.32.5/go.mod h1:5fpedJa3HJJFBukAZ6ur91DEDye5gYuXISPbOiNLYpU=
k8s.io/api v0.29.3/go.mod h1:y2yg2NTyHUUkIoTC+phinTnEa3KFM6RZ3szxt014a80=
k8s.io/apimachinery v0.32.5 h1:6We3aJ6crC0ap8EhsEXcgX3LpI6SEjubpiOMXLROwPM=
k8s.io/apiextensions-apiserver v0.29.3 h1:9HF+EtZaVpFjStakF4yVufnXGPRppWFEQ87qnO91YeI=
k8s.io/apimachinery v0.32.5/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
k8s.io/apiextensions-apiserver v0.29.3/go.mod h1:po0XiY5scnpJfFizNGo6puNU6Fq6D70UJY2Cb2KwAVc=
k8s.io/apiserver v0.32.5 h1:phmm2EOUVFI+cLiq8Grtuh166fTt/qgvkGPkpgzp5uY=
k8s.io/apimachinery v0.29.3 h1:2tbx+5L7RNvqJjn7RIuIKu9XTsIZ9Z5wX2G22XAa5EU=
k8s.io/apiserver v0.32.5/go.mod h1:5bfueS1tgARVWVXRJBMI5mHoCmev0jOvbxebai/kiqc=
k8s.io/apimachinery v0.29.3/go.mod h1:hx/S4V2PNW4OMg3WizRrHutyB5la0iCUbZym+W0EQIU=
k8s.io/cli-runtime v0.32.5 h1:cyf6pJLpOFzxT4PbOKIXFyNbQV2IFP53jGADXtrd6tw=
k8s.io/apiserver v0.29.3 h1:xR7ELlJ/BZSr2n4CnD3lfA4gzFivh0wwfNfz9L0WZcE=
k8s.io/cli-runtime v0.32.5/go.mod h1:AcqQUyDDFwc4ymBlPpUXVOkyFVjKi9dnDQn3unv1C7E=
k8s.io/apiserver v0.29.3/go.mod h1:hrvXlwfRulbMbBgmWRQlFru2b/JySDpmzvQwwk4GUOs=
k8s.io/client-go v0.32.5 h1:huFmQMzgWu0z4kbWsuZci+Gt4Fo72I4CcrvhToZ/Qp0=
k8s.io/cli-runtime v0.29.3 h1:r68rephmmytoywkw2MyJ+CxjpasJDQY7AGc3XY2iv1k=
k8s.io/client-go v0.32.5/go.mod h1:Qchw6f9WIVrur7DKojAHpRgGLcANT0RLIvF39Jz58xA=
k8s.io/cli-runtime v0.29.3/go.mod h1:aqVUsk86/RhaGJwDhHXH0jcdqBrgdF3bZWk4Z9D4mkM=
k8s.io/code-generator v0.32.5 h1:dvoXgaWTDPLsg0txUzWj5xPV8UwHOsBhmm4JC9Gd1Qo=
k8s.io/client-go v0.29.3 h1:R/zaZbEAxqComZ9FHeQwOh3Y1ZUs7FaHKZdQtIc2WZg=
k8s.io/code-generator v0.32.5/go.mod h1:7S6jUv4ZAnI2yDUJUQUEuc3gv6+qFhnkB5Fhs9Eb0d8=
k8s.io/client-go v0.29.3/go.mod h1:tkDisCvgPfiRpxGnOORfkljmS+UrW+WtXAy2fTvXJB0=
k8s.io/component-base v0.32.5 h1:2HiX+m3s9Iz5CMqdCVDH2V942UqzQvjuhcXb4W+KCsg=
k8s.io/component-base v0.29.3 h1:Oq9/nddUxlnrCuuR2K/jp6aflVvc0uDvxMzAWxnGzAo=
k8s.io/component-base v0.32.5/go.mod h1:jDsPNFFElv9m27TcYxlpEX7TZ3vdgx2g4PaqMUHpV/Y=
k8s.io/component-base v0.29.3/go.mod h1:Yuj33XXjuOk2BAaHsIGHhCKZQAgYKhqIxIjIr2UXYio=
k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9 h1:si3PfKm8dDYxgfbeA6orqrtLkvvIeH8UqffFJDl0bz4=
k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y=
k8s.io/kube-openapi v0.0.0-20240709000822-3c01b740850f h1:2sXuKesAYbRHxL3aE2PN6zX/gcJr22cjrsej+W784Tc=
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4=
k8s.io/kube-openapi v0.0.0-20240709000822-3c01b740850f/go.mod h1:UxDHUPsUwTOOxSU+oXURfFBcAS6JwiRXTYqYwfuGowc=
k8s.io/kubectl v0.32.5 h1:LUHZhYkIu9Cji6x1V7s4uyCGxJlS461VSYj2GyuQJtM=
k8s.io/kubectl v0.29.3 h1:RuwyyIU42MAISRIePaa8Q7A3U74Q9P4MoJbDFz9o3us=
k8s.io/kubectl v0.32.5/go.mod h1:YA7mZP44lVEn9qXRinM9THMNvVWJ6edwyHZSVMTVQbo=
k8s.io/kubectl v0.29.3/go.mod h1:yCxfY1dbwgVdEt2zkJ6d5NNLOhhWgTyrqACIoFhpdd4=
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=
k8s.io/kubernetes v1.30.2 h1:11WhS78OYX/lnSy6TXxPO6Hk+E5K9ZNrEsk9JgMSX8I=
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
k8s.io/kubernetes v1.30.2/go.mod h1:yPbIk3MhmhGigX62FLJm+CphNtjxqCvAIFQXup6RKS0=
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A=
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
oras.land/oras-go v1.2.5 h1:XpYuAwAb0DfQsunIyMfeET92emK8km3W4yEzZvUbsTo=
oras.land/oras-go v1.2.5/go.mod h1:PuAwRShRZCsZb7g8Ar3jKKQR/2A/qN+pkYxIOd/FAoo=
sigs.k8s.io/controller-runtime v0.20.4 h1:X3c+Odnxz+iPTRobG4tp092+CvBU9UK0t/bRf+n0DGU=
sigs.k8s.io/controller-runtime v0.17.5 h1:1FI9Lm7NiOOmBsgTV36/s2XrEFXnO2C4sbg/Zme72Rw=
sigs.k8s.io/controller-runtime v0.20.4/go.mod h1:xg2XB0K5ShQzAgsoujxuKN4LNXR2LfwwHsPj7Iaw+XY=
sigs.k8s.io/controller-runtime v0.17.5/go.mod h1:N0jpP5Lo7lMTF9aL56Z/B2oWBJjey6StQM0jRbKQXtY=
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/kustomize/api v0.18.0 h1:hTzp67k+3NEVInwz5BHyzc9rGxIauoXferXyjv5lWPo=
sigs.k8s.io/kustomize/api v0.17.2 h1:E7/Fjk7V5fboiuijoZHgs4aHuexi5Y2loXlVOAVAG5g=
sigs.k8s.io/kustomize/api v0.18.0/go.mod h1:f8isXnX+8b+SGLHQ6yO4JG1rdkZlvhaCf/uZbLVMb0U=
sigs.k8s.io/kustomize/api v0.17.2/go.mod h1:UWTz9Ct+MvoeQsHcJ5e+vziRRkwimm3HytpZgIYqye0=
sigs.k8s.io/kustomize/kyaml v0.18.1 h1:WvBo56Wzw3fjS+7vBjN6TeivvpbW9GmRaWZ9CIVmt4E=
sigs.k8s.io/kustomize/kyaml v0.17.1 h1:TnxYQxFXzbmNG6gOINgGWQt09GghzgTP6mIurOgrLCQ=
sigs.k8s.io/kustomize/kyaml v0.18.1/go.mod h1:C3L2BFVU1jgcddNBE1TxuVLgS46TjObMwW5FT9FcjYo=
sigs.k8s.io/kustomize/kyaml v0.17.1/go.mod h1:9V0mCjIEYjlXuCdYsSXvyoy2BTsLESH7TlGV81S282U=
sigs.k8s.io/scheduler-plugins v0.31.8 h1:Ie2EFRnkE9T2tBjxwypww7hJJyPRIwrXJNZeNxjP6QY=
sigs.k8s.io/scheduler-plugins v0.29.8 h1:T3qyi/mi+TwOEERAazwqJBjTWrMVfDS18DC2Es4g6HQ=
sigs.k8s.io/scheduler-plugins v0.31.8/go.mod h1:KkcXEbf9CYaoZ5ntbAMSYmquPq9MtSfXVpI31R6mHeM=
sigs.k8s.io/scheduler-plugins v0.29.8/go.mod h1:e8M31FE7JWXkx9yIZIwsJDwvTcmUAqWchy9MJRNGDDk=
sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
volcano.sh/apis v1.10.0 h1:Z9eLwibQmhpFmYGLWxjsTWwsYeTEKvvjFcLptmP2qxE=
volcano.sh/apis v1.9.0 h1:e+9yEbQOi6HvgaayAxYULT6n+59mkYvmqjKhp9Z06sY=
volcano.sh/apis v1.10.0/go.mod h1:z8hhFZ2qcUMR1JIjVYmBqL98CVaXNzsQAcqKiytQW9s=
volcano.sh/apis v1.9.0/go.mod h1:yXNfsZRzAOq6EUyPJYFrlMorh1XsYQGonGWyr4IiznM=
@ -1,12 +0,0 @@
## Updating [client-go](../client-go) directory

The [client-go](../client-go) directory contains the clientset, informers, and listers generated by [kubernetes/code-generator](https://github.com/kubernetes/code-generator).

### Update files in [client-go](../client-go) directory

```bash
./hack/update-codegen.sh
```

### Verify changes to files in [client-go](../client-go) directory

```bash
./hack/verify-codegen.sh
```
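For reviewers unfamiliar with the removed README, this is how such a check is typically wired into CI. The CI step below is only an illustrative sketch, not part of this diff; it assumes the two scripts behave as the README describes (verify exits non-zero when generated code is stale):

```bash
# Illustrative CI step (assumption, not taken from this diff):
# fail the build when generated client-go code no longer matches the API types.
./hack/verify-codegen.sh || {
  echo "client-go is stale; run ./hack/update-codegen.sh and commit the result" >&2
  exit 1
}
```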
@ -1,5 +1,5 @@
/*
Copyright 2025 The Kubeflow authors.
Copyright 2024 The Kubeflow authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -1,72 +0,0 @@
import argparse

from github import Github

REPO_NAME = "kubeflow/spark-operator"
CHANGELOG_FILE = "CHANGELOG.md"

parser = argparse.ArgumentParser()
parser.add_argument("--token", type=str, help="GitHub Access Token")
parser.add_argument(
    "--range", type=str, help="Changelog is generated for this release range"
)
args = parser.parse_args()

if args.token is None:
    raise Exception("GitHub Token must be set")
try:
    previous_release = args.range.split("..")[0]
    current_release = args.range.split("..")[1]
except Exception:
    raise Exception("Release range must be set in this format: v1.7.0..v1.8.0")

# Get list of commits from the range.
github_repo = Github(args.token).get_repo(REPO_NAME)
comparison = github_repo.compare(previous_release, current_release)
commits = comparison.commits

# The latest commit contains the release date.
release_date = str(commits[-1].commit.author.date).split(" ")[0]
release_url = "https://github.com/{}/tree/{}".format(REPO_NAME, current_release)

# Get all PRs in reverse chronological order from the commits.
pr_list = ""
pr_set = set()
for commit in commits.reversed:
    # Only add commits with PRs.
    for pr in commit.get_pulls():
        # Each PR is added only one time to the list.
        if pr.number in pr_set:
            continue
        if not pr.merged:
            continue
        pr_set.add(pr.number)

        new_pr = "- {title} ([#{id}]({pr_link}) by [@{user_id}]({user_url}))\n".format(
            title=pr.title,
            id=pr.number,
            pr_link=pr.html_url,
            user_id=pr.user.login,
            user_url=pr.user.html_url,
        )
        pr_list += new_pr

change_log = [
    "\n",
    "## [{}]({}) ({})\n".format(current_release, release_url, release_date),
    "\n",
    pr_list,
    "\n",
    "[Full Changelog]({})\n".format(comparison.html_url),
]

# Update Changelog with the new changes.
with open(CHANGELOG_FILE, "r+") as f:
    lines = f.readlines()
    f.seek(0)
    lines = lines[:1] + change_log + lines[1:]
    f.writelines(lines)

print("Changelog has been updated\n")
print("Group PRs in the Changelog into Features, Bug fixes, Misc, etc.\n")
print("After that, submit a PR with the updated Changelog")
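A plausible invocation of the removed changelog generator, for context. The script's file name and the release tags below are assumptions (the `--token`/`--range` flags and the `v1.7.0..v1.8.0` range format come from the script itself, and `from github import Github` implies the PyGithub package):

```bash
# Hypothetical usage of the removed script; the path hack/generate_changelog.py
# and the release tags are illustrative, not taken from this diff.
pip install PyGithub
python hack/generate_changelog.py \
  --token "${GITHUB_TOKEN}" \
  --range v1.7.0..v1.8.0
```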
@ -0,0 +1,91 @@
#!/usr/bin/env bash

# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -o errexit
set -o nounset
set -o pipefail

# generate-groups generates everything for a project with external types only, e.g. a project based
# on CustomResourceDefinitions.

if [ "$#" -lt 4 ] || [ "${1}" == "--help" ]; then
  cat <<EOF
Usage: $(basename "$0") <generators> <output-package> <apis-package> <groups-versions> ...

  <generators>        the generators comma separated to run (deepcopy,defaulter,client,lister,informer) or "all".
  <output-package>    the output package name (e.g. github.com/example/project/pkg/generated).
  <apis-package>      the external types dir (e.g. github.com/example/api or github.com/example/project/pkg/apis).
  <groups-versions>   the groups and their versions in the format "groupA:v1,v2 groupB:v1 groupC:v2", relative
                      to <api-package>.
  ...                 arbitrary flags passed to all generator binaries.

Examples:
  $(basename "$0") all github.com/example/project/pkg/client github.com/example/project/pkg/apis "foo:v1 bar:v1alpha1,v1beta1"
  $(basename "$0") deepcopy,client github.com/example/project/pkg/client github.com/example/project/pkg/apis "foo:v1 bar:v1alpha1,v1beta1"
EOF
  exit 0
fi

GENS="$1"
OUTPUT_PKG="$2"
APIS_PKG="$3"
GROUPS_WITH_VERSIONS="$4"
shift 4

go install k8s.io/code-generator/cmd/{defaulter-gen,client-gen,lister-gen,informer-gen,deepcopy-gen}

# go install puts the above commands in $GOBIN if defined, and $GOPATH/bin otherwise:
GOBIN="$(go env GOBIN)"
gobin="${GOBIN:-$(go env GOPATH)/bin}"

function codegen::join() { local IFS="$1"; shift; echo "$*"; }

# enumerate group versions
FQ_APIS=() # e.g. k8s.io/api/apps/v1
for GVs in ${GROUPS_WITH_VERSIONS}; do
  IFS=: read -r G Vs <<<"${GVs}"

  # enumerate versions
  for V in ${Vs//,/ }; do
    FQ_APIS+=("${APIS_PKG}/${G}/${V}")
  done
done

if [ "${GENS}" = "all" ] || grep -qw "deepcopy" <<<"${GENS}"; then
  echo "Generating deepcopy funcs"
  "${gobin}/deepcopy-gen" --input-dirs "$(codegen::join , "${FQ_APIS[@]}")" -O zz_generated.deepcopy --bounding-dirs "${APIS_PKG}" "$@"
fi

if [ "${GENS}" = "all" ] || grep -qw "client" <<<"${GENS}"; then
  echo "Generating clientset for ${GROUPS_WITH_VERSIONS} at ${OUTPUT_PKG}/${CLIENTSET_PKG_NAME:-clientset}"
  "${gobin}/client-gen" --clientset-name "${CLIENTSET_NAME_VERSIONED:-versioned}" --input-base "" --input "$(codegen::join , "${FQ_APIS[@]}")" --output-package "${OUTPUT_PKG}/${CLIENTSET_PKG_NAME:-clientset}" "$@"
fi

if [ "${GENS}" = "all" ] || grep -qw "lister" <<<"${GENS}"; then
  echo "Generating listers for ${GROUPS_WITH_VERSIONS} at ${OUTPUT_PKG}/listers"
  "${gobin}/lister-gen" --input-dirs "$(codegen::join , "${FQ_APIS[@]}")" --output-package "${OUTPUT_PKG}/listers" "$@"
fi

if [ "${GENS}" = "all" ] || grep -qw "informer" <<<"${GENS}"; then
  echo "Generating informers for ${GROUPS_WITH_VERSIONS} at ${OUTPUT_PKG}/informers"
  "${gobin}/informer-gen" \
    --input-dirs "$(codegen::join , "${FQ_APIS[@]}")" \
    --versioned-clientset-package "${OUTPUT_PKG}/${CLIENTSET_PKG_NAME:-clientset}/${CLIENTSET_NAME_VERSIONED:-versioned}" \
    --listers-package "${OUTPUT_PKG}/listers" \
    --output-package "${OUTPUT_PKG}/informers" \
    "$@"
fi
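The heredoc above documents the script's interface. For context, a sketch of how this repository might invoke it; the output/apis package paths and the group:version argument are assumptions (such arguments would normally be encoded in hack/update-codegen.sh), not values taken from this diff:

```bash
# Hypothetical invocation for this repository; package paths and the
# group:version argument are assumptions, not taken from this diff.
./hack/generate-groups.sh "deepcopy,client,lister,informer" \
  github.com/kubeflow/spark-operator/pkg/client \
  github.com/kubeflow/spark-operator/pkg/apis \
  "sparkoperator.k8s.io:v1beta2"
```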
Some files were not shown because too many files have changed in this diff.