Update the process to build api-docs, generate CRD manifests and code (#2046)
* Update .gitignore
* Update .dockerignore
* Update Makefile
* Update the process to generate api docs
* Update the workflow to generate api docs
* Use controller-gen to generate CRD and deep copy related methods
* Update helm chart CRDs
* Update workflow for building spark operator
* Update README.md

Signed-off-by: Yi Chen <github@chenyicn.net>
This commit is contained in:
parent 8894a4fedc
commit 779ea3debc
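Taken together, the diff below moves CRD manifests, DeepCopy methods, and the API reference onto generated targets. A sketch of the new local workflow, using only targets introduced in this commit:

    make manifests        # controller-gen: CRD/RBAC/webhook manifests into config/crd/bases
    make generate         # controller-gen: DeepCopy, DeepCopyInto, DeepCopyObject methods
    make update-crd       # copy the generated CRDs into charts/spark-operator-chart/crds/
    make build-api-docs   # gen-crd-api-reference-docs: regenerate docs/api-docs.md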
.dockerignore
@@ -1 +1,31 @@
-vendor
+.github/
+.idea/
+.vscode/
+bin/
+charts/
+docs/
+config/
+examples/
+hack/
+manifest/
+spark-docker/
+sparkctl/
+test/
+vendor/
+.dockerignore
+.DS_Store
+.gitignore
+.gitlab-ci.yaml
+.golangci.yaml
+.pre-commit-config.yaml
+ADOPTERS.md
+CODE_OF_CONDUCT.md
+codecov.ymal
+CONTRIBUTING.md
+cover.out
+Dockerfile
+LICENSE
+OWNERS
+PROJECT
+README.md
+test.sh
API docs workflow:
@@ -18,6 +18,11 @@ jobs:
     with:
       fetch-depth: "0"
 
+    - name: Set up Go
+      uses: actions/setup-go@v5
+      with:
+        go-version-file: go.mod
+
     - name: The API documentation hasn't changed
       run: |
         make build-api-docs
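To reproduce this check locally, a plausible sequence is the following; the workflow itself only shows the make invocation, so the verification step via git diff is an assumption:

    make build-api-docs
    git diff --exit-code -- docs/api-docs.md   # non-zero exit if the generated docs are stale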
Build workflow:
@@ -42,7 +47,7 @@ jobs:
 
     - name: build sparkctl
       run: |
-        make all
+        make build-sparkctl
 
   build-spark-operator:
     runs-on: ubuntu-latest

@@ -57,18 +62,17 @@ jobs:
         with:
           go-version-file: "go.mod"
 
-      - name: Run gofmt check
-        run: make fmt-check
+      - name: Run go fmt check
+        run: make go-fmt
 
-      - name: Run static analysis
-        run: make static-analysis
+      - name: Run go vet
+        run: make go-vet
 
       - name: Run unit tests
         run: make unit-test
 
       - name: Build Spark-Operator Docker Image
-        run: |
-          docker build -t docker.io/kubeflow/spark-operator:latest .
+        run: make docker-build IMAGE_TAG=latest
 
       - name: Check changes in resources used in docker file
         run: |
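The renamed CI steps map one-to-one onto Makefile targets, so the same checks can be run locally before pushing:

    make go-fmt
    make go-vet
    make unit-test
    make docker-build IMAGE_TAG=latest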
.gitignore
@@ -1,9 +1,11 @@
-.vscode/
+bin/
 vendor/
-spark-operator
-.idea/
-**/*.iml
 cover.out
 sparkctl/sparkctl
-spark-on-k8s-operator
 sparkctl/sparkctl-linux-amd64
 sparkctl/sparkctl-darwin-amd64
+**/*.iml
+
+# Various IDEs
+.idea/
+.vscode/
Makefile
@@ -1,16 +1,116 @@
-.SILENT:
-.PHONY: clean-sparkctl
-
+# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set)
+ifeq (,$(shell go env GOBIN))
+GOBIN=$(shell go env GOPATH)/bin
+else
+GOBIN=$(shell go env GOBIN)
+endif
+
+# Setting SHELL to bash allows bash commands to be executed by recipes.
+# Options are set to exit when a recipe line exits non-zero or a piped command fails.
+SHELL = /usr/bin/env bash -o pipefail
+.SHELLFLAGS = -ec
+
+REPO=github.com/kubeflow/spark-operator
 SPARK_OPERATOR_GOPATH=/go/src/github.com/kubeflow/spark-operator
+SPARK_OPERATOR_CHART_PATH=charts/spark-operator-chart
+OPERATOR_VERSION ?= $$(grep appVersion $(SPARK_OPERATOR_CHART_PATH)/Chart.yaml | awk '{print $$2}')
 DEP_VERSION:=`grep DEP_VERSION= Dockerfile | awk -F\" '{print $$2}'`
 BUILDER=`grep "FROM golang:" Dockerfile | awk '{print $$2}'`
 UNAME:=`uname | tr '[:upper:]' '[:lower:]'`
-REPO=github.com/kubeflow/spark-operator
 
-all: clean-sparkctl build-sparkctl install-sparkctl
+# CONTAINER_TOOL defines the container tool to be used for building images.
+# Be aware that the target commands are only tested with Docker which is
+# scaffolded by default. However, you might want to replace it to use other
+# tools. (i.e. podman)
+CONTAINER_TOOL ?= docker
 
-build-sparkctl:
+# Image URL to use all building/pushing image targets
+IMAGE_REPOSITORY ?= docker.io/kubeflow/spark-operator
+IMAGE_TAG ?= $(OPERATOR_VERSION)
+OPERATOR_IMAGE ?= $(IMAGE_REPOSITORY):$(IMAGE_TAG)
+
+##@ General
+
+# The help target prints out all targets with their descriptions organized
+# beneath their categories. The categories are represented by '##@' and the
+# target descriptions by '##'. The awk command is responsible for reading the
+# entire set of makefiles included in this invocation, looking for lines of the
+# file as xyz: ## something, and then pretty-format the target and help. Then,
+# if there's a line with ##@ something, that gets pretty-printed as a category.
+# More info on the usage of ANSI control characters for terminal formatting:
+# https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters
+# More info on the awk command:
+# http://linuxcommand.org/lc3_adv_awk.php
+
+.PHONY: help
+help: ## Display this help.
+	@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n  make \033[36m<target>\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf "  \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
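For reference, `make help` renders these `##@` categories and `##` descriptions roughly as follows (illustrative output, not captured from a real run):

    Usage:
      make <target>

    General
      help             Display this help.

    Development
      manifests        Generate CustomResourceDefinition, RBAC and WebhookConfiguration manifests.
      generate         Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations.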
+
+##@ Development
+
+.PHONY: manifests
+manifests: controller-gen ## Generate CustomResourceDefinition, RBAC and WebhookConfiguration manifests.
+	$(CONTROLLER_GEN) crd rbac:roleName=spark-operator-controller webhook paths="./..." output:crd:artifacts:config=config/crd/bases
+
+.PHONY: generate
+generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations.
+	$(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..."
+
+.PHONY: update-crd
+update-crd: manifests ## Update CRD files in the Helm chart.
+	cp config/crd/bases/* charts/spark-operator-chart/crds/
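With these targets, keeping the chart and the generated CRDs in sync is, for example:

    make update-crd          # regenerates manifests, then copies them into the chart
    make detect-crds-drift   # defined further below; exits non-zero if the chart and config/crd/bases differ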
+
+.PHONY: clean
+clean: ## Clean up caches and output.
+	@echo "cleaning up caches and output"
+	go clean -cache -testcache -r -x 2>&1 >/dev/null
+	-rm -rf _output
+
+.PHONY: go-fmt
+go-fmt: ## Run go fmt against code.
+	@echo "Running go fmt..."
+	if [ -n "$(shell go fmt ./...)" ]; then \
+		echo "Go code is not formatted, need to run \"make go-fmt\" and commit the changes."; \
+		false; \
+	else \
+		echo "Go code is formatted."; \
+	fi
+
+.PHONY: go-vet
+go-vet: ## Run go vet against code.
+	@echo "Running go vet..."
+	go vet ./...
+
+.PHONY: lint
+lint: golangci-lint ## Run golangci-lint linter.
+	@echo "Running golangci-lint run..."
+	$(GOLANGCI_LINT) run
+
+.PHONY: lint-fix
+lint-fix: golangci-lint ## Run golangci-lint linter and perform fixes.
+	@echo "Running golangci-lint run --fix..."
+	$(GOLANGCI_LINT) run --fix
+
+.PHONY: unit-test
+unit-test: clean ## Run go unit tests.
+	@echo "running unit tests"
+	go test $(shell go list ./... | grep -v /e2e) -coverprofile cover.out
+
+.PHONY: e2e-test
+e2e-test: clean ## Run go integration tests.
+	@echo "running integration tests"
+	go test -v ./test/e2e/ --kubeconfig "$(HOME)/.kube/config" --operator-image=docker.io/spark-operator/spark-operator:local
+
+##@ Build
+
+.PHONY: build-operator
+build-operator: ## Build spark-operator binary.
+	go build -o bin/spark-operator main.go
+
+.PHONY: build-sparkctl
+build-sparkctl: ## Build sparkctl binary.
 	[ ! -f "sparkctl/sparkctl-darwin-amd64" ] || [ ! -f "sparkctl/sparkctl-linux-amd64" ] && \
 	echo building using $(BUILDER) && \
 	docker run -w $(SPARK_OPERATOR_GOPATH) \
@@ -19,10 +119,8 @@ build-sparkctl:
 	cd sparkctl && \
 	./build.sh" || true
 
-clean-sparkctl:
-	rm -f sparkctl/sparkctl-darwin-amd64 sparkctl/sparkctl-linux-amd64
-
-install-sparkctl: | sparkctl/sparkctl-darwin-amd64 sparkctl/sparkctl-linux-amd64
+.PHONY: install-sparkctl
+install-sparkctl: | sparkctl/sparkctl-darwin-amd64 sparkctl/sparkctl-linux-amd64 ## Install sparkctl binary.
 	@if [ "$(UNAME)" = "linux" ]; then \
 		echo "installing linux binary to /usr/local/bin/sparkctl"; \
 		sudo cp sparkctl/sparkctl-linux-amd64 /usr/local/bin/sparkctl; \
@@ -35,52 +133,161 @@ install-sparkctl: | sparkctl/sparkctl-darwin-amd64 sparkctl/sparkctl-linux-amd64
 		echo "$(UNAME) not supported"; \
 	fi
 
-build-api-docs:
-	docker build -t temp-api-ref-docs hack/api-docs
-	docker run -v $$(pwd):/repo/ temp-api-ref-docs \
-		sh -c "cd /repo/ && /go/bin/gen-crd-api-reference-docs \
-		-config /repo/hack/api-docs/api-docs-config.json \
-		-api-dir github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2 \
-		-template-dir /repo/hack/api-docs/api-docs-template \
-		-out-file /repo/docs/api-docs.md"
+.PHONY: clean-sparkctl
+clean-sparkctl: ## Clean sparkctl binary.
+	rm -f sparkctl/sparkctl-darwin-amd64 sparkctl/sparkctl-linux-amd64
 
-helm-unittest:
+.PHONY: build-api-docs
+build-api-docs: gen-crd-api-reference-docs ## Build api documentation.
+	$(GEN_CRD_API_REFERENCE_DOCS) \
+		-config hack/api-docs/config.json \
+		-api-dir github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2 \
+		-template-dir hack/api-docs/template \
+		-out-file docs/api-docs.md
+
+# If you wish to build the operator image targeting other platforms you can use the --platform flag.
+# (i.e. docker build --platform linux/arm64). However, you must enable docker buildKit for it.
+# More info: https://docs.docker.com/develop/develop-images/build_enhancements/
+.PHONY: docker-build
+docker-build: ## Build docker image with the operator.
+	$(CONTAINER_TOOL) build -t ${IMAGE_REPOSITORY}:${IMAGE_TAG} .
+
+.PHONY: docker-push
+docker-push: ## Push docker image with the operator.
+	$(CONTAINER_TOOL) push ${IMAGE_REPOSITORY}:${IMAGE_TAG}
+
+# PLATFORMS defines the target platforms for the operator image be built to provide support to multiple
+# architectures. (i.e. make docker-buildx IMG=myregistry/myoperator:0.0.1). To use this option you need to:
+# - be able to use docker buildx. More info: https://docs.docker.com/build/buildx/
+# - have enabled BuildKit. More info: https://docs.docker.com/develop/develop-images/build_enhancements/
+# - be able to push the image to your registry (i.e. if you do not set a valid value via IMG=<myregistry/image:<tag>> then the export will fail)
+# To adequately provide solutions that are compatible with multiple platforms, you should consider using this option.
+PLATFORMS ?= linux/amd64,linux/arm64
+.PHONY: docker-buildx
+docker-buildx: ## Build and push docker image for the operator for cross-platform support.
+	# copy existing Dockerfile and insert --platform=${BUILDPLATFORM} into Dockerfile.cross, and preserve the original Dockerfile
+	sed -e '1 s/\(^FROM\)/FROM --platform=\$$\{BUILDPLATFORM\}/; t' -e ' 1,// s//FROM --platform=\$$\{BUILDPLATFORM\}/' Dockerfile > Dockerfile.cross
+	- $(CONTAINER_TOOL) buildx create --name spark-operator-builder
+	$(CONTAINER_TOOL) buildx use spark-operator-builder
+	- $(CONTAINER_TOOL) buildx build --push --platform=$(PLATFORMS) --tag ${IMAGE_REPOSITORY}:${IMAGE_TAG} -f Dockerfile.cross .
+	- $(CONTAINER_TOOL) buildx rm spark-operator-builder
+	rm Dockerfile.cross
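A usage sketch for the new image targets; the tag value here is just the chart's appVersion from this commit, shown for illustration:

    make docker-build docker-push IMAGE_TAG=v1beta2-1.6.2-3.5.0
    make docker-buildx PLATFORMS=linux/amd64,linux/arm64 IMAGE_TAG=v1beta2-1.6.2-3.5.0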
+
+##@ Helm
+
+.PHONY: detect-crds-drift
+detect-crds-drift:
+	diff -q charts/spark-operator-chart/crds config/crd/bases
+
+.PHONY: helm-unittest
+helm-unittest: helm-unittest-plugin ## Run Helm chart unittests.
 	helm unittest charts/spark-operator-chart --strict --file "tests/**/*_test.yaml"
 
-helm-lint:
-	docker run --rm --workdir /workspace --volume "$$(pwd):/workspace" quay.io/helmpack/chart-testing:latest ct lint
+.PHONY: helm-lint
+helm-lint: ## Run Helm chart lint test.
+	docker run --rm --workdir /workspace --volume "$$(pwd):/workspace" quay.io/helmpack/chart-testing:latest ct lint --target-branch master
 
-helm-docs:
+.PHONY: helm-docs
+helm-docs: ## Generates markdown documentation for helm charts from requirements and values files.
 	docker run --rm --volume "$$(pwd):/helm-docs" -u "$(id -u)" jnorwood/helm-docs:latest
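A typical chart validation run using the targets above:

    make detect-crds-drift
    make helm-unittest
    make helm-lint
    make helm-docs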
 
-fmt-check: clean
-	@echo "running fmt check"; cd "$(dirname $0)"; \
-	if [ -n "$(go fmt ./...)" ]; \
-	then \
-		echo "Go code is not formatted, please run 'go fmt ./...'." >&2; \
-		exit 1; \
-	else \
-		echo "Go code is formatted"; \
+##@ Deployment
+
+ifndef ignore-not-found
+  ignore-not-found = false
+endif
+
+.PHONY: install-crds
+install-crds: manifests kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config.
+	$(KUSTOMIZE) build config/crd | $(KUBECTL) create -f -
+
+.PHONY: uninstall-crds
+uninstall-crds: manifests kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
+	$(KUSTOMIZE) build config/crd | $(KUBECTL) delete --ignore-not-found=$(ignore-not-found) -f -
+
+.PHONY: deploy
+deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config.
+	cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
+	$(KUSTOMIZE) build config/default | $(KUBECTL) apply -f -
+
+.PHONY: undeploy
+undeploy: kustomize ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
+	$(KUSTOMIZE) build config/default | $(KUBECTL) delete --ignore-not-found=$(ignore-not-found) -f -
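A hedged end-to-end example; IMG is the image variable the deploy target references, and any reachable tag works:

    make install-crds
    make deploy IMG=docker.io/kubeflow/spark-operator:latest
    # tear down
    make undeploy ignore-not-found=true
    make uninstall-crds ignore-not-found=true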
 
+##@ Dependencies
+
+## Location to install dependencies to
+LOCALBIN ?= $(shell pwd)/bin
+$(LOCALBIN):
+	mkdir -p $(LOCALBIN)
+
+## Tool Binaries
+KUBECTL ?= kubectl
+KUSTOMIZE ?= $(LOCALBIN)/kustomize-$(KUSTOMIZE_VERSION)
+CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen-$(CONTROLLER_TOOLS_VERSION)
+KIND ?= $(LOCALBIN)/kind-$(KIND_VERSION)
+ENVTEST ?= $(LOCALBIN)/setup-envtest-$(ENVTEST_VERSION)
+GOLANGCI_LINT = $(LOCALBIN)/golangci-lint-$(GOLANGCI_LINT_VERSION)
+GEN_CRD_API_REFERENCE_DOCS ?= $(LOCALBIN)/gen-crd-api-reference-docs-$(GEN_CRD_API_REFERENCE_DOCS_VERSION)
+HELM ?= helm
+HELM_UNITTEST ?= unittest
+
+## Tool Versions
+KUSTOMIZE_VERSION ?= v5.4.1
+CONTROLLER_TOOLS_VERSION ?= v0.15.0
+KIND_VERSION ?= v0.23.0
+ENVTEST_VERSION ?= release-0.18
+GOLANGCI_LINT_VERSION ?= v1.57.2
+GEN_CRD_API_REFERENCE_DOCS_VERSION ?= v0.3.0
+HELM_UNITTEST_VERSION ?= 0.5.1
+
+.PHONY: kustomize
+kustomize: $(KUSTOMIZE) ## Download kustomize locally if necessary.
+$(KUSTOMIZE): $(LOCALBIN)
+	$(call go-install-tool,$(KUSTOMIZE),sigs.k8s.io/kustomize/kustomize/v5,$(KUSTOMIZE_VERSION))
+
+.PHONY: controller-gen
+controller-gen: $(CONTROLLER_GEN) ## Download controller-gen locally if necessary.
+$(CONTROLLER_GEN): $(LOCALBIN)
+	$(call go-install-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen,$(CONTROLLER_TOOLS_VERSION))
+
+.PHONY: kind
+kind: $(KIND) ## Download kind locally if necessary.
+$(KIND): $(LOCALBIN)
+	$(call go-install-tool,$(KIND),sigs.k8s.io/kind,$(KIND_VERSION))
+
+.PHONY: envtest
+envtest: $(ENVTEST) ## Download setup-envtest locally if necessary.
+$(ENVTEST): $(LOCALBIN)
+	$(call go-install-tool,$(ENVTEST),sigs.k8s.io/controller-runtime/tools/setup-envtest,$(ENVTEST_VERSION))
+
+.PHONY: golangci-lint
+golangci-lint: $(GOLANGCI_LINT) ## Download golangci-lint locally if necessary.
+$(GOLANGCI_LINT): $(LOCALBIN)
+	$(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/cmd/golangci-lint,${GOLANGCI_LINT_VERSION})
+
+.PHONY: gen-crd-api-reference-docs
+gen-crd-api-reference-docs: $(GEN_CRD_API_REFERENCE_DOCS) ## Download gen-crd-api-reference-docs locally if necessary.
+$(GEN_CRD_API_REFERENCE_DOCS): $(LOCALBIN)
+	$(call go-install-tool,$(GEN_CRD_API_REFERENCE_DOCS),github.com/ahmetb/gen-crd-api-reference-docs,$(GEN_CRD_API_REFERENCE_DOCS_VERSION))
+
+.PHONY: helm-unittest-plugin
+helm-unittest-plugin: ## Download helm unittest plugin locally if necessary.
+	if [ -z "$(shell helm plugin list | grep unittest)" ]; then \
+		echo "Installing helm unittest plugin..."; \
+		helm plugin install https://github.com/helm-unittest/helm-unittest.git --version $(HELM_UNITTEST_VERSION); \
+	fi
 
-detect-crds-drift:
-	diff -q charts/spark-operator-chart/crds manifest/crds --exclude=kustomization.yaml
-
-clean:
-	@echo "cleaning up caches and output"
-	go clean -cache -testcache -r -x 2>&1 >/dev/null
-	-rm -rf _output
-
-unit-test: clean
-	@echo "running unit tests"
-	go test -v ./... -covermode=atomic
-
-integration-test: clean
-	@echo "running integration tests"
-	go test -v ./test/e2e/ --kubeconfig "$(HOME)/.kube/config" --operator-image=gcr.io/spark-operator/spark-operator:local
-
-static-analysis:
-	@echo "running go vet"
-	# echo "Building using $(BUILDER)"
-	# go vet ./...
-	go vet $(REPO)...
+# go-install-tool will 'go install' any package with custom target and name of binary, if it doesn't exist
+# $1 - target path with name of binary (ideally with version)
+# $2 - package url which can be installed
+# $3 - specific version of package
+define go-install-tool
+@[ -f $(1) ] || { \
+	set -e; \
+	package=$(2)@$(3) ;\
+	echo "Downloading $${package}" ;\
+	GOBIN=$(LOCALBIN) go install $${package} ;\
+	mv "$$(echo "$(1)" | sed "s/-$(3)$$//")" $(1) ;\
+}
+endef
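For example, with the kustomize values defined above, the call expands to the following (illustrative trace):

    $(call go-install-tool,bin/kustomize-v5.4.1,sigs.k8s.io/kustomize/kustomize/v5,v5.4.1)
    # -> GOBIN=$(pwd)/bin go install sigs.k8s.io/kustomize/kustomize/v5@v5.4.1
    # -> mv bin/kustomize bin/kustomize-v5.4.1   (the sed strips the version suffix to find the freshly installed binary)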
README.md
@@ -31,7 +31,7 @@ The Kubernetes Operator for Apache Spark currently supports the following list of
 
 **Current API version:** *`v1beta2`*
 
-**If you are currently using the `v1beta1` version of the APIs in your manifests, please update them to use the `v1beta2` version by changing `apiVersion: "sparkoperator.k8s.io/<version>"` to `apiVersion: "sparkoperator.k8s.io/v1beta2"`. You will also need to delete the `previous` version of the CustomResourceDefinitions named `sparkapplications.sparkoperator.k8s.io` and `scheduledsparkapplications.sparkoperator.k8s.io`, and replace them with the `v1beta2` version either by installing the latest version of the operator or by running `kubectl create -f manifest/crds`.**
+**If you are currently using the `v1beta1` version of the APIs in your manifests, please update them to use the `v1beta2` version by changing `apiVersion: "sparkoperator.k8s.io/<version>"` to `apiVersion: "sparkoperator.k8s.io/v1beta2"`. You will also need to delete the `previous` version of the CustomResourceDefinitions named `sparkapplications.sparkoperator.k8s.io` and `scheduledsparkapplications.sparkoperator.k8s.io`, and replace them with the `v1beta2` version either by installing the latest version of the operator or by running `kubectl create -f config/crd/bases`.**
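Concretely, the replacement described in that paragraph now amounts to:

    kubectl delete crd sparkapplications.sparkoperator.k8s.io scheduledsparkapplications.sparkoperator.k8s.io
    kubectl create -f config/crd/bases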
 
 ## Prerequisites
charts/spark-operator-chart/Chart.yaml
@@ -1,8 +1,8 @@
 apiVersion: v2
 name: spark-operator
 description: A Helm chart for Spark on Kubernetes operator
-version: 1.4.4
-appVersion: v1beta2-1.6.1-3.5.0
+version: 1.4.5
+appVersion: v1beta2-1.6.2-3.5.0
 keywords:
   - spark
 home: https://github.com/kubeflow/spark-operator
charts/spark-operator-chart/README.md
@@ -1,6 +1,6 @@
 # spark-operator
 
-  
+  
 
 A Helm chart for Spark on Kubernetes operator
(4 file diffs suppressed because they are too large.)
config/crd/kustomization.yaml (new file)
@@ -0,0 +1,24 @@
+# This kustomization.yaml is not intended to be run by itself,
+# since it depends on service name and namespace that are out of this kustomize package.
+# It should be run by config/default
+resources:
+- bases/sparkoperator.k8s.io_sparkapplications.yaml
+- bases/sparkoperator.k8s.io_scheduledsparkapplications.yaml
+# +kubebuilder:scaffold:crdkustomizeresource
+
+patches:
+# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix.
+# patches here are for enabling the conversion webhook for each CRD
+# +kubebuilder:scaffold:crdkustomizewebhookpatch
+
+# [CERTMANAGER] To enable cert-manager, uncomment all the sections with [CERTMANAGER] prefix.
+# patches here are for enabling the CA injection for each CRD
+#- path: patches/cainjection_in_sparkapplications.yaml
+#- path: patches/cainjection_in_scheduledsparkapplications.yaml
+# +kubebuilder:scaffold:crdkustomizecainjectionpatch
+
+# [WEBHOOK] To enable webhook, uncomment the following section
+# the following config is for teaching kustomize how to do kustomization for CRDs.
+
+#configurations:
+#- kustomizeconfig.yaml
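With this file in place, the Makefile's CRD targets reduce to a plain kustomize pipeline, e.g.:

    kustomize build config/crd | kubectl create -f -   # what `make install-crds` runs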
config/crd/kustomizeconfig.yaml (new file)
@@ -0,0 +1,19 @@
+# This file is for teaching kustomize how to substitute name and namespace reference in CRD
+nameReference:
+- kind: Service
+  version: v1
+  fieldSpecs:
+  - kind: CustomResourceDefinition
+    version: v1
+    group: apiextensions.k8s.io
+    path: spec/conversion/webhook/clientConfig/service/name
+
+namespace:
+- kind: CustomResourceDefinition
+  version: v1
+  group: apiextensions.k8s.io
+  path: spec/conversion/webhook/clientConfig/service/namespace
+  create: false
+
+varReference:
+- path: metadata/annotations
docs/api-docs.md
@@ -47,7 +47,7 @@ string
 <td>
 <code>metadata</code><br/>
 <em>
-<a href="https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#objectmeta-v1-meta">
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#objectmeta-v1-meta">
 Kubernetes meta/v1.ObjectMeta
 </a>
 </em>

@@ -197,7 +197,7 @@ string
 <td>
 <code>metadata</code><br/>
 <em>
-<a href="https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#objectmeta-v1-meta">
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#objectmeta-v1-meta">
 Kubernetes meta/v1.ObjectMeta
 </a>
 </em>

@@ -401,7 +401,7 @@ The controller will add environment variable HADOOP_CONF_DIR to the path where t
 <td>
 <code>volumes</code><br/>
 <em>
-<a href="https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#volume-v1-core">
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#volume-v1-core">
 []Kubernetes core/v1.Volume
 </a>
 </em>

@@ -768,7 +768,7 @@ string
 <td>
 <code>resources</code><br/>
 <em>
-<a href="https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#resourcelist-v1-core">
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#resourcelist-v1-core">
 Kubernetes core/v1.ResourceList
 </a>
 </em>

@@ -1048,7 +1048,7 @@ This may be useful for sidecar proxies like Envoy injected by Istio which requir
 <td>
 <code>serviceType</code><br/>
 <em>
-<a href="https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#servicetype-v1-core">
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#servicetype-v1-core">
 Kubernetes core/v1.ServiceType
 </a>
 </em>

@@ -1109,7 +1109,7 @@ map[string]string
 <td>
 <code>ingressTLS</code><br/>
 <em>
-<a href="https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#ingresstls-v1-networking">
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#ingresstls-v1-networking">
 []Kubernetes networking/v1.IngressTLS
 </a>
 </em>

@@ -1197,7 +1197,7 @@ GC settings or other logging.</p>
 <td>
 <code>lifecycle</code><br/>
 <em>
-<a href="https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#lifecycle-v1-core">
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#lifecycle-v1-core">
 Kubernetes core/v1.Lifecycle
 </a>
 </em>

@@ -1438,7 +1438,7 @@ GC settings or other logging.</p>
 <td>
 <code>lifecycle</code><br/>
 <em>
-<a href="https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#lifecycle-v1-core">
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#lifecycle-v1-core">
 Kubernetes core/v1.Lifecycle
 </a>
 </em>

@@ -2066,7 +2066,7 @@ Defaults to 1.</p>
 <td>
 <code>lastRun</code><br/>
 <em>
-<a href="https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#time-v1-meta">
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#time-v1-meta">
 Kubernetes meta/v1.Time
 </a>
 </em>

@@ -2079,7 +2079,7 @@ Kubernetes meta/v1.Time
 <td>
 <code>nextRun</code><br/>
 <em>
-<a href="https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#time-v1-meta">
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#time-v1-meta">
 Kubernetes meta/v1.Time
 </a>
 </em>

@@ -2423,7 +2423,7 @@ The controller will add environment variable HADOOP_CONF_DIR to the path where t
 <td>
 <code>volumes</code><br/>
 <em>
-<a href="https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#volume-v1-core">
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#volume-v1-core">
 []Kubernetes core/v1.Volume
 </a>
 </em>

@@ -2694,7 +2694,7 @@ string
 <td>
 <code>lastSubmissionAttemptTime</code><br/>
 <em>
-<a href="https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#time-v1-meta">
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#time-v1-meta">
 Kubernetes meta/v1.Time
 </a>
 </em>

@@ -2707,7 +2707,7 @@ Kubernetes meta/v1.Time
 <td>
 <code>terminationTime</code><br/>
 <em>
-<a href="https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#time-v1-meta">
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#time-v1-meta">
 Kubernetes meta/v1.Time
 </a>
 </em>

@@ -2929,7 +2929,7 @@ string
 <td>
 <code>env</code><br/>
 <em>
-<a href="https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#envvar-v1-core">
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#envvar-v1-core">
 []Kubernetes core/v1.EnvVar
 </a>
 </em>

@@ -2956,7 +2956,7 @@ Deprecated. Consider using <code>env</code> instead.</p>
 <td>
 <code>envFrom</code><br/>
 <em>
-<a href="https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#envfromsource-v1-core">
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#envfromsource-v1-core">
 []Kubernetes core/v1.EnvFromSource
 </a>
 </em>

@@ -3009,7 +3009,7 @@ map[string]string
 <td>
 <code>volumeMounts</code><br/>
 <em>
-<a href="https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#volumemount-v1-core">
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#volumemount-v1-core">
 []Kubernetes core/v1.VolumeMount
 </a>
 </em>

@@ -3023,7 +3023,7 @@ map[string]string
 <td>
 <code>affinity</code><br/>
 <em>
-<a href="https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#affinity-v1-core">
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#affinity-v1-core">
 Kubernetes core/v1.Affinity
 </a>
 </em>

@@ -3037,7 +3037,7 @@ Kubernetes core/v1.Affinity
 <td>
 <code>tolerations</code><br/>
 <em>
-<a href="https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#toleration-v1-core">
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#toleration-v1-core">
 []Kubernetes core/v1.Toleration
 </a>
 </em>

@@ -3051,7 +3051,7 @@ Kubernetes core/v1.Affinity
 <td>
 <code>podSecurityContext</code><br/>
 <em>
-<a href="https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#podsecuritycontext-v1-core">
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#podsecuritycontext-v1-core">
 Kubernetes core/v1.PodSecurityContext
 </a>
 </em>

@@ -3065,7 +3065,7 @@ Kubernetes core/v1.PodSecurityContext
 <td>
 <code>securityContext</code><br/>
 <em>
-<a href="https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#securitycontext-v1-core">
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#securitycontext-v1-core">
 Kubernetes core/v1.SecurityContext
 </a>
 </em>

@@ -3091,7 +3091,7 @@ string
 <td>
 <code>sidecars</code><br/>
 <em>
-<a href="https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#container-v1-core">
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#container-v1-core">
 []Kubernetes core/v1.Container
 </a>
 </em>

@@ -3105,7 +3105,7 @@ string
 <td>
 <code>initContainers</code><br/>
 <em>
-<a href="https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#container-v1-core">
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#container-v1-core">
 []Kubernetes core/v1.Container
 </a>
 </em>

@@ -3144,7 +3144,7 @@ This field is mutually exclusive with nodeSelector at SparkApplication level (wh
 <td>
 <code>dnsConfig</code><br/>
 <em>
-<a href="https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#poddnsconfig-v1-core">
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#poddnsconfig-v1-core">
 Kubernetes core/v1.PodDNSConfig
 </a>
 </em>

@@ -3182,7 +3182,7 @@ string
 <td>
 <code>hostAliases</code><br/>
 <em>
-<a href="https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#hostalias-v1-core">
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#hostalias-v1-core">
 []Kubernetes core/v1.HostAlias
 </a>
 </em>

@@ -3253,7 +3253,7 @@ Defaults to spark-driver-ui-port.</p>
 <td>
 <code>serviceType</code><br/>
 <em>
-<a href="https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#servicetype-v1-core">
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#servicetype-v1-core">
 Kubernetes core/v1.ServiceType
 </a>
 </em>

@@ -3303,7 +3303,7 @@ map[string]string
 <td>
 <code>ingressTLS</code><br/>
 <em>
-<a href="https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#ingresstls-v1-networking">
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#ingresstls-v1-networking">
 []Kubernetes networking/v1.IngressTLS
 </a>
 </em>

@@ -3317,5 +3317,5 @@ map[string]string
 </table>
 <hr/>
 <p><em>
-Generated with <code>https://github.com/ahmetb/gen-crd-api-reference-docs.git</code> on git commit <code>ccf856504caaeac38151b57a950d3f8a7942b9db</code>.
+Generated with <code>gen-crd-api-reference-docs</code>.
 </em></p>
hack/api-docs/Dockerfile (deleted)
@@ -1,18 +0,0 @@
-#
-# Copyright 2021 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     https://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-FROM golang:1.22.2-alpine
-RUN go install github.com/ahmetb/gen-crd-api-reference-docs@latest
hack/api-docs/api-docs-config.json (deleted)
@@ -1,28 +0,0 @@
-{
-  "hideMemberFields": [
-    "TypeMeta"
-  ],
-  "hideTypePatterns": [
-    "ParseError$",
-    "List$"
-  ],
-  "externalPackages": [
-    {
-      "typeMatchPrefix": "^k8s\\.io/apimachinery/pkg/apis/meta/v1\\.Duration$",
-      "docsURLTemplate": "https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1#Duration"
-    },
-    {
-      "typeMatchPrefix": "^k8s\\.io/(api|apimachinery/pkg/apis)/",
-      "docsURLTemplate": "https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#{{lower .TypeIdentifier}}-{{arrIndex .PackageSegments -1}}-{{arrIndex .PackageSegments -2}}"
-    },
-    {
-      "typeMatchPrefix": "^github\\.com/knative/pkg/apis/duck/",
-      "docsURLTemplate": "https://pkg.go.dev/github.com/knative/pkg/apis/duck/{{arrIndex .PackageSegments -1}}#{{.TypeIdentifier}}"
-    }
-  ],
-  "typeDisplayNamePrefixOverrides": {
-    "k8s.io/api/": "Kubernetes ",
-    "k8s.io/apimachinery/pkg/apis/": "Kubernetes "
-  },
-  "markdownDisabled": false
-}
hack/api-docs/config.json (new file)
@@ -0,0 +1,28 @@
+{
+  "hideMemberFields": [
+    "TypeMeta"
+  ],
+  "hideTypePatterns": [
+    "ParseError$",
+    "List$"
+  ],
+  "externalPackages": [
+    {
+      "typeMatchPrefix": "^k8s\\.io/apimachinery/pkg/apis/meta/v1\\.Duration$",
+      "docsURLTemplate": "https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1#Duration"
+    },
+    {
+      "typeMatchPrefix": "^k8s\\.io/(api|apimachinery/pkg/apis)/",
+      "docsURLTemplate": "https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#{{lower .TypeIdentifier}}-{{arrIndex .PackageSegments -1}}-{{arrIndex .PackageSegments -2}}"
+    },
+    {
+      "typeMatchPrefix": "^github\\.com/knative/pkg/apis/duck/",
+      "docsURLTemplate": "https://pkg.go.dev/github.com/knative/pkg/apis/duck/{{arrIndex .PackageSegments -1}}#{{.TypeIdentifier}}"
+    }
+  ],
+  "typeDisplayNamePrefixOverrides": {
+    "k8s.io/api/": "Kubernetes ",
+    "k8s.io/apimachinery/pkg/apis/": "Kubernetes "
+  },
+  "markdownDisabled": false
+}
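As a worked example of the updated docsURLTemplate: for a reference to k8s.io/api/core/v1.Pod, the template lowercases the type identifier and appends the last two package segments in reverse, producing https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#pod-v1-core.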
hack/api-docs/template (footer template)
@@ -42,7 +42,7 @@
 {{ end }}
 
 <p><em>
-Generated with <code>https://github.com/ahmetb/gen-crd-api-reference-docs.git</code> on git commit <code>ccf856504caaeac38151b57a950d3f8a7942b9db</code>.
+Generated with <code>gen-crd-api-reference-docs</code>.
 </em></p>
 
 {{ end }}
v1beta1 (generated file header)
@@ -1,7 +1,5 @@
-// Code generated by k8s code-generator DO NOT EDIT.
-
 /*
-Copyright 2018 Google LLC
+Copyright 2024 The Kubeflow authors.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.

@@ -14,4 +12,4 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
-*/
+*/
manifest/crds/kustomization.yaml (deleted)
@@ -1,21 +0,0 @@
-#
-# Copyright 2018 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     https://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-apiVersion: kustomize.config.k8s.io/v1beta1
-kind: Kustomization
-resources:
-  - sparkoperator.k8s.io_sparkapplications.yaml
-  - sparkoperator.k8s.io_scheduledsparkapplications.yaml
(2 file diffs suppressed because they are too large.)
v1beta1 API types
@@ -14,6 +14,8 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
+// +kubebuilder:skip
+
 package v1beta1
 
 import (

@@ -409,7 +411,7 @@ type SparkPodSpec struct {
 
 // DriverSpec is specification of the driver.
 type DriverSpec struct {
-	SparkPodSpec
+	SparkPodSpec `json:",inline"`
 	// PodName is the name of the driver pod that the user creates. This is used for the
 	// in-cluster client mode in which the user creates a client pod where the driver of
 	// the user application runs. It's an error to set this field if Mode is not

@@ -426,7 +428,7 @@ type DriverSpec struct {
 
 // ExecutorSpec is specification of the executor.
 type ExecutorSpec struct {
-	SparkPodSpec
+	SparkPodSpec `json:",inline"`
 	// Instances is the number of executor instances.
 	// Optional.
 	Instances *int32 `json:"instances,omitempty"`
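The added `json:",inline"` tag makes the embedded SparkPodSpec explicit for the Kubernetes code generators; encoding/json already flattens anonymous embedded structs, but the tag is the convention controller-gen and apimachinery expect. A minimal sketch with hypothetical types:

    // Hypothetical illustration, not part of the operator code.
    type Base struct {
        Image string `json:"image,omitempty"`
    }
    type Wrapper struct {
        Base `json:",inline"` // Base's fields appear at the top level of the JSON object
        Name string `json:"name"`
    }
    // json.Marshal(Wrapper{Base{Image: "spark:3.5.0"}, "driver"})
    // -> {"image":"spark:3.5.0","name":"driver"}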
v1beta1 generated deepcopy (zz_generated.deepcopy.go)
@@ -1,10 +1,7 @@
 //go:build !ignore_autogenerated
-// +build !ignore_autogenerated
-
-// Code generated by k8s code-generator DO NOT EDIT.
 
 /*
-Copyright 2018 Google LLC
+Copyright 2024 The Kubeflow authors.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.

@@ -19,19 +16,18 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-// Code generated by deepcopy-gen. DO NOT EDIT.
+// Code generated by controller-gen. DO NOT EDIT.
 
 package v1beta1
 
 import (
-	v1 "k8s.io/api/core/v1"
-	runtime "k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/runtime"
 )
 
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *ApplicationState) DeepCopyInto(out *ApplicationState) {
 	*out = *in
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationState.

@@ -82,7 +78,6 @@ func (in *Dependencies) DeepCopyInto(out *Dependencies) {
 		*out = new(int32)
 		**out = **in
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Dependencies.

@@ -98,7 +93,6 @@ func (in *Dependencies) DeepCopy() *Dependencies {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *DriverInfo) DeepCopyInto(out *DriverInfo) {
 	*out = *in
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DriverInfo.

@@ -130,7 +124,6 @@ func (in *DriverSpec) DeepCopyInto(out *DriverSpec) {
 		*out = new(string)
 		**out = **in
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DriverSpec.

@@ -162,7 +155,6 @@ func (in *ExecutorSpec) DeepCopyInto(out *ExecutorSpec) {
 		*out = new(string)
 		**out = **in
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecutorSpec.

@@ -178,7 +170,6 @@ func (in *ExecutorSpec) DeepCopy() *ExecutorSpec {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *GPUSpec) DeepCopyInto(out *GPUSpec) {
 	*out = *in
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GPUSpec.

@@ -204,7 +195,6 @@ func (in *MonitoringSpec) DeepCopyInto(out *MonitoringSpec) {
 		*out = new(PrometheusSpec)
 		(*in).DeepCopyInto(*out)
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringSpec.

@@ -220,7 +210,6 @@ func (in *MonitoringSpec) DeepCopy() *MonitoringSpec {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *NameKey) DeepCopyInto(out *NameKey) {
 	*out = *in
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NameKey.

@@ -236,7 +225,6 @@ func (in *NameKey) DeepCopy() *NameKey {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *NamePath) DeepCopyInto(out *NamePath) {
 	*out = *in
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamePath.

@@ -267,7 +255,6 @@ func (in *PrometheusSpec) DeepCopyInto(out *PrometheusSpec) {
 		*out = new(string)
 		**out = **in
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusSpec.

@@ -303,7 +290,6 @@ func (in *RestartPolicy) DeepCopyInto(out *RestartPolicy) {
 		*out = new(int64)
 		**out = **in
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestartPolicy.

@@ -323,7 +309,6 @@ func (in *ScheduledSparkApplication) DeepCopyInto(out *ScheduledSparkApplication
 	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
 	in.Spec.DeepCopyInto(&out.Spec)
 	in.Status.DeepCopyInto(&out.Status)
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledSparkApplication.

@@ -356,7 +341,6 @@ func (in *ScheduledSparkApplicationList) DeepCopyInto(out *ScheduledSparkApplica
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledSparkApplicationList.

@@ -396,7 +380,6 @@ func (in *ScheduledSparkApplicationSpec) DeepCopyInto(out *ScheduledSparkApplica
 		*out = new(int32)
 		**out = **in
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledSparkApplicationSpec.

@@ -424,7 +407,6 @@ func (in *ScheduledSparkApplicationStatus) DeepCopyInto(out *ScheduledSparkAppli
 		*out = make([]string, len(*in))
 		copy(*out, *in)
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledSparkApplicationStatus.

@@ -440,7 +422,6 @@ func (in *ScheduledSparkApplicationStatus) DeepCopy() *ScheduledSparkApplication
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *SecretInfo) DeepCopyInto(out *SecretInfo) {
 	*out = *in
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretInfo.

@@ -460,7 +441,6 @@ func (in *SparkApplication) DeepCopyInto(out *SparkApplication) {
 	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
 	in.Spec.DeepCopyInto(&out.Spec)
 	in.Status.DeepCopyInto(&out.Status)
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkApplication.

@@ -493,7 +473,6 @@ func (in *SparkApplicationList) DeepCopyInto(out *SparkApplicationList) {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkApplicationList.

@@ -624,7 +603,6 @@ func (in *SparkApplicationSpec) DeepCopyInto(out *SparkApplicationSpec) {
 		*out = new(string)
 		**out = **in
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkApplicationSpec.

@@ -651,7 +629,6 @@ func (in *SparkApplicationStatus) DeepCopyInto(out *SparkApplicationStatus) {
 			(*out)[key] = val
 		}
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkApplicationStatus.

@@ -788,7 +765,6 @@ func (in *SparkPodSpec) DeepCopyInto(out *SparkPodSpec) {
 		*out = new(v1.PodDNSConfig)
 		(*in).DeepCopyInto(*out)
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkPodSpec.
v1beta2 API types
@@ -84,8 +84,14 @@ const (
 // +genclient
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
 // +k8s:defaulter-gen=true
-// +kubebuilder:subresource:status
-
+// +kubebuilder:metadata:annotations="api-approved.kubernetes.io=https://github.com/kubeflow/spark-operator/pull/1298"
+// +kubebuilder:resource:scope=Namespaced,shortName=scheduledsparkapp,singular=scheduledsparkapplication
+// +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:JSONPath=.spec.schedule,name=Schedule,type=string
+// +kubebuilder:printcolumn:JSONPath=.spec.suspend,name=Suspend,type=string
+// +kubebuilder:printcolumn:JSONPath=.status.lastRun,name=Last Run,type=date
+// +kubebuilder:printcolumn:JSONPath=.status.lastRunName,name=Last Run Name,type=string
+// +kubebuilder:printcolumn:JSONPath=.metadata.creationTimestamp,name=Age,type=date
 
 type ScheduledSparkApplication struct {
 	metav1.TypeMeta `json:",inline"`

@@ -165,8 +171,14 @@ type ScheduledSparkApplicationList struct {
 // +genclient
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
 // +k8s:defaulter-gen=true
-// +kubebuilder:subresource:status
-
+// +kubebuilder:metadata:annotations="api-approved.kubernetes.io=https://github.com/kubeflow/spark-operator/pull/1298"
+// +kubebuilder:resource:scope=Namespaced,shortName=sparkapp,singular=sparkapplication
+// +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:JSONPath=.status.applicationState.state,name=Status,type=string
+// +kubebuilder:printcolumn:JSONPath=.status.executionAttempts,name=Attempts,type=string
+// +kubebuilder:printcolumn:JSONPath=.status.lastSubmissionAttemptTime,name=Start,type=string
+// +kubebuilder:printcolumn:JSONPath=.status.terminationTime,name=Finish,type=string
+// +kubebuilder:printcolumn:JSONPath=.metadata.creationTimestamp,name=Age,type=date
 
 // SparkApplication represents a Spark application running on and using Kubernetes as a cluster manager.
 type SparkApplication struct {
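Once controller-gen turns the printcolumn markers above into additionalPrinterColumns in the generated CRD, kubectl get output looks roughly like this (illustrative, not captured from a real cluster):

    $ kubectl get sparkapplications
    NAME       STATUS      ATTEMPTS   START                  FINISH   AGE
    spark-pi   COMPLETED   1          2024-06-01T10:00:00Z   ...      15m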
|
||||
|
|
|
@ -1,10 +1,7 @@
|
|||
//go:build !ignore_autogenerated
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
// Code generated by k8s code-generator DO NOT EDIT.
|
||||
|
||||
/*
|
||||
Copyright 2018 Google LLC
|
||||
Copyright 2024 The Kubeflow authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
@ -19,20 +16,19 @@ See the License for the specific language governing permissions and
|
|||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by deepcopy-gen. DO NOT EDIT.
|
||||
// Code generated by controller-gen. DO NOT EDIT.
|
||||
|
||||
package v1beta2
|
||||
|
||||
import (
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/api/core/v1"
|
||||
networkingv1 "k8s.io/api/networking/v1"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ApplicationState) DeepCopyInto(out *ApplicationState) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationState.
|
||||
|
@ -65,7 +61,6 @@ func (in *BatchSchedulerConfiguration) DeepCopyInto(out *BatchSchedulerConfigura
|
|||
(*out)[key] = val.DeepCopy()
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BatchSchedulerConfiguration.
|
||||
|
@ -111,7 +106,6 @@ func (in *Dependencies) DeepCopyInto(out *Dependencies) {
|
|||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Dependencies.
|
||||
|
@ -127,7 +121,6 @@ func (in *Dependencies) DeepCopy() *Dependencies {
|
|||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *DriverInfo) DeepCopyInto(out *DriverInfo) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DriverInfo.
|
||||
|
@ -140,6 +133,64 @@ func (in *DriverInfo) DeepCopy() *DriverInfo {
|
|||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *DriverIngressConfiguration) DeepCopyInto(out *DriverIngressConfiguration) {
|
||||
*out = *in
|
||||
if in.ServicePort != nil {
|
||||
in, out := &in.ServicePort, &out.ServicePort
|
||||
*out = new(int32)
|
||||
**out = **in
|
||||
}
|
||||
if in.ServicePortName != nil {
|
||||
in, out := &in.ServicePortName, &out.ServicePortName
|
||||
*out = new(string)
|
||||
**out = **in
|
||||
}
|
||||
if in.ServiceType != nil {
|
||||
in, out := &in.ServiceType, &out.ServiceType
|
||||
*out = new(v1.ServiceType)
|
||||
**out = **in
|
||||
}
|
||||
if in.ServiceAnnotations != nil {
|
||||
in, out := &in.ServiceAnnotations, &out.ServiceAnnotations
|
||||
*out = make(map[string]string, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
if in.ServiceLabels != nil {
|
||||
in, out := &in.ServiceLabels, &out.ServiceLabels
|
||||
*out = make(map[string]string, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
if in.IngressAnnotations != nil {
|
||||
in, out := &in.IngressAnnotations, &out.IngressAnnotations
|
||||
*out = make(map[string]string, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
if in.IngressTLS != nil {
|
||||
in, out := &in.IngressTLS, &out.IngressTLS
|
||||
*out = make([]networkingv1.IngressTLS, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DriverIngressConfiguration.
|
||||
func (in *DriverIngressConfiguration) DeepCopy() *DriverIngressConfiguration {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(DriverIngressConfiguration)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *DriverSpec) DeepCopyInto(out *DriverSpec) {
|
||||
*out = *in
|
||||
|
@ -176,12 +227,18 @@ func (in *DriverSpec) DeepCopyInto(out *DriverSpec) {
|
|||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
if in.ServiceLabels != nil {
|
||||
in, out := &in.ServiceLabels, &out.ServiceLabels
|
||||
*out = make(map[string]string, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
if in.Ports != nil {
|
||||
in, out := &in.Ports, &out.Ports
|
||||
*out = make([]Port, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DriverSpec.
|
||||
|
@ -217,7 +274,6 @@ func (in *DynamicAllocation) DeepCopyInto(out *DynamicAllocation) {
|
|||
*out = new(int64)
|
||||
**out = **in
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DynamicAllocation.
|
||||
|
@ -249,6 +305,11 @@ func (in *ExecutorSpec) DeepCopyInto(out *ExecutorSpec) {
|
|||
*out = new(string)
|
||||
**out = **in
|
||||
}
|
||||
if in.Lifecycle != nil {
|
||||
in, out := &in.Lifecycle, &out.Lifecycle
|
||||
*out = new(v1.Lifecycle)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.DeleteOnTermination != nil {
|
||||
in, out := &in.DeleteOnTermination, &out.DeleteOnTermination
|
||||
*out = new(bool)
|
||||
|
@ -259,7 +320,6 @@ func (in *ExecutorSpec) DeepCopyInto(out *ExecutorSpec) {
|
|||
*out = make([]Port, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecutorSpec.
|
||||
|
@ -275,7 +335,6 @@ func (in *ExecutorSpec) DeepCopy() *ExecutorSpec {
|
|||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *GPUSpec) DeepCopyInto(out *GPUSpec) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GPUSpec.
|
||||
|
@@ -306,7 +365,6 @@ func (in *MonitoringSpec) DeepCopyInto(out *MonitoringSpec) {
		*out = new(PrometheusSpec)
		(*in).DeepCopyInto(*out)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringSpec.

@@ -322,7 +380,6 @@ func (in *MonitoringSpec) DeepCopy() *MonitoringSpec {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NameKey) DeepCopyInto(out *NameKey) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NameKey.

@@ -338,7 +395,6 @@ func (in *NameKey) DeepCopy() *NameKey {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NamePath) DeepCopyInto(out *NamePath) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamePath.

@@ -354,7 +410,6 @@ func (in *NamePath) DeepCopy() *NamePath {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Port) DeepCopyInto(out *Port) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Port.

@@ -390,7 +445,6 @@ func (in *PrometheusSpec) DeepCopyInto(out *PrometheusSpec) {
		*out = new(string)
		**out = **in
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusSpec.
@@ -426,7 +480,6 @@ func (in *RestartPolicy) DeepCopyInto(out *RestartPolicy) {
		*out = new(int64)
		**out = **in
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestartPolicy.

@@ -446,7 +499,6 @@ func (in *ScheduledSparkApplication) DeepCopyInto(out *ScheduledSparkApplication
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledSparkApplication.

@@ -479,7 +531,6 @@ func (in *ScheduledSparkApplicationList) DeepCopyInto(out *ScheduledSparkApplica
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledSparkApplicationList.

@@ -519,7 +570,6 @@ func (in *ScheduledSparkApplicationSpec) DeepCopyInto(out *ScheduledSparkApplica
		*out = new(int32)
		**out = **in
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledSparkApplicationSpec.

@@ -547,7 +597,6 @@ func (in *ScheduledSparkApplicationStatus) DeepCopyInto(out *ScheduledSparkAppli
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledSparkApplicationStatus.

@@ -563,7 +612,6 @@ func (in *ScheduledSparkApplicationStatus) DeepCopy() *ScheduledSparkApplication
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SecretInfo) DeepCopyInto(out *SecretInfo) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretInfo.

@@ -583,7 +631,6 @@ func (in *SparkApplication) DeepCopyInto(out *SparkApplication) {
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkApplication.

@@ -616,7 +663,6 @@ func (in *SparkApplicationList) DeepCopyInto(out *SparkApplicationList) {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkApplicationList.

@@ -774,7 +820,6 @@ func (in *SparkApplicationSpec) DeepCopyInto(out *SparkApplicationSpec) {
		*out = new(DynamicAllocation)
		(*in).DeepCopyInto(*out)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkApplicationSpec.
@@ -801,7 +846,6 @@ func (in *SparkApplicationStatus) DeepCopyInto(out *SparkApplicationStatus) {
			(*out)[key] = val
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkApplicationStatus.

@@ -986,7 +1030,6 @@ func (in *SparkPodSpec) DeepCopyInto(out *SparkPodSpec) {
		*out = new(bool)
		**out = **in
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkPodSpec.

@@ -1024,6 +1067,13 @@ func (in *SparkUIConfiguration) DeepCopyInto(out *SparkUIConfiguration) {
			(*out)[key] = val
		}
	}
	if in.ServiceLabels != nil {
		in, out := &in.ServiceLabels, &out.ServiceLabels
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	if in.IngressAnnotations != nil {
		in, out := &in.IngressAnnotations, &out.IngressAnnotations
		*out = make(map[string]string, len(*in))

@@ -1038,7 +1088,6 @@ func (in *SparkUIConfiguration) DeepCopyInto(out *SparkUIConfiguration) {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkUIConfiguration.
@@ -1050,56 +1099,3 @@ func (in *SparkUIConfiguration) DeepCopy() *SparkUIConfiguration {
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DriverIngressConfiguration) DeepCopyInto(out *DriverIngressConfiguration) {
	*out = *in
	if in.ServicePort != nil {
		in, out := &in.ServicePort, &out.ServicePort
		*out = new(int32)
		**out = **in
	}
	if in.ServicePortName != nil {
		in, out := &in.ServicePortName, &out.ServicePortName
		*out = new(string)
		**out = **in
	}
	if in.ServiceType != nil {
		in, out := &in.ServiceType, &out.ServiceType
		*out = new(v1.ServiceType)
		**out = **in
	}
	if in.ServiceAnnotations != nil {
		in, out := &in.ServiceAnnotations, &out.ServiceAnnotations
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	out.IngressURLFormat = in.IngressURLFormat
	if in.IngressAnnotations != nil {
		in, out := &in.IngressAnnotations, &out.IngressAnnotations
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	if in.IngressTLS != nil {
		in, out := &in.IngressTLS, &out.IngressTLS
		*out = make([]networkingv1.IngressTLS, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DriverIngressConfiguration.
func (in *DriverIngressConfiguration) DeepCopy() *DriverIngressConfiguration {
	if in == nil {
		return nil
	}
	out := new(DriverIngressConfiguration)
	in.DeepCopyInto(out)
	return out
}