Remove v1beta1 API

Signed-off-by: Yi Chen <github@chenyicn.net>
Yi Chen 2025-04-27 11:40:56 +08:00
parent 5002c08dce
commit 60afa99995
19 changed files with 1 addition and 1618 deletions

PROJECT

@@ -8,22 +8,6 @@ layout:
projectName: spark-operator
repo: github.com/kubeflow/spark-operator
resources:
- api:
crdVersion: v1
namespaced: true
controller: true
domain: sparkoperator.k8s.io
kind: SparkApplication
path: github.com/kubeflow/spark-operator/api/v1beta1
version: v1beta1
- api:
crdVersion: v1
namespaced: true
controller: true
domain: sparkoperator.k8s.io
kind: ScheduledSparkApplication
path: github.com/kubeflow/spark-operator/api/v1beta1
version: v1beta1
- api:
crdVersion: v1
namespaced: true


@@ -75,6 +75,7 @@ The following table lists the most recent few versions of the operator.
| Operator Version | API Version | Kubernetes Version | Base Spark Version |
|-----------------------|-------------|--------------------|--------------------|
| `v2.1.x` | `v1beta2` | 1.16+ | `3.5.3` |
| `v2.0.x` | `v1beta2` | 1.16+ | `3.5.2` |
| `v1beta2-1.6.x-3.5.0` | `v1beta2` | 1.16+ | `3.5.0` |
| `v1beta2-1.5.x-3.5.0` | `v1beta2` | 1.16+ | `3.5.0` |


@@ -1,74 +0,0 @@
/*
Copyright 2017 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
// SetSparkApplicationDefaults sets default values for certain fields of a SparkApplication.
func SetSparkApplicationDefaults(app *SparkApplication) {
if app == nil {
return
}
if app.Spec.Mode == "" {
app.Spec.Mode = ClusterMode
}
if app.Spec.RestartPolicy.Type == "" {
app.Spec.RestartPolicy.Type = Never
}
if app.Spec.RestartPolicy.Type != Never {
// Default to 5 sec if the RestartPolicy is OnFailure or Always and these values aren't specified.
if app.Spec.RestartPolicy.OnFailureRetryInterval == nil {
app.Spec.RestartPolicy.OnFailureRetryInterval = new(int64)
*app.Spec.RestartPolicy.OnFailureRetryInterval = 5
}
if app.Spec.RestartPolicy.OnSubmissionFailureRetryInterval == nil {
app.Spec.RestartPolicy.OnSubmissionFailureRetryInterval = new(int64)
*app.Spec.RestartPolicy.OnSubmissionFailureRetryInterval = 5
}
}
setDriverSpecDefaults(&app.Spec.Driver, app.Spec.SparkConf)
setExecutorSpecDefaults(&app.Spec.Executor, app.Spec.SparkConf)
}
func setDriverSpecDefaults(spec *DriverSpec, sparkConf map[string]string) {
if _, exists := sparkConf["spark.driver.cores"]; !exists && spec.Cores == nil {
spec.Cores = new(float32)
*spec.Cores = 1
}
if _, exists := sparkConf["spark.driver.memory"]; !exists && spec.Memory == nil {
spec.Memory = new(string)
*spec.Memory = "1g"
}
}
func setExecutorSpecDefaults(spec *ExecutorSpec, sparkConf map[string]string) {
if _, exists := sparkConf["spark.executor.cores"]; !exists && spec.Cores == nil {
spec.Cores = new(float32)
*spec.Cores = 1
}
if _, exists := sparkConf["spark.executor.memory"]; !exists && spec.Memory == nil {
spec.Memory = new(string)
*spec.Memory = "1g"
}
if _, exists := sparkConf["spark.executor.instances"]; !exists && spec.Instances == nil {
spec.Instances = new(int32)
*spec.Instances = 1
}
}
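
For context on what the deleted defaulting helper did, here is a minimal sketch that exercises SetSparkApplicationDefaults as defined above, assuming the pre-removal import path github.com/kubeflow/spark-operator/api/v1beta1; it is illustrative only, since this commit deletes the package.

package main

import (
	"fmt"

	"github.com/kubeflow/spark-operator/api/v1beta1"
)

func main() {
	// Start from an empty application and apply the v1beta1 defaults.
	app := &v1beta1.SparkApplication{}
	v1beta1.SetSparkApplicationDefaults(app)

	// Mode defaults to "cluster" and RestartPolicy.Type to "Never"; with no
	// spark.driver.* / spark.executor.* keys in SparkConf, the driver and executor
	// each get 1 core and "1g" of memory, and the executor gets 1 instance.
	fmt.Println(app.Spec.Mode, app.Spec.RestartPolicy.Type)
	fmt.Println(*app.Spec.Driver.Cores, *app.Spec.Driver.Memory)
	fmt.Println(*app.Spec.Executor.Cores, *app.Spec.Executor.Memory, *app.Spec.Executor.Instances)
}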


@@ -1,21 +0,0 @@
/*
Copyright 2017 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// +k8s:deepcopy-gen=package,register
// Package v1beta1 is the v1beta1 version of the API.
// +groupName=sparkoperator.k8s.io
package v1beta1


@@ -1,36 +0,0 @@
/*
Copyright 2024 The Kubeflow authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package v1beta1 contains API Schema definitions for the v1beta1 API group
// +kubebuilder:object:generate=true
// +groupName=sparkoperator.k8s.io
package v1beta1
import (
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/controller-runtime/pkg/scheme"
)
var (
// GroupVersion is group version used to register these objects.
GroupVersion = schema.GroupVersion{Group: "sparkoperator.k8s.io", Version: "v1beta1"}
// SchemeBuilder is used to add go types to the GroupVersionKind scheme.
SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
// AddToScheme adds the types in this group-version to the given scheme.
AddToScheme = SchemeBuilder.AddToScheme
)


@@ -1,34 +0,0 @@
/*
Copyright 2024 The Kubeflow authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"k8s.io/apimachinery/pkg/runtime/schema"
)
const (
Group = "sparkoperator.k8s.io"
Version = "v1beta1"
)
// SchemeGroupVersion is the group version used to register these objects.
var SchemeGroupVersion = schema.GroupVersion{Group: Group, Version: Version}
// Resource takes an unqualified resource and returns a Group-qualified GroupResource.
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
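
A short sketch of how these removed register.go helpers were typically consumed, for example to build a group-qualified resource name for error reporting; the group, version, and resource name follow the definitions above, and the snippet is illustrative only.

package main

import (
	"fmt"

	"github.com/kubeflow/spark-operator/api/v1beta1"
)

func main() {
	// Prints "sparkoperator.k8s.io/v1beta1".
	fmt.Println(v1beta1.SchemeGroupVersion.String())

	// Resource qualifies a plural resource name with the API group,
	// yielding "sparkapplications.sparkoperator.k8s.io".
	fmt.Println(v1beta1.Resource("sparkapplications").String())
}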


@@ -1,104 +0,0 @@
/*
Copyright 2024 The Kubeflow authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
// +kubebuilder:skip
func init() {
SchemeBuilder.Register(&ScheduledSparkApplication{}, &ScheduledSparkApplicationList{})
}
// ScheduledSparkApplicationSpec defines the desired state of ScheduledSparkApplication
type ScheduledSparkApplicationSpec struct {
// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
// Important: Run "make generate" to regenerate code after modifying this file
// Schedule is a cron schedule on which the application should run.
Schedule string `json:"schedule"`
// Template is a template from which SparkApplication instances can be created.
Template SparkApplicationSpec `json:"template"`
// Suspend is a flag telling the controller to suspend subsequent runs of the application if set to true.
// Optional.
// Defaults to false.
Suspend *bool `json:"suspend,omitempty"`
// ConcurrencyPolicy is the policy governing concurrent SparkApplication runs.
ConcurrencyPolicy ConcurrencyPolicy `json:"concurrencyPolicy,omitempty"`
// SuccessfulRunHistoryLimit is the number of past successful runs of the application to keep.
// Optional.
// Defaults to 1.
SuccessfulRunHistoryLimit *int32 `json:"successfulRunHistoryLimit,omitempty"`
// FailedRunHistoryLimit is the number of past failed runs of the application to keep.
// Optional.
// Defaults to 1.
FailedRunHistoryLimit *int32 `json:"failedRunHistoryLimit,omitempty"`
}
// ScheduledSparkApplicationStatus defines the observed state of ScheduledSparkApplication
type ScheduledSparkApplicationStatus struct {
// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
// Important: Run "make generate" to regenerate code after modifying this file
// LastRun is the time when the last run of the application started.
LastRun metav1.Time `json:"lastRun,omitempty"`
// NextRun is the time when the next run of the application will start.
NextRun metav1.Time `json:"nextRun,omitempty"`
// LastRunName is the name of the SparkApplication for the most recent run of the application.
LastRunName string `json:"lastRunName,omitempty"`
// PastSuccessfulRunNames keeps the names of SparkApplications for past successful runs.
PastSuccessfulRunNames []string `json:"pastSuccessfulRunNames,omitempty"`
// PastFailedRunNames keeps the names of SparkApplications for past failed runs.
PastFailedRunNames []string `json:"pastFailedRunNames,omitempty"`
// ScheduleState is the current scheduling state of the application.
ScheduleState ScheduleState `json:"scheduleState,omitempty"`
// Reason tells why the ScheduledSparkApplication is in the particular ScheduleState.
Reason string `json:"reason,omitempty"`
}
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// ScheduledSparkApplication is the Schema for the scheduledsparkapplications API
type ScheduledSparkApplication struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec ScheduledSparkApplicationSpec `json:"spec,omitempty"`
Status ScheduledSparkApplicationStatus `json:"status,omitempty"`
}
// +kubebuilder:object:root=true
// ScheduledSparkApplicationList contains a list of ScheduledSparkApplication
type ScheduledSparkApplicationList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []ScheduledSparkApplication `json:"items"`
}
type ScheduleState string
const (
FailedValidationState ScheduleState = "FailedValidation"
ScheduledState ScheduleState = "Scheduled"
)
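
To illustrate how the removed ScheduledSparkApplication type tied a cron schedule to a SparkApplicationSpec template, here is a hypothetical construction in Go; the schedule, object name, and image values are invented for the example (the image mirrors the deleted sample manifest later in this commit).

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/kubeflow/spark-operator/api/v1beta1"
)

func main() {
	image := "spark:3.5.5" // hypothetical image
	app := &v1beta1.ScheduledSparkApplication{
		ObjectMeta: metav1.ObjectMeta{Name: "scheduledsparkapplication-sample"},
		Spec: v1beta1.ScheduledSparkApplicationSpec{
			// Run every ten minutes, skipping a run if the previous one is still active.
			Schedule:          "@every 10m",
			ConcurrencyPolicy: v1beta1.ConcurrencyForbid,
			Template: v1beta1.SparkApplicationSpec{
				Type:         v1beta1.ScalaApplicationType,
				Mode:         v1beta1.ClusterMode,
				Image:        &image,
				SparkVersion: "3.5.5",
			},
		},
	}
	fmt.Println(app.Name, app.Spec.Schedule)
}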


@@ -1,497 +0,0 @@
/*
Copyright 2024 The Kubeflow authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// +kubebuilder:skip
package v1beta1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
// +kubebuilder:skip
func init() {
SchemeBuilder.Register(&SparkApplication{}, &SparkApplicationList{})
}
// SparkApplicationSpec defines the desired state of SparkApplication
type SparkApplicationSpec struct {
// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
// Important: Run "make generate" to regenerate code after modifying this file
// Type tells the type of the Spark application.
Type SparkApplicationType `json:"type"`
// SparkVersion is the version of Spark the application uses.
SparkVersion string `json:"sparkVersion"`
// Mode is the deployment mode of the Spark application.
Mode DeployMode `json:"mode,omitempty"`
// Image is the container image for the driver, executor, and init-container. Any custom container images for the
// driver, executor, or init-container takes precedence over this.
// Optional.
Image *string `json:"image,omitempty"`
// InitContainerImage is the image of the init-container to use. Overrides Spec.Image if set.
// Optional.
InitContainerImage *string `json:"initContainerImage,omitempty"`
// ImagePullPolicy is the image pull policy for the driver, executor, and init-container.
// Optional.
ImagePullPolicy *string `json:"imagePullPolicy,omitempty"`
// ImagePullSecrets is the list of image-pull secrets.
// Optional.
ImagePullSecrets []string `json:"imagePullSecrets,omitempty"`
// MainClass is the fully-qualified main class of the Spark application.
// This only applies to Java/Scala Spark applications.
// Optional.
MainClass *string `json:"mainClass,omitempty"`
// MainFile is the path to a bundled JAR, Python, or R file of the application.
// Optional.
MainApplicationFile *string `json:"mainApplicationFile"`
// Arguments is a list of arguments to be passed to the application.
// Optional.
Arguments []string `json:"arguments,omitempty"`
// SparkConf carries user-specified Spark configuration properties as they would use the "--conf" option in
// spark-submit.
// Optional.
SparkConf map[string]string `json:"sparkConf,omitempty"`
// HadoopConf carries user-specified Hadoop configuration properties as they would use the "--conf" option
// in spark-submit. The SparkApplication controller automatically adds prefix "spark.hadoop." to Hadoop
// configuration properties.
// Optional.
HadoopConf map[string]string `json:"hadoopConf,omitempty"`
// SparkConfigMap carries the name of the ConfigMap containing Spark configuration files such as log4j.properties.
// The controller will add environment variable SPARK_CONF_DIR to the path where the ConfigMap is mounted to.
// Optional.
SparkConfigMap *string `json:"sparkConfigMap,omitempty"`
// HadoopConfigMap carries the name of the ConfigMap containing Hadoop configuration files such as core-site.xml.
// The controller will add environment variable HADOOP_CONF_DIR to the path where the ConfigMap is mounted to.
// Optional.
HadoopConfigMap *string `json:"hadoopConfigMap,omitempty"`
// Volumes is the list of Kubernetes volumes that can be mounted by the driver and/or executors.
// Optional.
Volumes []corev1.Volume `json:"volumes,omitempty"`
// Driver is the driver specification.
Driver DriverSpec `json:"driver"`
// Executor is the executor specification.
Executor ExecutorSpec `json:"executor"`
// Deps captures all possible types of dependencies of a Spark application.
Deps Dependencies `json:"deps"`
// RestartPolicy defines the policy on if and in which conditions the controller should restart an application.
RestartPolicy RestartPolicy `json:"restartPolicy,omitempty"`
// NodeSelector is the Kubernetes node selector to be added to the driver and executor pods.
// This field is mutually exclusive with nodeSelector at podSpec level (driver or executor).
// This field will be deprecated in future versions (at SparkApplicationSpec level).
// Optional.
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
// FailureRetries is the number of times to retry a failed application before giving up.
// This is best effort and actual retry attempts can be >= the value specified.
// Optional.
FailureRetries *int32 `json:"failureRetries,omitempty"`
// RetryInterval is the unit of intervals in seconds between submission retries.
// Optional.
RetryInterval *int64 `json:"retryInterval,omitempty"`
// This sets the major Python version of the docker
// image used to run the driver and executor containers. Can either be 2 or 3, default 2.
// Optional.
PythonVersion *string `json:"pythonVersion,omitempty"`
// This sets the Memory Overhead Factor that will allocate memory to non-JVM memory.
// For JVM-based jobs this value will default to 0.10, for non-JVM jobs 0.40. Value of this field will
// be overridden by `Spec.Driver.MemoryOverhead` and `Spec.Executor.MemoryOverhead` if they are set.
// Optional.
MemoryOverheadFactor *string `json:"memoryOverheadFactor,omitempty"`
// Monitoring configures how monitoring is handled.
// Optional.
Monitoring *MonitoringSpec `json:"monitoring,omitempty"`
// BatchScheduler configures which batch scheduler will be used for scheduling
// Optional.
BatchScheduler *string `json:"batchScheduler,omitempty"`
}
// SparkApplicationStatus defines the observed state of SparkApplication
type SparkApplicationStatus struct {
// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
// Important: Run "make generate" to regenerate code after modifying this file
// SparkApplicationID is set by the spark-distribution(via spark.app.id config) on the driver and executor pods
SparkApplicationID string `json:"sparkApplicationId,omitempty"`
// SubmissionID is a unique ID of the current submission of the application.
SubmissionID string `json:"submissionID,omitempty"`
// LastSubmissionAttemptTime is the time for the last application submission attempt.
LastSubmissionAttemptTime metav1.Time `json:"lastSubmissionAttemptTime,omitempty"`
// CompletionTime is the time when the application runs to completion if it does.
TerminationTime metav1.Time `json:"terminationTime,omitempty"`
// DriverInfo has information about the driver.
DriverInfo DriverInfo `json:"driverInfo"`
// AppState tells the overall application state.
AppState ApplicationState `json:"applicationState,omitempty"`
// ExecutorState records the state of executors by executor Pod names.
ExecutorState map[string]ExecutorState `json:"executorState,omitempty"`
// ExecutionAttempts is the total number of attempts to run a submitted application to completion.
// Incremented upon each attempted run of the application and reset upon invalidation.
ExecutionAttempts int32 `json:"executionAttempts,omitempty"`
// SubmissionAttempts is the total number of attempts to submit an application to run.
// Incremented upon each attempted submission of the application and reset upon invalidation and rerun.
SubmissionAttempts int32 `json:"submissionAttempts,omitempty"`
}
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// SparkApplication is the Schema for the sparkapplications API
type SparkApplication struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec SparkApplicationSpec `json:"spec,omitempty"`
Status SparkApplicationStatus `json:"status,omitempty"`
}
// +kubebuilder:object:root=true
// SparkApplicationList contains a list of SparkApplication
type SparkApplicationList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []SparkApplication `json:"items"`
}
// SparkApplicationType describes the type of a Spark application.
type SparkApplicationType string
// Different types of Spark applications.
const (
JavaApplicationType SparkApplicationType = "Java"
ScalaApplicationType SparkApplicationType = "Scala"
PythonApplicationType SparkApplicationType = "Python"
RApplicationType SparkApplicationType = "R"
)
// DeployMode describes the type of deployment of a Spark application.
type DeployMode string
// Different types of deployments.
const (
ClusterMode DeployMode = "cluster"
ClientMode DeployMode = "client"
InClusterClientMode DeployMode = "in-cluster-client"
)
// RestartPolicy is the policy of if and in which conditions the controller should restart a terminated application.
// This completely defines actions to be taken on any kind of Failures during an application run.
type RestartPolicy struct {
Type RestartPolicyType `json:"type,omitempty"`
// FailureRetries are the number of times to retry a failed application before giving up in a particular case.
// This is best effort and actual retry attempts can be >= the value specified due to caching.
// These are required if RestartPolicy is OnFailure.
OnSubmissionFailureRetries *int32 `json:"onSubmissionFailureRetries,omitempty"`
OnFailureRetries *int32 `json:"onFailureRetries,omitempty"`
// Interval to wait between successive retries of a failed application.
OnSubmissionFailureRetryInterval *int64 `json:"onSubmissionFailureRetryInterval,omitempty"`
OnFailureRetryInterval *int64 `json:"onFailureRetryInterval,omitempty"`
}
type RestartPolicyType string
const (
Never RestartPolicyType = "Never"
OnFailure RestartPolicyType = "OnFailure"
Always RestartPolicyType = "Always"
)
type ConcurrencyPolicy string
const (
// ConcurrencyAllow allows SparkApplications to run concurrently.
ConcurrencyAllow ConcurrencyPolicy = "Allow"
// ConcurrencyForbid forbids concurrent runs of SparkApplications, skipping the next run if the previous
// one hasn't finished yet.
ConcurrencyForbid ConcurrencyPolicy = "Forbid"
// ConcurrencyReplace kills the currently running SparkApplication instance and replaces it with a new one.
ConcurrencyReplace ConcurrencyPolicy = "Replace"
)
// ApplicationStateType represents the type of the current state of an application.
type ApplicationStateType string
// Different states an application may have.
const (
NewState ApplicationStateType = ""
SubmittedState ApplicationStateType = "SUBMITTED"
RunningState ApplicationStateType = "RUNNING"
CompletedState ApplicationStateType = "COMPLETED"
FailedState ApplicationStateType = "FAILED"
FailedSubmissionState ApplicationStateType = "SUBMISSION_FAILED"
PendingRerunState ApplicationStateType = "PENDING_RERUN"
InvalidatingState ApplicationStateType = "INVALIDATING"
SucceedingState ApplicationStateType = "SUCCEEDING"
FailingState ApplicationStateType = "FAILING"
UnknownState ApplicationStateType = "UNKNOWN"
)
// ApplicationState tells the current state of the application and an error message in case of failures.
type ApplicationState struct {
State ApplicationStateType `json:"state"`
ErrorMessage string `json:"errorMessage,omitempty"`
}
// ExecutorState tells the current state of an executor.
type ExecutorState string
// Different states an executor may have.
const (
ExecutorPendingState ExecutorState = "PENDING"
ExecutorRunningState ExecutorState = "RUNNING"
ExecutorCompletedState ExecutorState = "COMPLETED"
ExecutorFailedState ExecutorState = "FAILED"
ExecutorUnknownState ExecutorState = "UNKNOWN"
)
// Dependencies specifies all possible types of dependencies of a Spark application.
type Dependencies struct {
// Jars is a list of JAR files the Spark application depends on.
// Optional.
Jars []string `json:"jars,omitempty"`
// Files is a list of files the Spark application depends on.
// Optional.
Files []string `json:"files,omitempty"`
// PyFiles is a list of Python files the Spark application depends on.
// Optional.
PyFiles []string `json:"pyFiles,omitempty"`
// JarsDownloadDir is the location to download jars to in the driver and executors.
JarsDownloadDir *string `json:"jarsDownloadDir,omitempty"`
// FilesDownloadDir is the location to download files to in the driver and executors.
FilesDownloadDir *string `json:"filesDownloadDir,omitempty"`
// DownloadTimeout specifies the timeout in seconds before aborting the attempt to download
// and unpack dependencies from remote locations into the driver and executor pods.
DownloadTimeout *int32 `json:"downloadTimeout,omitempty"`
// MaxSimultaneousDownloads specifies the maximum number of remote dependencies to download
// simultaneously in a driver or executor pod.
MaxSimultaneousDownloads *int32 `json:"maxSimultaneousDownloads,omitempty"`
}
// SparkPodSpec defines common things that can be customized for a Spark driver or executor pod.
// TODO: investigate if we should use v1.PodSpec and limit what can be set instead.
type SparkPodSpec struct {
// Cores is the number of CPU cores to request for the pod.
// Optional.
Cores *float32 `json:"cores,omitempty"`
// CoreLimit specifies a hard limit on CPU cores for the pod.
// Optional
CoreLimit *string `json:"coreLimit,omitempty"`
// Memory is the amount of memory to request for the pod.
// Optional.
Memory *string `json:"memory,omitempty"`
// MemoryOverhead is the amount of off-heap memory to allocate in cluster mode, in MiB unless otherwise specified.
// Optional.
MemoryOverhead *string `json:"memoryOverhead,omitempty"`
// GPU specifies GPU requirement for the pod.
// Optional.
GPU *GPUSpec `json:"gpu,omitempty"`
// Image is the container image to use. Overrides Spec.Image if set.
// Optional.
Image *string `json:"image,omitempty"`
// ConfigMaps carries information of other ConfigMaps to add to the pod.
// Optional.
ConfigMaps []NamePath `json:"configMaps,omitempty"`
// Secrets carries information of secrets to add to the pod.
// Optional.
Secrets []SecretInfo `json:"secrets,omitempty"`
// EnvVars carries the environment variables to add to the pod.
// Optional.
EnvVars map[string]string `json:"envVars,omitempty"`
// EnvSecretKeyRefs holds a mapping from environment variable names to SecretKeyRefs.
// Optional.
EnvSecretKeyRefs map[string]NameKey `json:"envSecretKeyRefs,omitempty"`
// Labels are the Kubernetes labels to be added to the pod.
// Optional.
Labels map[string]string `json:"labels,omitempty"`
// Annotations are the Kubernetes annotations to be added to the pod.
// Optional.
Annotations map[string]string `json:"annotations,omitempty"`
// VolumeMounts specifies the volumes listed in ".spec.volumes" to mount into the main container's filesystem.
// Optional.
VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty"`
// Affinity specifies the affinity/anti-affinity settings for the pod.
// Optional.
Affinity *corev1.Affinity `json:"affinity,omitempty"`
// Tolerations specifies the tolerations listed in ".spec.tolerations" to be applied to the pod.
// Optional.
Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
// SecurityContext specifies the PodSecurityContext to apply.
// Optional.
SecurityContext *corev1.PodSecurityContext `json:"securityContext,omitempty"`
// SchedulerName specifies the scheduler that will be used for scheduling
// Optional.
SchedulerName *string `json:"schedulerName,omitempty"`
// Sidecars is a list of sidecar containers that run along side the main Spark container.
// Optional.
Sidecars []corev1.Container `json:"sidecars,omitempty"`
// HostNetwork indicates whether to request host networking for the pod or not.
// Optional.
HostNetwork *bool `json:"hostNetwork,omitempty"`
// NodeSelector is the Kubernetes node selector to be added to the driver and executor pods.
// This field is mutually exclusive with nodeSelector at SparkApplication level (which will be deprecated).
// Optional.
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
// DnsConfig dns settings for the pod, following the Kubernetes specifications.
// Optional.
DNSConfig *corev1.PodDNSConfig `json:"dnsConfig,omitempty"`
}
// DriverSpec is specification of the driver.
type DriverSpec struct {
SparkPodSpec `json:",inline"`
// PodName is the name of the driver pod that the user creates. This is used for the
// in-cluster client mode in which the user creates a client pod where the driver of
// the user application runs. It's an error to set this field if Mode is not
// in-cluster-client.
// Optional.
PodName *string `json:"podName,omitempty"`
// ServiceAccount is the name of the Kubernetes service account used by the driver pod
// when requesting executor pods from the API server.
ServiceAccount *string `json:"serviceAccount,omitempty"`
// JavaOptions is a string of extra JVM options to pass to the driver. For instance,
// GC settings or other logging.
JavaOptions *string `json:"javaOptions,omitempty"`
}
// ExecutorSpec is specification of the executor.
type ExecutorSpec struct {
SparkPodSpec `json:",inline"`
// Instances is the number of executor instances.
// Optional.
Instances *int32 `json:"instances,omitempty"`
// CoreRequest is the physical CPU core request for the executors.
// Optional.
CoreRequest *string `json:"coreRequest,omitempty"`
// JavaOptions is a string of extra JVM options to pass to the executors. For instance,
// GC settings or other logging.
JavaOptions *string `json:"javaOptions,omitempty"`
}
// NamePath is a pair of a name and a path to which the named objects should be mounted to.
type NamePath struct {
Name string `json:"name"`
Path string `json:"path"`
}
// SecretType tells the type of a secret.
type SecretType string
// An enumeration of secret types supported.
const (
// GCPServiceAccountSecret is for secrets from a GCP service account Json key file that needs
// the environment variable GOOGLE_APPLICATION_CREDENTIALS.
GCPServiceAccountSecret SecretType = "GCPServiceAccount"
// HadoopDelegationTokenSecret is for secrets from an Hadoop delegation token that needs the
// environment variable HADOOP_TOKEN_FILE_LOCATION.
HadoopDelegationTokenSecret SecretType = "HadoopDelegationToken"
// GenericType is for secrets that needs no special handling.
GenericType SecretType = "Generic"
)
// DriverInfo captures information about the driver.
type DriverInfo struct {
WebUIServiceName string `json:"webUIServiceName,omitempty"`
// UI Details for the UI created via ClusterIP service accessible from within the cluster.
WebUIPort int32 `json:"webUIPort,omitempty"`
WebUIAddress string `json:"webUIAddress,omitempty"`
// Ingress Details if an ingress for the UI was created.
WebUIIngressName string `json:"webUIIngressName,omitempty"`
WebUIIngressAddress string `json:"webUIIngressAddress,omitempty"`
PodName string `json:"podName,omitempty"`
}
// SecretInfo captures information of a secret.
type SecretInfo struct {
Name string `json:"name"`
Path string `json:"path"`
Type SecretType `json:"secretType"`
}
// NameKey represents the name and key of a SecretKeyRef.
type NameKey struct {
Name string `json:"name"`
Key string `json:"key"`
}
// MonitoringSpec defines the monitoring specification.
type MonitoringSpec struct {
// ExposeDriverMetrics specifies whether to expose metrics on the driver.
ExposeDriverMetrics bool `json:"exposeDriverMetrics"`
// ExposeExecutorMetrics specifies whether to expose metrics on the executors.
ExposeExecutorMetrics bool `json:"exposeExecutorMetrics"`
// MetricsProperties is the content of a custom metrics.properties for configuring the Spark metric system.
// Optional.
// If not specified, the content in spark-docker/conf/metrics.properties will be used.
MetricsProperties *string `json:"metricsProperties,omitempty"`
// Prometheus is for configuring the Prometheus JMX exporter.
// Optional.
Prometheus *PrometheusSpec `json:"prometheus,omitempty"`
}
// PrometheusSpec defines the Prometheus specification when Prometheus is to be used for
// collecting and exposing metrics.
type PrometheusSpec struct {
// JmxExporterJar is the path to the Prometheus JMX exporter jar in the container.
JmxExporterJar string `json:"jmxExporterJar"`
// Port is the port of the HTTP server run by the Prometheus JMX exporter.
// Optional.
// If not specified, 8090 will be used as the default.
Port *int32 `json:"port"`
// ConfigFile is the path to the custom Prometheus configuration file provided in the Spark image.
// ConfigFile takes precedence over Configuration, which is shown below.
ConfigFile *string `json:"configFile,omitempty"`
// Configuration is the content of the Prometheus configuration needed by the Prometheus JMX exporter.
// Optional.
// If not specified, the content in spark-docker/conf/prometheus.yaml will be used.
// Configuration has no effect if ConfigFile is set.
Configuration *string `json:"configuration,omitempty"`
}
type GPUSpec struct {
// Name is GPU resource name, such as: nvidia.com/gpu or amd.com/gpu
Name string `json:"name"`
// Quantity is the number of GPUs to request for driver or executor.
Quantity int64 `json:"quantity"`
}
// PrometheusMonitoringEnabled returns if Prometheus monitoring is enabled or not.
func (s *SparkApplication) PrometheusMonitoringEnabled() bool {
return s.Spec.Monitoring != nil && s.Spec.Monitoring.Prometheus != nil
}
// HasPrometheusConfigFile returns if Prometheus monitoring uses a configuration file in the container.
func (s *SparkApplication) HasPrometheusConfigFile() bool {
return s.PrometheusMonitoringEnabled() &&
s.Spec.Monitoring.Prometheus.ConfigFile != nil &&
*s.Spec.Monitoring.Prometheus.ConfigFile != ""
}
// ExposeDriverMetrics returns if driver metrics should be exposed.
func (s *SparkApplication) ExposeDriverMetrics() bool {
return s.Spec.Monitoring != nil && s.Spec.Monitoring.ExposeDriverMetrics
}
// ExposeExecutorMetrics returns if executor metrics should be exposed.
func (s *SparkApplication) ExposeExecutorMetrics() bool {
return s.Spec.Monitoring != nil && s.Spec.Monitoring.ExposeExecutorMetrics
}
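
The convenience methods above are easiest to see with a concrete value; the following sketch uses invented Prometheus settings purely to show which combinations make each predicate return true.

package main

import (
	"fmt"

	"github.com/kubeflow/spark-operator/api/v1beta1"
)

func main() {
	configFile := "/etc/metrics/prometheus.yaml" // hypothetical path inside the Spark image
	app := &v1beta1.SparkApplication{
		Spec: v1beta1.SparkApplicationSpec{
			Monitoring: &v1beta1.MonitoringSpec{
				ExposeDriverMetrics: true,
				Prometheus: &v1beta1.PrometheusSpec{
					JmxExporterJar: "/prometheus/jmx_prometheus_javaagent.jar", // hypothetical jar location
					ConfigFile:     &configFile,
				},
			},
		},
	}

	fmt.Println(app.PrometheusMonitoringEnabled()) // true: Monitoring and Prometheus are both set
	fmt.Println(app.HasPrometheusConfigFile())     // true: ConfigFile is non-nil and non-empty
	fmt.Println(app.ExposeDriverMetrics())         // true
	fmt.Println(app.ExposeExecutorMetrics())       // false
}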


@@ -1,778 +0,0 @@
//go:build !ignore_autogenerated
/*
Copyright 2024 The Kubeflow authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by controller-gen. DO NOT EDIT.
package v1beta1
import (
"k8s.io/api/core/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ApplicationState) DeepCopyInto(out *ApplicationState) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationState.
func (in *ApplicationState) DeepCopy() *ApplicationState {
if in == nil {
return nil
}
out := new(ApplicationState)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Dependencies) DeepCopyInto(out *Dependencies) {
*out = *in
if in.Jars != nil {
in, out := &in.Jars, &out.Jars
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Files != nil {
in, out := &in.Files, &out.Files
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.PyFiles != nil {
in, out := &in.PyFiles, &out.PyFiles
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.JarsDownloadDir != nil {
in, out := &in.JarsDownloadDir, &out.JarsDownloadDir
*out = new(string)
**out = **in
}
if in.FilesDownloadDir != nil {
in, out := &in.FilesDownloadDir, &out.FilesDownloadDir
*out = new(string)
**out = **in
}
if in.DownloadTimeout != nil {
in, out := &in.DownloadTimeout, &out.DownloadTimeout
*out = new(int32)
**out = **in
}
if in.MaxSimultaneousDownloads != nil {
in, out := &in.MaxSimultaneousDownloads, &out.MaxSimultaneousDownloads
*out = new(int32)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Dependencies.
func (in *Dependencies) DeepCopy() *Dependencies {
if in == nil {
return nil
}
out := new(Dependencies)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DriverInfo) DeepCopyInto(out *DriverInfo) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DriverInfo.
func (in *DriverInfo) DeepCopy() *DriverInfo {
if in == nil {
return nil
}
out := new(DriverInfo)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DriverSpec) DeepCopyInto(out *DriverSpec) {
*out = *in
in.SparkPodSpec.DeepCopyInto(&out.SparkPodSpec)
if in.PodName != nil {
in, out := &in.PodName, &out.PodName
*out = new(string)
**out = **in
}
if in.ServiceAccount != nil {
in, out := &in.ServiceAccount, &out.ServiceAccount
*out = new(string)
**out = **in
}
if in.JavaOptions != nil {
in, out := &in.JavaOptions, &out.JavaOptions
*out = new(string)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DriverSpec.
func (in *DriverSpec) DeepCopy() *DriverSpec {
if in == nil {
return nil
}
out := new(DriverSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExecutorSpec) DeepCopyInto(out *ExecutorSpec) {
*out = *in
in.SparkPodSpec.DeepCopyInto(&out.SparkPodSpec)
if in.Instances != nil {
in, out := &in.Instances, &out.Instances
*out = new(int32)
**out = **in
}
if in.CoreRequest != nil {
in, out := &in.CoreRequest, &out.CoreRequest
*out = new(string)
**out = **in
}
if in.JavaOptions != nil {
in, out := &in.JavaOptions, &out.JavaOptions
*out = new(string)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecutorSpec.
func (in *ExecutorSpec) DeepCopy() *ExecutorSpec {
if in == nil {
return nil
}
out := new(ExecutorSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GPUSpec) DeepCopyInto(out *GPUSpec) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GPUSpec.
func (in *GPUSpec) DeepCopy() *GPUSpec {
if in == nil {
return nil
}
out := new(GPUSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MonitoringSpec) DeepCopyInto(out *MonitoringSpec) {
*out = *in
if in.MetricsProperties != nil {
in, out := &in.MetricsProperties, &out.MetricsProperties
*out = new(string)
**out = **in
}
if in.Prometheus != nil {
in, out := &in.Prometheus, &out.Prometheus
*out = new(PrometheusSpec)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringSpec.
func (in *MonitoringSpec) DeepCopy() *MonitoringSpec {
if in == nil {
return nil
}
out := new(MonitoringSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NameKey) DeepCopyInto(out *NameKey) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NameKey.
func (in *NameKey) DeepCopy() *NameKey {
if in == nil {
return nil
}
out := new(NameKey)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NamePath) DeepCopyInto(out *NamePath) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamePath.
func (in *NamePath) DeepCopy() *NamePath {
if in == nil {
return nil
}
out := new(NamePath)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PrometheusSpec) DeepCopyInto(out *PrometheusSpec) {
*out = *in
if in.Port != nil {
in, out := &in.Port, &out.Port
*out = new(int32)
**out = **in
}
if in.ConfigFile != nil {
in, out := &in.ConfigFile, &out.ConfigFile
*out = new(string)
**out = **in
}
if in.Configuration != nil {
in, out := &in.Configuration, &out.Configuration
*out = new(string)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusSpec.
func (in *PrometheusSpec) DeepCopy() *PrometheusSpec {
if in == nil {
return nil
}
out := new(PrometheusSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RestartPolicy) DeepCopyInto(out *RestartPolicy) {
*out = *in
if in.OnSubmissionFailureRetries != nil {
in, out := &in.OnSubmissionFailureRetries, &out.OnSubmissionFailureRetries
*out = new(int32)
**out = **in
}
if in.OnFailureRetries != nil {
in, out := &in.OnFailureRetries, &out.OnFailureRetries
*out = new(int32)
**out = **in
}
if in.OnSubmissionFailureRetryInterval != nil {
in, out := &in.OnSubmissionFailureRetryInterval, &out.OnSubmissionFailureRetryInterval
*out = new(int64)
**out = **in
}
if in.OnFailureRetryInterval != nil {
in, out := &in.OnFailureRetryInterval, &out.OnFailureRetryInterval
*out = new(int64)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestartPolicy.
func (in *RestartPolicy) DeepCopy() *RestartPolicy {
if in == nil {
return nil
}
out := new(RestartPolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScheduledSparkApplication) DeepCopyInto(out *ScheduledSparkApplication) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledSparkApplication.
func (in *ScheduledSparkApplication) DeepCopy() *ScheduledSparkApplication {
if in == nil {
return nil
}
out := new(ScheduledSparkApplication)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ScheduledSparkApplication) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScheduledSparkApplicationList) DeepCopyInto(out *ScheduledSparkApplicationList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ScheduledSparkApplication, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledSparkApplicationList.
func (in *ScheduledSparkApplicationList) DeepCopy() *ScheduledSparkApplicationList {
if in == nil {
return nil
}
out := new(ScheduledSparkApplicationList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ScheduledSparkApplicationList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScheduledSparkApplicationSpec) DeepCopyInto(out *ScheduledSparkApplicationSpec) {
*out = *in
in.Template.DeepCopyInto(&out.Template)
if in.Suspend != nil {
in, out := &in.Suspend, &out.Suspend
*out = new(bool)
**out = **in
}
if in.SuccessfulRunHistoryLimit != nil {
in, out := &in.SuccessfulRunHistoryLimit, &out.SuccessfulRunHistoryLimit
*out = new(int32)
**out = **in
}
if in.FailedRunHistoryLimit != nil {
in, out := &in.FailedRunHistoryLimit, &out.FailedRunHistoryLimit
*out = new(int32)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledSparkApplicationSpec.
func (in *ScheduledSparkApplicationSpec) DeepCopy() *ScheduledSparkApplicationSpec {
if in == nil {
return nil
}
out := new(ScheduledSparkApplicationSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScheduledSparkApplicationStatus) DeepCopyInto(out *ScheduledSparkApplicationStatus) {
*out = *in
in.LastRun.DeepCopyInto(&out.LastRun)
in.NextRun.DeepCopyInto(&out.NextRun)
if in.PastSuccessfulRunNames != nil {
in, out := &in.PastSuccessfulRunNames, &out.PastSuccessfulRunNames
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.PastFailedRunNames != nil {
in, out := &in.PastFailedRunNames, &out.PastFailedRunNames
*out = make([]string, len(*in))
copy(*out, *in)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledSparkApplicationStatus.
func (in *ScheduledSparkApplicationStatus) DeepCopy() *ScheduledSparkApplicationStatus {
if in == nil {
return nil
}
out := new(ScheduledSparkApplicationStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SecretInfo) DeepCopyInto(out *SecretInfo) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretInfo.
func (in *SecretInfo) DeepCopy() *SecretInfo {
if in == nil {
return nil
}
out := new(SecretInfo)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SparkApplication) DeepCopyInto(out *SparkApplication) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkApplication.
func (in *SparkApplication) DeepCopy() *SparkApplication {
if in == nil {
return nil
}
out := new(SparkApplication)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *SparkApplication) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SparkApplicationList) DeepCopyInto(out *SparkApplicationList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]SparkApplication, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkApplicationList.
func (in *SparkApplicationList) DeepCopy() *SparkApplicationList {
if in == nil {
return nil
}
out := new(SparkApplicationList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *SparkApplicationList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SparkApplicationSpec) DeepCopyInto(out *SparkApplicationSpec) {
*out = *in
if in.Image != nil {
in, out := &in.Image, &out.Image
*out = new(string)
**out = **in
}
if in.InitContainerImage != nil {
in, out := &in.InitContainerImage, &out.InitContainerImage
*out = new(string)
**out = **in
}
if in.ImagePullPolicy != nil {
in, out := &in.ImagePullPolicy, &out.ImagePullPolicy
*out = new(string)
**out = **in
}
if in.ImagePullSecrets != nil {
in, out := &in.ImagePullSecrets, &out.ImagePullSecrets
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.MainClass != nil {
in, out := &in.MainClass, &out.MainClass
*out = new(string)
**out = **in
}
if in.MainApplicationFile != nil {
in, out := &in.MainApplicationFile, &out.MainApplicationFile
*out = new(string)
**out = **in
}
if in.Arguments != nil {
in, out := &in.Arguments, &out.Arguments
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.SparkConf != nil {
in, out := &in.SparkConf, &out.SparkConf
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.HadoopConf != nil {
in, out := &in.HadoopConf, &out.HadoopConf
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.SparkConfigMap != nil {
in, out := &in.SparkConfigMap, &out.SparkConfigMap
*out = new(string)
**out = **in
}
if in.HadoopConfigMap != nil {
in, out := &in.HadoopConfigMap, &out.HadoopConfigMap
*out = new(string)
**out = **in
}
if in.Volumes != nil {
in, out := &in.Volumes, &out.Volumes
*out = make([]v1.Volume, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
in.Driver.DeepCopyInto(&out.Driver)
in.Executor.DeepCopyInto(&out.Executor)
in.Deps.DeepCopyInto(&out.Deps)
in.RestartPolicy.DeepCopyInto(&out.RestartPolicy)
if in.NodeSelector != nil {
in, out := &in.NodeSelector, &out.NodeSelector
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.FailureRetries != nil {
in, out := &in.FailureRetries, &out.FailureRetries
*out = new(int32)
**out = **in
}
if in.RetryInterval != nil {
in, out := &in.RetryInterval, &out.RetryInterval
*out = new(int64)
**out = **in
}
if in.PythonVersion != nil {
in, out := &in.PythonVersion, &out.PythonVersion
*out = new(string)
**out = **in
}
if in.MemoryOverheadFactor != nil {
in, out := &in.MemoryOverheadFactor, &out.MemoryOverheadFactor
*out = new(string)
**out = **in
}
if in.Monitoring != nil {
in, out := &in.Monitoring, &out.Monitoring
*out = new(MonitoringSpec)
(*in).DeepCopyInto(*out)
}
if in.BatchScheduler != nil {
in, out := &in.BatchScheduler, &out.BatchScheduler
*out = new(string)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkApplicationSpec.
func (in *SparkApplicationSpec) DeepCopy() *SparkApplicationSpec {
if in == nil {
return nil
}
out := new(SparkApplicationSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SparkApplicationStatus) DeepCopyInto(out *SparkApplicationStatus) {
*out = *in
in.LastSubmissionAttemptTime.DeepCopyInto(&out.LastSubmissionAttemptTime)
in.TerminationTime.DeepCopyInto(&out.TerminationTime)
out.DriverInfo = in.DriverInfo
out.AppState = in.AppState
if in.ExecutorState != nil {
in, out := &in.ExecutorState, &out.ExecutorState
*out = make(map[string]ExecutorState, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkApplicationStatus.
func (in *SparkApplicationStatus) DeepCopy() *SparkApplicationStatus {
if in == nil {
return nil
}
out := new(SparkApplicationStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SparkPodSpec) DeepCopyInto(out *SparkPodSpec) {
*out = *in
if in.Cores != nil {
in, out := &in.Cores, &out.Cores
*out = new(float32)
**out = **in
}
if in.CoreLimit != nil {
in, out := &in.CoreLimit, &out.CoreLimit
*out = new(string)
**out = **in
}
if in.Memory != nil {
in, out := &in.Memory, &out.Memory
*out = new(string)
**out = **in
}
if in.MemoryOverhead != nil {
in, out := &in.MemoryOverhead, &out.MemoryOverhead
*out = new(string)
**out = **in
}
if in.GPU != nil {
in, out := &in.GPU, &out.GPU
*out = new(GPUSpec)
**out = **in
}
if in.Image != nil {
in, out := &in.Image, &out.Image
*out = new(string)
**out = **in
}
if in.ConfigMaps != nil {
in, out := &in.ConfigMaps, &out.ConfigMaps
*out = make([]NamePath, len(*in))
copy(*out, *in)
}
if in.Secrets != nil {
in, out := &in.Secrets, &out.Secrets
*out = make([]SecretInfo, len(*in))
copy(*out, *in)
}
if in.EnvVars != nil {
in, out := &in.EnvVars, &out.EnvVars
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.EnvSecretKeyRefs != nil {
in, out := &in.EnvSecretKeyRefs, &out.EnvSecretKeyRefs
*out = make(map[string]NameKey, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.Labels != nil {
in, out := &in.Labels, &out.Labels
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.Annotations != nil {
in, out := &in.Annotations, &out.Annotations
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.VolumeMounts != nil {
in, out := &in.VolumeMounts, &out.VolumeMounts
*out = make([]v1.VolumeMount, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Affinity != nil {
in, out := &in.Affinity, &out.Affinity
*out = new(v1.Affinity)
(*in).DeepCopyInto(*out)
}
if in.Tolerations != nil {
in, out := &in.Tolerations, &out.Tolerations
*out = make([]v1.Toleration, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.SecurityContext != nil {
in, out := &in.SecurityContext, &out.SecurityContext
*out = new(v1.PodSecurityContext)
(*in).DeepCopyInto(*out)
}
if in.SchedulerName != nil {
in, out := &in.SchedulerName, &out.SchedulerName
*out = new(string)
**out = **in
}
if in.Sidecars != nil {
in, out := &in.Sidecars, &out.Sidecars
*out = make([]v1.Container, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.HostNetwork != nil {
in, out := &in.HostNetwork, &out.HostNetwork
*out = new(bool)
**out = **in
}
if in.NodeSelector != nil {
in, out := &in.NodeSelector, &out.NodeSelector
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.DNSConfig != nil {
in, out := &in.DNSConfig, &out.DNSConfig
*out = new(v1.PodDNSConfig)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkPodSpec.
func (in *SparkPodSpec) DeepCopy() *SparkPodSpec {
if in == nil {
return nil
}
out := new(SparkPodSpec)
in.DeepCopyInto(out)
return out
}


@@ -50,7 +50,6 @@ import (
schedulingv1alpha1 "sigs.k8s.io/scheduler-plugins/apis/scheduling/v1alpha1"
sparkoperator "github.com/kubeflow/spark-operator"
"github.com/kubeflow/spark-operator/api/v1beta1"
"github.com/kubeflow/spark-operator/api/v1beta2"
"github.com/kubeflow/spark-operator/internal/controller/scheduledsparkapplication"
"github.com/kubeflow/spark-operator/internal/controller/sparkapplication"
@@ -122,7 +121,6 @@ func init() {
utilruntime.Must(clientgoscheme.AddToScheme(scheme))
utilruntime.Must(schedulingv1alpha1.AddToScheme(scheme))
utilruntime.Must(v1beta1.AddToScheme(scheme))
utilruntime.Must(v1beta2.AddToScheme(scheme))
// +kubebuilder:scaffold:scheme
}
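
After this hunk, the operator registers only the v1beta2 group. Here is a minimal sketch of the resulting scheme setup, assuming the imports shown in the surrounding diff; it is not a verbatim copy of the file.

package main

import (
	"k8s.io/apimachinery/pkg/runtime"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	schedulingv1alpha1 "sigs.k8s.io/scheduler-plugins/apis/scheduling/v1alpha1"

	"github.com/kubeflow/spark-operator/api/v1beta2"
)

var scheme = runtime.NewScheme()

func init() {
	utilruntime.Must(clientgoscheme.AddToScheme(scheme))
	utilruntime.Must(schedulingv1alpha1.AddToScheme(scheme))
	utilruntime.Must(v1beta2.AddToScheme(scheme))
	// v1beta1.AddToScheme is gone along with the api/v1beta1 package.
	// +kubebuilder:scaffold:scheme
}

func main() {}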


@@ -49,7 +49,6 @@ import (
ctrlwebhook "sigs.k8s.io/controller-runtime/pkg/webhook"
sparkoperator "github.com/kubeflow/spark-operator"
"github.com/kubeflow/spark-operator/api/v1beta1"
"github.com/kubeflow/spark-operator/api/v1beta2"
"github.com/kubeflow/spark-operator/internal/controller/mutatingwebhookconfiguration"
"github.com/kubeflow/spark-operator/internal/controller/validatingwebhookconfiguration"
@@ -111,7 +110,6 @@ var (
func init() {
utilruntime.Must(clientgoscheme.AddToScheme(scheme))
utilruntime.Must(v1beta1.AddToScheme(scheme))
utilruntime.Must(v1beta2.AddToScheme(scheme))
// +kubebuilder:scaffold:scheme
}


@@ -1,7 +1,5 @@
## Append samples of your project ##
resources:
- v1beta1_sparkapplication.yaml
- v1beta1_scheduledsparkapplication.yaml
- v1beta2_sparkapplication.yaml
- v1beta2_scheduledsparkapplication.yaml
# +kubebuilder:scaffold:manifestskustomizesamples


@@ -1,9 +0,0 @@
apiVersion: sparkoperator.k8s.io/v1beta1
kind: ScheduledSparkApplication
metadata:
labels:
app.kubernetes.io/name: spark-operator
app.kubernetes.io/managed-by: kustomize
name: scheduledsparkapplication-sample
spec:
# TODO(user): Add fields here


@@ -1,23 +0,0 @@
apiVersion: sparkoperator.k8s.io/v1beta1
kind: SparkApplication
metadata:
labels:
app.kubernetes.io/name: spark-operator
app.kubernetes.io/managed-by: kustomize
name: sparkapplication-sample
spec:
type: Scala
mode: cluster
image: spark:3.5.5
imagePullPolicy: IfNotPresent
mainClass: org.apache.spark.examples.SparkPi
mainApplicationFile: local:///opt/spark/examples/jars/spark-examples.jar
sparkVersion: 3.5.5
driver:
labels:
version: 3.5.5
serviceAccount: spark-operator-spark
executor:
labels:
version: 3.5.5
instances: 1


@@ -32,7 +32,6 @@ import (
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
"github.com/kubeflow/spark-operator/api/v1beta1"
"github.com/kubeflow/spark-operator/api/v1beta2"
// +kubebuilder:scaffold:imports
)
@@ -76,9 +75,6 @@ var _ = BeforeSuite(func() {
err = v1beta2.AddToScheme(scheme.Scheme)
Expect(err).NotTo(HaveOccurred())
err = v1beta1.AddToScheme(scheme.Scheme)
Expect(err).NotTo(HaveOccurred())
// +kubebuilder:scaffold:scheme
k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})


@@ -32,7 +32,6 @@ import (
"sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
"github.com/kubeflow/spark-operator/api/v1beta1"
"github.com/kubeflow/spark-operator/api/v1beta2"
// +kubebuilder:scaffold:imports
)
@@ -76,9 +75,6 @@ var _ = BeforeSuite(func() {
err = v1beta2.AddToScheme(scheme.Scheme)
Expect(err).NotTo(HaveOccurred())
err = v1beta1.AddToScheme(scheme.Scheme)
Expect(err).NotTo(HaveOccurred())
// +kubebuilder:scaffold:scheme
k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})


@@ -40,7 +40,6 @@ import (
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
"sigs.k8s.io/controller-runtime/pkg/webhook"
"github.com/kubeflow/spark-operator/api/v1beta1"
"github.com/kubeflow/spark-operator/api/v1beta2"
// +kubebuilder:scaffold:imports
)
@@ -92,9 +91,6 @@ var _ = BeforeSuite(func() {
err = v1beta2.AddToScheme(scheme.Scheme)
Expect(err).NotTo(HaveOccurred())
err = v1beta1.AddToScheme(scheme.Scheme)
Expect(err).NotTo(HaveOccurred())
// +kubebuilder:scaffold:scheme
err = admissionv1.AddToScheme(scheme.Scheme)


@@ -32,7 +32,6 @@ import (
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
"github.com/kubeflow/spark-operator/api/v1beta1"
"github.com/kubeflow/spark-operator/api/v1beta2"
// +kubebuilder:scaffold:imports
)
@@ -76,9 +75,6 @@ var _ = BeforeSuite(func() {
err = v1beta2.AddToScheme(scheme.Scheme)
Expect(err).NotTo(HaveOccurred())
err = v1beta1.AddToScheme(scheme.Scheme)
Expect(err).NotTo(HaveOccurred())
// +kubebuilder:scaffold:scheme
k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})


@@ -46,7 +46,6 @@ import (
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
"github.com/kubeflow/spark-operator/api/v1beta1"
"github.com/kubeflow/spark-operator/api/v1beta2"
"github.com/kubeflow/spark-operator/pkg/util"
// +kubebuilder:scaffold:imports
@@ -105,9 +104,6 @@ var _ = BeforeSuite(func() {
err = v1beta2.AddToScheme(scheme.Scheme)
Expect(err).NotTo(HaveOccurred())
err = v1beta1.AddToScheme(scheme.Scheme)
Expect(err).NotTo(HaveOccurred())
// +kubebuilder:scaffold:scheme
k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})