Adding support for setting spark job namespaces to all namespaces (#2123)

Signed-off-by: Yi Chen <github@chenyicn.net>
This commit is contained in:
Yi Chen 2024-09-03 10:55:14 +08:00 committed by GitHub
parent 1afa72e7a0
commit c93b0ec0e7
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
15 changed files with 121 additions and 62 deletions

View File

@ -57,8 +57,12 @@ spec:
- --zap-log-level={{ . }}
{{- end }}
{{- with .Values.spark.jobNamespaces }}
{{- if has "" . }}
- --namespaces=""
{{- else }}
- --namespaces={{ . | join "," }}
{{- end }}
{{- end }}
- --controller-threads={{ .Values.controller.workers }}
{{- with .Values.controller.uiService.enable }}
- --enable-ui-service=true

View File

@ -16,7 +16,7 @@ limitations under the License.
{{- if .Values.spark.rbac.create -}}
{{- range $jobNamespace := .Values.spark.jobNamespaces | default list }}
{{- if $jobNamespace }}
{{- if ne $jobNamespace "" }}
---
apiVersion: rbac.authorization.k8s.io/v1

View File

@ -15,16 +15,19 @@ limitations under the License.
*/}}
{{- if .Values.spark.serviceAccount.create }}
{{- range $sparkJobNamespace := .Values.spark.jobNamespaces | default list }}
{{- range $jobNamespace := .Values.spark.jobNamespaces | default list }}
{{- if ne $jobNamespace "" }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "spark-operator.spark.serviceAccountName" $ }}
namespace: {{ $sparkJobNamespace }}
namespace: {{ $jobNamespace }}
labels: {{ include "spark-operator.labels" $ | nindent 4 }}
{{- with $.Values.spark.serviceAccount.annotations }}
annotations: {{ toYaml . | nindent 4 }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}

View File

@ -51,8 +51,12 @@ spec:
- --zap-log-level={{ . }}
{{- end }}
{{- with .Values.spark.jobNamespaces }}
{{- if has "" . }}
- --namespaces=""
{{- else }}
- --namespaces={{ . | join "," }}
{{- end }}
{{- end }}
- --webhook-secret-name={{ include "spark-operator.webhook.secretName" . }}
- --webhook-secret-namespace={{ .Release.Namespace }}
- --webhook-svc-name={{ include "spark-operator.webhook.serviceName" . }}

View File

@ -34,16 +34,18 @@ webhooks:
{{- with .Values.webhook.failurePolicy }}
failurePolicy: {{ . }}
{{- end }}
{{- if .Values.spark.jobNamespaces }}
{{- with .Values.spark.jobNamespaces }}
{{- if not (has "" .) }}
namespaceSelector:
matchExpressions:
- key: kubernetes.io/metadata.name
operator: In
values:
{{- range .Values.spark.jobNamespaces }}
- {{ . }}
{{- range $jobNamespace := . }}
- {{ $jobNamespace }}
{{- end }}
{{- end }}
{{- end }}
objectSelector:
matchLabels:
sparkoperator.k8s.io/launched-by-spark-operator: "true"
@ -67,16 +69,18 @@ webhooks:
{{- with .Values.webhook.failurePolicy }}
failurePolicy: {{ . }}
{{- end }}
{{- if .Values.spark.jobNamespaces }}
{{- with .Values.spark.jobNamespaces }}
{{- if not (has "" .) }}
namespaceSelector:
matchExpressions:
- key: kubernetes.io/metadata.name
operator: In
values:
{{- range .Values.spark.jobNamespaces }}
- {{ . }}
{{- range $jobNamespace := . }}
- {{ $jobNamespace }}
{{- end }}
{{- end }}
{{- end }}
rules:
- apiGroups: ["sparkoperator.k8s.io"]
apiVersions: ["v1beta2"]
@ -97,16 +101,18 @@ webhooks:
{{- with .Values.webhook.failurePolicy }}
failurePolicy: {{ . }}
{{- end }}
{{- if .Values.spark.jobNamespaces }}
{{- with .Values.spark.jobNamespaces }}
{{- if not (has "" .) }}
namespaceSelector:
matchExpressions:
- key: kubernetes.io/metadata.name
operator: In
values:
{{- range .Values.spark.jobNamespaces }}
- {{ . }}
{{- range $jobNamespace := . }}
- {{ $jobNamespace }}
{{- end }}
{{- end }}
{{- end }}
rules:
- apiGroups: ["sparkoperator.k8s.io"]
apiVersions: ["v1beta2"]

View File

@ -34,16 +34,18 @@ webhooks:
{{- with .Values.webhook.failurePolicy }}
failurePolicy: {{ . }}
{{- end }}
{{- if .Values.spark.jobNamespaces }}
{{- with .Values.spark.jobNamespaces }}
{{- if not (has "" .) }}
namespaceSelector:
matchExpressions:
- key: kubernetes.io/metadata.name
operator: In
values:
{{- range .Values.spark.jobNamespaces }}
- {{ . }}
{{- range $jobNamespace := . }}
- {{ $jobNamespace }}
{{- end }}
{{- end }}
{{- end }}
rules:
- apiGroups: ["sparkoperator.k8s.io"]
apiVersions: ["v1beta2"]
@ -64,16 +66,18 @@ webhooks:
{{- with .Values.webhook.failurePolicy }}
failurePolicy: {{ . }}
{{- end }}
{{- if .Values.spark.jobNamespaces }}
{{- with .Values.spark.jobNamespaces }}
{{- if not (has "" .) }}
namespaceSelector:
matchExpressions:
- key: kubernetes.io/metadata.name
operator: In
values:
{{- range .Values.spark.jobNamespaces }}
- {{ . }}
{{- range $jobNamespace := . }}
- {{ $jobNamespace }}
{{- end }}
{{- end }}
{{- end }}
rules:
- apiGroups: ["sparkoperator.k8s.io"]
apiVersions: ["v1beta2"]

View File

@ -119,14 +119,26 @@ tests:
- it: Should contain `--namespaces` arg if `spark.jobNamespaces` is set
set:
spark.jobNamespaces:
- ns1
- ns2
spark:
jobNamespaces:
- ns1
- ns2
asserts:
- contains:
path: spec.template.spec.containers[?(@.name=="spark-operator-controller")].args
content: --namespaces=ns1,ns2
- it: Should set namespaces to all namespaces (`""`) if `spark.jobNamespaces` contains empty string
set:
spark:
jobNamespaces:
- ""
- default
asserts:
- contains:
path: spec.template.spec.containers[?(@.name=="spark-operator-controller")].args
content: --namespaces=""
- it: Should contain `--controller-threads` arg if `controller.workers` is set
set:
controller:

View File

@ -66,59 +66,36 @@ tests:
path: metadata.annotations.key2
value: value2
- it: Should create multiple service accounts if `spark.jobNamespaces` is set
- it: Should create service account for every non-empty spark job namespace if `spark.jobNamespaces` is set with multiple values
set:
spark:
serviceAccount:
name: spark
jobNamespaces:
- ""
- ns1
- ns2
- ns3
documentIndex: 0
asserts:
- hasDocuments:
count: 3
count: 2
- containsDocument:
apiVersion: v1
kind: ServiceAccount
name: spark
name: spark-operator-spark
namespace: ns1
- it: Should create multiple service accounts if `spark.jobNamespaces` is set
- it: Should create service account for every non-empty spark job namespace if `spark.jobNamespaces` is set with multiple values
set:
spark:
serviceAccount:
name: spark
jobNamespaces:
- ""
- ns1
- ns2
- ns3
documentIndex: 1
asserts:
- hasDocuments:
count: 3
count: 2
- containsDocument:
apiVersion: v1
kind: ServiceAccount
name: spark
name: spark-operator-spark
namespace: ns2
- it: Should create multiple service accounts if `spark.jobNamespaces` is set
set:
spark:
serviceAccount:
name: spark
jobNamespaces:
- ns1
- ns2
- ns3
documentIndex: 2
asserts:
- hasDocuments:
count: 3
- containsDocument:
apiVersion: v1
kind: ServiceAccount
name: spark
namespace: ns3

View File

@ -124,6 +124,17 @@ tests:
path: spec.template.spec.containers[?(@.name=="spark-operator-webhook")].args
content: --namespaces=ns1,ns2
- it: Should set namespaces to all namespaces (`""`) if `spark.jobNamespaces` contains empty string
set:
spark:
jobNamespaces:
- ""
- default
asserts:
- contains:
path: spec.template.spec.containers[?(@.name=="spark-operator-webhook")].args
content: --namespaces=""
- it: Should contain `--enable-metrics` arg if `prometheus.metrics.enable` is set to `true`
set:
prometheus:

View File

@ -57,7 +57,7 @@ tests:
path: webhooks[*].failurePolicy
value: Fail
- it: Should set namespaceSelector if sparkJobNamespaces is not empty
- it: Should set namespaceSelector if `spark.jobNamespaces` is set with non-empty strings
set:
spark:
jobNamespaces:
@ -76,6 +76,19 @@ tests:
- ns2
- ns3
- it: Should not set namespaceSelector if `spark.jobNamespaces` contains empty string
set:
spark:
jobNamespaces:
- ""
- ns1
- ns2
- ns3
asserts:
- notExists:
path: webhooks[*].namespaceSelector
- it: Should use the specified timeoutSeconds
set:
webhook:

View File

@ -57,7 +57,7 @@ tests:
path: webhooks[*].failurePolicy
value: Fail
- it: Should set namespaceSelector if `spark.jobNamespaces` is not empty
- it: Should set namespaceSelector if `spark.jobNamespaces` is set with non-empty strings
set:
spark.jobNamespaces:
- ns1
@ -75,6 +75,18 @@ tests:
- ns2
- ns3
- it: Should not set namespaceSelector if `spark.jobNamespaces` contains empty string
set:
spark:
jobNamespaces:
- ""
- ns1
- ns2
- ns3
asserts:
- notExists:
path: webhooks[*].namespaceSelector
- it: Should use the specified timeoutSeconds
set:
webhook:

View File

@ -126,7 +126,7 @@ func NewStartCommand() *cobra.Command {
}
command.Flags().IntVar(&controllerThreads, "controller-threads", 10, "Number of worker threads used by the SparkApplication controller.")
command.Flags().StringSliceVar(&namespaces, "namespaces", []string{""}, "The Kubernetes namespace to manage. Will manage custom resource objects of the managed CRD types for the whole cluster if unset.")
command.Flags().StringSliceVar(&namespaces, "namespaces", []string{}, "The Kubernetes namespace to manage. Will manage custom resource objects of the managed CRD types for the whole cluster if unset or contains empty string.")
command.Flags().DurationVar(&cacheSyncTimeout, "cache-sync-timeout", 30*time.Second, "Informer cache sync timeout.")
command.Flags().BoolVar(&enableBatchScheduler, "enable-batch-scheduler", false, "Enable batch schedulers.")

View File

@ -130,7 +130,7 @@ func NewStartCommand() *cobra.Command {
}
command.Flags().IntVar(&controllerThreads, "controller-threads", 10, "Number of worker threads used by the SparkApplication controller.")
command.Flags().StringSliceVar(&namespaces, "namespaces", []string{""}, "The Kubernetes namespace to manage. Will manage custom resource objects of the managed CRD types for the whole cluster if unset.")
command.Flags().StringSliceVar(&namespaces, "namespaces", []string{}, "The Kubernetes namespace to manage. Will manage custom resource objects of the managed CRD types for the whole cluster if unset or contains empty string.")
command.Flags().StringVar(&labelSelectorFilter, "label-selector-filter", "", "A comma-separated list of key=value, or key labels to filter resources during watch and list based on the specified labels.")
command.Flags().DurationVar(&cacheSyncTimeout, "cache-sync-timeout", 30*time.Second, "Informer cache sync timeout.")

View File

@ -35,9 +35,14 @@ var _ predicate.Predicate = &EventFilter{}
// NewEventFilter creates a new EventFilter instance.
func NewEventFilter(namespaces []string) *EventFilter {
nsMap := make(map[string]bool)
for _, ns := range namespaces {
nsMap[ns] = true
if len(namespaces) == 0 {
nsMap[metav1.NamespaceAll] = true
} else {
for _, ns := range namespaces {
nsMap[ns] = true
}
}
return &EventFilter{
namespaces: nsMap,
}

View File

@ -42,8 +42,12 @@ var _ predicate.Predicate = &sparkPodEventFilter{}
// newSparkPodEventFilter creates a new SparkPodEventFilter instance.
func newSparkPodEventFilter(namespaces []string) *sparkPodEventFilter {
nsMap := make(map[string]bool)
for _, ns := range namespaces {
nsMap[ns] = true
if len(namespaces) == 0 {
nsMap[metav1.NamespaceAll] = true
} else {
for _, ns := range namespaces {
nsMap[ns] = true
}
}
return &sparkPodEventFilter{
@ -118,8 +122,12 @@ var _ predicate.Predicate = &EventFilter{}
func NewSparkApplicationEventFilter(client client.Client, recorder record.EventRecorder, namespaces []string) *EventFilter {
nsMap := make(map[string]bool)
for _, ns := range namespaces {
nsMap[ns] = true
if len(namespaces) == 0 {
nsMap[metav1.NamespaceAll] = true
} else {
for _, ns := range namespaces {
nsMap[ns] = true
}
}
return &EventFilter{