From e9e9b64452aeed55e8bbdc24467485989106a4ee Mon Sep 17 00:00:00 2001 From: Yinan Li Date: Mon, 11 Mar 2019 14:52:15 -0700 Subject: [PATCH] Added support for specifying PodSecurityContext --- docs/user-guide.md | 23 +++++- .../sparkoperator.k8s.io/v1beta1/types.go | 3 + pkg/webhook/patch.go | 28 ++++++- pkg/webhook/patch_test.go | 74 +++++++++++++++++++ pkg/webhook/webhook_test.go | 6 +- 5 files changed, 128 insertions(+), 6 deletions(-) diff --git a/docs/user-guide.md b/docs/user-guide.md index e2eb2ad4..e0d3151b 100644 --- a/docs/user-guide.md +++ b/docs/user-guide.md @@ -19,6 +19,8 @@ The Kubernetes Operator for Apache Spark ships with a command-line tool called ` * [Using Secrets As Environment Variables](#using-secrets-as-environment-variables) * [Using Image Pull Secrets](#using-image-pull-secrets) * [Using Pod Affinity](#using-pod-affinity) + * [Adding Tolerations](#adding-tolerations) + * [Using Pod Security Context](#using-pod-security-context) * [Python Support](#python-support) * [Monitoring](#monitoring) * [Working with SparkApplications](#working-with-sparkapplications) @@ -257,12 +259,12 @@ spec: affinity: podAffinity: requiredDuringSchedulingIgnoredDuringExecution: - ... + ... executor: affinity: podAntiAffinity: requiredDuringSchedulingIgnoredDuringExecution: - ... + ... ``` Note that the mutating admission webhook is needed to use this feature. Please refer to the [Quick Start Guide](quick-start-guide.md) on how to enable the mutating admission webhook. @@ -290,6 +292,23 @@ spec: Note that the mutating admission webhook is needed to use this feature. Please refer to the [Quick Start Guide](quick-start-guide.md) on how to enable the mutating admission webhook. +### Using Pod Security Context + +A `SparkApplication` can specify a `PodSecurityContext` for the driver or executor pod, using the optional field `.spec.driver.securityContext` or `.spec.executor.securityContext`. 
Below is an example: + +```yaml +spec: + driver: + securityContext: + runAsUser: 1000 + executor: + securityContext: + runAsUser: 1000 +``` + +Note that the mutating admission webhook is needed to use this feature. Please refer to the +[Quick Start Guide](quick-start-guide.md) on how to enable the mutating admission webhook. + ### Python Support Python support can be enabled by setting `.spec.mainApplicationFile` with path to your python application. Optionaly, the `.spec.pythonVersion` field can be used to set the major Python version of the docker image used to run the driver and executor containers. Below is an example showing part of a `SparkApplication` specification: diff --git a/pkg/apis/sparkoperator.k8s.io/v1beta1/types.go b/pkg/apis/sparkoperator.k8s.io/v1beta1/types.go index c7f37807..46c33501 100644 --- a/pkg/apis/sparkoperator.k8s.io/v1beta1/types.go +++ b/pkg/apis/sparkoperator.k8s.io/v1beta1/types.go @@ -374,6 +374,9 @@ type SparkPodSpec struct { // Tolerations specifies the tolerations listed in ".spec.tolerations" to be applied to the pod. // Optional. Tolerations []apiv1.Toleration `json:"tolerations,omitempty"` + // SecurityContenxt specifies the PodSecurityContext to apply. + // Optional. + SecurityContenxt *apiv1.PodSecurityContext `json:"securityContext,omitempty"` } // DriverSpec is specification of the driver. diff --git a/pkg/webhook/patch.go b/pkg/webhook/patch.go index b2d525ae..686586ed 100644 --- a/pkg/webhook/patch.go +++ b/pkg/webhook/patch.go @@ -51,9 +51,17 @@ func patchSparkPod(pod *corev1.Pod, app *v1beta1.SparkApplication) []patchOperat patchOps = append(patchOps, addSparkConfigMap(pod, app)...) patchOps = append(patchOps, addHadoopConfigMap(pod, app)...) patchOps = append(patchOps, addTolerations(pod, app)...) 
-	op := addAffinity(pod, app)
-	if op != nil {
-		patchOps = append(patchOps, *op)
+	if pod.Spec.Affinity == nil {
+		op := addAffinity(pod, app)
+		if op != nil {
+			patchOps = append(patchOps, *op)
+		}
+	}
+	if pod.Spec.SecurityContext == nil {
+		op := addSecurityContext(pod, app)
+		if op != nil {
+			patchOps = append(patchOps, *op)
+		}
 	}
 
 	return patchOps
@@ -265,3 +273,17 @@ func addToleration(pod *corev1.Pod, toleration corev1.Toleration) patchOperation
 
 	return patchOperation{Op: "add", Path: path, Value: value}
 }
+
+func addSecurityContext(pod *corev1.Pod, app *v1beta1.SparkApplication) *patchOperation {
+	var secContext *corev1.PodSecurityContext
+	if util.IsDriverPod(pod) {
+		secContext = app.Spec.Driver.SecurityContenxt
+	} else if util.IsExecutorPod(pod) {
+		secContext = app.Spec.Executor.SecurityContenxt
+	}
+
+	if secContext == nil {
+		return nil
+	}
+	return &patchOperation{Op: "add", Path: "/spec/securityContext", Value: *secContext}
+}
diff --git a/pkg/webhook/patch_test.go b/pkg/webhook/patch_test.go
index 646d69ad..29f7fa99 100644
--- a/pkg/webhook/patch_test.go
+++ b/pkg/webhook/patch_test.go
@@ -389,6 +389,80 @@ func TestPatchSparkPod_Tolerations(t *testing.T) {
 	assert.Equal(t, app.Spec.Driver.Tolerations[0], modifiedPod.Spec.Tolerations[0])
 }
 
+func TestPatchSparkPod_SecurityContext(t *testing.T) {
+	var user int64 = 1000
+	app := &v1beta1.SparkApplication{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "spark-test",
+			UID:  "spark-test-1",
+		},
+		Spec: v1beta1.SparkApplicationSpec{
+			Driver: v1beta1.DriverSpec{
+				SparkPodSpec: v1beta1.SparkPodSpec{
+					SecurityContenxt: &corev1.PodSecurityContext{
+						RunAsUser: &user,
+					},
+				},
+			},
+			Executor: v1beta1.ExecutorSpec{
+				SparkPodSpec: v1beta1.SparkPodSpec{
+					SecurityContenxt: &corev1.PodSecurityContext{
+						RunAsUser: &user,
+					},
+				},
+			},
+		},
+	}
+
+	driverPod := &corev1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "spark-driver",
+			Labels: map[string]string{
+				config.SparkRoleLabel: config.SparkDriverRole,
+
config.LaunchedBySparkOperatorLabel: "true",
+			},
+		},
+		Spec: corev1.PodSpec{
+			Containers: []corev1.Container{
+				{
+					Name:  sparkDriverContainerName,
+					Image: "spark-driver:latest",
+				},
+			},
+		},
+	}
+
+	modifiedDriverPod, err := getModifiedPod(driverPod, app)
+	if err != nil {
+		t.Fatal(err)
+	}
+	assert.Equal(t, app.Spec.Driver.SecurityContenxt, modifiedDriverPod.Spec.SecurityContext)
+
+	executorPod := &corev1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "spark-executor",
+			Labels: map[string]string{
+				config.SparkRoleLabel:               config.SparkExecutorRole,
+				config.LaunchedBySparkOperatorLabel: "true",
+			},
+		},
+		Spec: corev1.PodSpec{
+			Containers: []corev1.Container{
+				{
+					Name:  sparkExecutorContainerName,
+					Image: "spark-executor:latest",
+				},
+			},
+		},
+	}
+
+	modifiedExecutorPod, err := getModifiedPod(executorPod, app)
+	if err != nil {
+		t.Fatal(err)
+	}
+	assert.Equal(t, app.Spec.Executor.SecurityContenxt, modifiedExecutorPod.Spec.SecurityContext)
+}
+
 func getModifiedPod(pod *corev1.Pod, app *v1beta1.SparkApplication) (*corev1.Pod, error) {
 	patchOps := patchSparkPod(pod, app)
 	patchBytes, err := json.Marshal(patchOps)
diff --git a/pkg/webhook/webhook_test.go b/pkg/webhook/webhook_test.go
index 2ff8f3af..900abb34 100644
--- a/pkg/webhook/webhook_test.go
+++ b/pkg/webhook/webhook_test.go
@@ -101,6 +101,7 @@ func TestMutatePod(t *testing.T) {
 	assert.True(t, len(response.Patch) > 0)
 
 	// 3. Test processing Spark pod with patches.
+ var user int64 = 1000 app2 := &spov1beta1.SparkApplication{ ObjectMeta: metav1.ObjectMeta{ Name: "spark-app2", @@ -151,6 +152,9 @@ func TestMutatePod(t *testing.T) { Effect: "NoEffect", }, }, + SecurityContenxt: &corev1.PodSecurityContext{ + RunAsUser: &user, + }, }, }, }, @@ -170,7 +174,7 @@ func TestMutatePod(t *testing.T) { assert.True(t, len(response.Patch) > 0) var patchOps []*patchOperation json.Unmarshal(response.Patch, &patchOps) - assert.Equal(t, 5, len(patchOps)) + assert.Equal(t, 6, len(patchOps)) } func serializePod(pod *corev1.Pod) ([]byte, error) {