Added support for specifying PodSecurityContext
This commit is contained in:
parent
589b06c2e2
commit
e9e9b64452
|
@ -19,6 +19,8 @@ The Kubernetes Operator for Apache Spark ships with a command-line tool called `
|
|||
* [Using Secrets As Environment Variables](#using-secrets-as-environment-variables)
|
||||
* [Using Image Pull Secrets](#using-image-pull-secrets)
|
||||
* [Using Pod Affinity](#using-pod-affinity)
|
||||
* [Adding Tolerations](#adding-tolerations)
|
||||
* [Using Pod Security Context](#using-pod-security-context)
|
||||
* [Python Support](#python-support)
|
||||
* [Monitoring](#monitoring)
|
||||
* [Working with SparkApplications](#working-with-sparkapplications)
|
||||
|
@ -257,12 +259,12 @@ spec:
|
|||
affinity:
|
||||
podAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
...
|
||||
...
|
||||
executor:
|
||||
affinity:
|
||||
podAntiAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
...
|
||||
...
|
||||
```
|
||||
|
||||
Note that the mutating admission webhook is needed to use this feature. Please refer to the [Quick Start Guide](quick-start-guide.md) on how to enable the mutating admission webhook.
|
||||
|
@ -290,6 +292,23 @@ spec:
|
|||
Note that the mutating admission webhook is needed to use this feature. Please refer to the
|
||||
[Quick Start Guide](quick-start-guide.md) on how to enable the mutating admission webhook.
|
||||
|
||||
### Using Pod Security Context
|
||||
|
||||
A `SparkApplication` can specify a `PodSecurityContext` for the driver or executor pod, using the optional field `.spec.driver.securityContext` or `.spec.executor.securityContext`. Below is an example:
|
||||
|
||||
```yaml
|
||||
spec:
|
||||
driver:
|
||||
securityContext:
|
||||
runAsUser: 1000
|
||||
executor:
|
||||
securityContext:
|
||||
runAsUser: 1000
|
||||
```
|
||||
|
||||
Note that the mutating admission webhook is needed to use this feature. Please refer to the
|
||||
[Quick Start Guide](quick-start-guide.md) on how to enable the mutating admission webhook.
|
||||
|
||||
### Python Support
|
||||
|
||||
Python support can be enabled by setting `.spec.mainApplicationFile` with the path to your Python application. Optionally, the `.spec.pythonVersion` field can be used to set the major Python version of the docker image used to run the driver and executor containers. Below is an example showing part of a `SparkApplication` specification:
|
||||
|
|
|
@ -374,6 +374,9 @@ type SparkPodSpec struct {
|
|||
// Tolerations specifies the tolerations listed in ".spec.tolerations" to be applied to the pod.
|
||||
// Optional.
|
||||
Tolerations []apiv1.Toleration `json:"tolerations,omitempty"`
|
||||
// SecurityContenxt specifies the PodSecurityContext to apply.
|
||||
// Optional.
|
||||
SecurityContenxt *apiv1.PodSecurityContext `json:"securityContext,omitempty"`
|
||||
}
|
||||
|
||||
// DriverSpec is specification of the driver.
|
||||
|
|
|
@ -51,9 +51,17 @@ func patchSparkPod(pod *corev1.Pod, app *v1beta1.SparkApplication) []patchOperat
|
|||
patchOps = append(patchOps, addSparkConfigMap(pod, app)...)
|
||||
patchOps = append(patchOps, addHadoopConfigMap(pod, app)...)
|
||||
patchOps = append(patchOps, addTolerations(pod, app)...)
|
||||
op := addAffinity(pod, app)
|
||||
if op != nil {
|
||||
patchOps = append(patchOps, *op)
|
||||
if pod.Spec.Affinity == nil {
|
||||
op := addAffinity(pod, app)
|
||||
if op != nil {
|
||||
patchOps = append(patchOps, *op)
|
||||
}
|
||||
}
|
||||
if pod.Spec.SecurityContext == nil {
|
||||
op := addSecurityContext(pod, app)
|
||||
if op != nil {
|
||||
patchOps = append(patchOps, *op)
|
||||
}
|
||||
}
|
||||
|
||||
return patchOps
|
||||
|
@ -265,3 +273,17 @@ func addToleration(pod *corev1.Pod, toleration corev1.Toleration) patchOperation
|
|||
|
||||
return patchOperation{Op: "add", Path: path, Value: value}
|
||||
}
|
||||
|
||||
func addSecurityContext(pod *corev1.Pod, app *v1beta1.SparkApplication) *patchOperation {
|
||||
var secContext *corev1.PodSecurityContext
|
||||
if util.IsDriverPod(pod) {
|
||||
secContext = app.Spec.Driver.SecurityContenxt
|
||||
} else if util.IsExecutorPod(pod) {
|
||||
secContext = app.Spec.Executor.SecurityContenxt
|
||||
}
|
||||
|
||||
if secContext == nil {
|
||||
return nil
|
||||
}
|
||||
return &patchOperation{Op: "add", Path: "/spec/securityContext", Value: *secContext}
|
||||
}
|
||||
|
|
|
@ -389,6 +389,80 @@ func TestPatchSparkPod_Tolerations(t *testing.T) {
|
|||
assert.Equal(t, app.Spec.Driver.Tolerations[0], modifiedPod.Spec.Tolerations[0])
|
||||
}
|
||||
|
||||
func TestPatchSparkPod_SecurityContext(t *testing.T) {
|
||||
var user int64 = 1000
|
||||
app := &v1beta1.SparkApplication{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "spark-test",
|
||||
UID: "spark-test-1",
|
||||
},
|
||||
Spec: v1beta1.SparkApplicationSpec{
|
||||
Driver: v1beta1.DriverSpec{
|
||||
SparkPodSpec: v1beta1.SparkPodSpec{
|
||||
SecurityContenxt: &corev1.PodSecurityContext{
|
||||
RunAsUser: &user,
|
||||
},
|
||||
},
|
||||
},
|
||||
Executor: v1beta1.ExecutorSpec{
|
||||
SparkPodSpec: v1beta1.SparkPodSpec{
|
||||
SecurityContenxt: &corev1.PodSecurityContext{
|
||||
RunAsUser: &user,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
driverPod := &corev1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "spark-executor",
|
||||
Labels: map[string]string{
|
||||
config.SparkRoleLabel: config.SparkDriverRole,
|
||||
config.LaunchedBySparkOperatorLabel: "true",
|
||||
},
|
||||
},
|
||||
Spec: corev1.PodSpec{
|
||||
Containers: []corev1.Container{
|
||||
{
|
||||
Name: sparkDriverContainerName,
|
||||
Image: "spark-driver:latest",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
modifiedDriverPod, err := getModifiedPod(driverPod, app)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
assert.Equal(t, app.Spec.Executor.SecurityContenxt, modifiedDriverPod.Spec.SecurityContext)
|
||||
|
||||
executorPod := &corev1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "spark-executor",
|
||||
Labels: map[string]string{
|
||||
config.SparkRoleLabel: config.SparkExecutorRole,
|
||||
config.LaunchedBySparkOperatorLabel: "true",
|
||||
},
|
||||
},
|
||||
Spec: corev1.PodSpec{
|
||||
Containers: []corev1.Container{
|
||||
{
|
||||
Name: sparkExecutorContainerName,
|
||||
Image: "spark-executor:latest",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
modifiedExecutorPod, err := getModifiedPod(executorPod, app)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
assert.Equal(t, app.Spec.Executor.SecurityContenxt, modifiedExecutorPod.Spec.SecurityContext)
|
||||
}
|
||||
|
||||
func getModifiedPod(pod *corev1.Pod, app *v1beta1.SparkApplication) (*corev1.Pod, error) {
|
||||
patchOps := patchSparkPod(pod, app)
|
||||
patchBytes, err := json.Marshal(patchOps)
|
||||
|
|
|
@ -101,6 +101,7 @@ func TestMutatePod(t *testing.T) {
|
|||
assert.True(t, len(response.Patch) > 0)
|
||||
|
||||
// 3. Test processing Spark pod with patches.
|
||||
var user int64 = 1000
|
||||
app2 := &spov1beta1.SparkApplication{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "spark-app2",
|
||||
|
@ -151,6 +152,9 @@ func TestMutatePod(t *testing.T) {
|
|||
Effect: "NoEffect",
|
||||
},
|
||||
},
|
||||
SecurityContenxt: &corev1.PodSecurityContext{
|
||||
RunAsUser: &user,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -170,7 +174,7 @@ func TestMutatePod(t *testing.T) {
|
|||
assert.True(t, len(response.Patch) > 0)
|
||||
var patchOps []*patchOperation
|
||||
json.Unmarshal(response.Patch, &patchOps)
|
||||
assert.Equal(t, 5, len(patchOps))
|
||||
assert.Equal(t, 6, len(patchOps))
|
||||
}
|
||||
|
||||
func serializePod(pod *corev1.Pod) ([]byte, error) {
|
||||
|
|
Loading…
Reference in New Issue