delete launcherOnMaster field (#116)

parent 4e7701d552
commit d8025e9c38
@@ -43,11 +43,6 @@ type MPIJobSpec struct {
 	// +optional
 	SlotsPerWorker *int32 `json:"slotsPerWorker,omitempty"`
 
-	// Run the launcher on the master.
-	// Defaults to false.
-	// +optional
-	LauncherOnMaster bool `json:"launcherOnMaster,omitempty"`
-
 	// TODO: Move this to `RunPolicy` in common operator, see discussion in https://github.com/kubeflow/tf-operator/issues/960
 	// Specifies the number of retries before marking this job failed.
 	// Defaults to 6.
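Note: before this commit, API consumers opted in through the field deleted above. A minimal sketch of that usage, assuming the pre-removal types; the API version in the import path and the variable names are illustrative assumptions, not taken from the diff:

package main

import (
	// Package alias follows the diff's own `kubeflow` usage; the exact
	// API version in the path is an assumption for illustration.
	kubeflow "github.com/kubeflow/mpi-operator/pkg/apis/kubeflow/v1alpha1"
)

func main() {
	slots := int32(1)
	_ = &kubeflow.MPIJob{
		Spec: kubeflow.MPIJobSpec{
			SlotsPerWorker: &slots,
			// Deleted by this commit: this line no longer compiles.
			// Equivalent placement must now be expressed directly as
			// scheduling constraints (see the sketch after the last hunk).
			LauncherOnMaster: true,
		},
	}
}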
@@ -93,9 +93,6 @@ const (
 	// MPIJob is synced successfully.
 	MessageResourceSynced = "MPIJob synced successfully"
 
-	// LabelNodeRoleMaster specifies that a node is a master
-	LabelNodeRoleMaster = "node-role.kubernetes.io/master"
-
 	// podTemplateRestartPolicyReason is the warning reason when the restart
 	// policy is set in pod template.
 	podTemplateRestartPolicyReason = "SettedPodTemplateRestartPolicy"
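The constant goes away because its only consumer was the launcher-on-master block removed in the next hunk. The key it named is the well-known role marker kubeadm applies to control-plane nodes, both as a node label and as a NoSchedule taint. A minimal sketch of reading that label (the helper name is hypothetical):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// hasMasterRole reports whether a node carries the label that the removed
// LabelNodeRoleMaster constant named.
func hasMasterRole(node *corev1.Node) bool {
	_, ok := node.Labels["node-role.kubernetes.io/master"]
	return ok
}

func main() {
	node := &corev1.Node{}
	node.Labels = map[string]string{"node-role.kubernetes.io/master": ""}
	fmt.Println(hasMasterRole(node)) // true
}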
@@ -1068,35 +1065,6 @@ func (c *MPIJobController) newLauncher(mpiJob *kubeflow.MPIJob, kubectlDeliveryI
 		Value: fmt.Sprintf("%s/%s", configMountPath, hostfileName),
 	})
 
-	// determine if run the launcher on the master node
-	if mpiJob.Spec.LauncherOnMaster {
-
-		// support Tolerate
-		podSpec.Spec.Tolerations = []corev1.Toleration{
-			{
-				Key:    LabelNodeRoleMaster,
-				Effect: corev1.TaintEffectNoSchedule,
-			},
-		}
-		// prefer to assign pod to master node
-		podSpec.Spec.Affinity = &corev1.Affinity{
-			NodeAffinity: &corev1.NodeAffinity{
-				RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
-					NodeSelectorTerms: []corev1.NodeSelectorTerm{
-						{
-							MatchExpressions: []corev1.NodeSelectorRequirement{
-								{
-									Key:      LabelNodeRoleMaster,
-									Operator: corev1.NodeSelectorOpExists,
-								},
-							},
-						},
-					},
-				},
-			},
-		}
-	}
-
 	container.VolumeMounts = append(container.VolumeMounts,
 		corev1.VolumeMount{
 			Name: kubectlVolumeName,
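With the controller-side injection gone, users who still want the launcher on a master node can set the same constraints themselves. A minimal sketch reproducing the deleted block's toleration and node affinity; the helper name is hypothetical, and wiring the result into the MPIJob's launcher pod template is an assumption about the intended replacement path:

package main

import (
	corev1 "k8s.io/api/core/v1"
)

// Mirrors the LabelNodeRoleMaster constant removed in the second hunk.
const labelNodeRoleMaster = "node-role.kubernetes.io/master"

// masterScheduling rebuilds, on the user side, what the deleted controller
// code injected: tolerate the master NoSchedule taint and require a node
// that carries the master role label.
func masterScheduling() ([]corev1.Toleration, *corev1.Affinity) {
	tolerations := []corev1.Toleration{{
		Key:    labelNodeRoleMaster,
		Effect: corev1.TaintEffectNoSchedule,
	}}
	affinity := &corev1.Affinity{
		NodeAffinity: &corev1.NodeAffinity{
			RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
				NodeSelectorTerms: []corev1.NodeSelectorTerm{{
					MatchExpressions: []corev1.NodeSelectorRequirement{{
						Key:      labelNodeRoleMaster,
						Operator: corev1.NodeSelectorOpExists,
					}},
				}},
			},
		},
	}
	return tolerations, affinity
}

func main() {
	// These fields would be set on the launcher replica's pod template.
	tolerations, affinity := masterScheduling()
	podSpec := corev1.PodSpec{
		Tolerations: tolerations,
		Affinity:    affinity,
	}
	_ = podSpec
}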