feat(ws): initial Workspace and WorkspaceKind controller loops (#22)

* feat(ws): implement a reconciliation loop for the workspace

Signed-off-by: Adem Baccara <71262172+Adembc@users.noreply.github.com>

* remove comments

Signed-off-by: Adem Baccara <71262172+Adembc@users.noreply.github.com>

* add correct rbac permission for controller

Signed-off-by: Adem Baccara <71262172+Adembc@users.noreply.github.com>

* implemented collision handling using ownerReferences

Signed-off-by: Adem Baccara <71262172+Adembc@users.noreply.github.com>

* update the status field during workspace reconciliation

Signed-off-by: Adem Baccara <71262172+Adembc@users.noreply.github.com>

* add watcher to workspace kind

Signed-off-by: Adem Baccara <71262172+Adembc@users.noreply.github.com>

* handle the case that multiple ports are specified for an image

Signed-off-by: Adem Baccara <71262172+Adembc@users.noreply.github.com>

* generate correctly the StatefulSet spec

Signed-off-by: Adem Baccara <71262172+Adembc@users.noreply.github.com>

* set status.state of the Workspace

Signed-off-by: Adem Baccara <71262172+Adembc@users.noreply.github.com>

* add rbac permission for configmap

Signed-off-by: Adem Baccara <71262172+Adembc@users.noreply.github.com>

* update dockerfile

Signed-off-by: Adem Baccara <71262172+Adembc@users.noreply.github.com>

* mathew updates

Signed-off-by: Mathew Wicks <5735406+thesuperzapper@users.noreply.github.com>

* mathew updates 2

Signed-off-by: Mathew Wicks <5735406+thesuperzapper@users.noreply.github.com>

* mathew updates 3

Signed-off-by: Mathew Wicks <5735406+thesuperzapper@users.noreply.github.com>

* fix todos

Signed-off-by: Adem Baccara <71262172+Adembc@users.noreply.github.com>

* mathew updates 4

Signed-off-by: Mathew Wicks <5735406+thesuperzapper@users.noreply.github.com>

* handle extraEnv value replacement

Signed-off-by: Adem Baccara <71262172+Adembc@users.noreply.github.com>

* mathew updates 5

Signed-off-by: Mathew Wicks <5735406+thesuperzapper@users.noreply.github.com>

* mathew updates 6

Signed-off-by: Mathew Wicks <5735406+thesuperzapper@users.noreply.github.com>

---------

Signed-off-by: Adem Baccara <71262172+Adembc@users.noreply.github.com>
Signed-off-by: Mathew Wicks <5735406+thesuperzapper@users.noreply.github.com>
Co-authored-by: Mathew Wicks <5735406+thesuperzapper@users.noreply.github.com>
This commit is contained in:
Adem Baccara 2024-07-15 22:21:55 +01:00 committed by GitHub
parent ca78327ea5
commit 0cacff77ee
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
15 changed files with 4659 additions and 460 deletions

View File

@ -14,7 +14,7 @@ RUN go mod download
# Copy the go source
COPY cmd/main.go cmd/main.go
COPY api/ api/
COPY internal/controller/ internal/controller/
COPY internal/ internal/
# Build
# the GOARCH has no default value, to allow the binary to be built according to the host where the command

View File

@ -36,6 +36,12 @@ type WorkspaceSpec struct {
//+kubebuilder:default=false
Paused *bool `json:"paused,omitempty"`
// if true, pending updates are NOT applied when the Workspace is paused
// if false, pending updates are applied when the Workspace is paused
//+kubebuilder:validation:Optional
//+kubebuilder:default=false
DeferUpdates *bool `json:"deferUpdates,omitempty"`
// the WorkspaceKind to use
//+kubebuilder:validation:MinLength:=2
//+kubebuilder:validation:MaxLength:=63
@ -76,18 +82,21 @@ type WorkspacePodVolumes struct {
// - this PVC must be RWX (ReadWriteMany, ReadWriteOnce)
// - the mount path is defined in the WorkspaceKind under
// `spec.podTemplate.volumeMounts.home`
//+kubebuilder:validation:Optional
//+kubebuilder:validation:MinLength:=2
//+kubebuilder:validation:MaxLength:=63
//+kubebuilder:validation:Pattern:=^[a-z0-9][-a-z0-9]*[a-z0-9]$
//+kubebuilder:example="my-home-pvc"
Home string `json:"home"`
Home *string `json:"home,omitempty"`
// additional PVCs to mount
// - these PVCs must already exist in the Namespace
// - these PVCs must be RWX (ReadWriteMany, ReadWriteOnce)
// - these PVCs must already exist in the Namespace
// - the same PVC can be mounted multiple times with different `mountPaths`
// - if `readOnly` is false, the PVC must be RWX (ReadWriteMany, ReadWriteOnce)
// - if `readOnly` is true, the PVC must be ReadOnlyMany
//+kubebuilder:validation:Optional
//+listType:="map"
//+listMapKey:="name"
//+listMapKey:="mountPath"
Data []PodVolumeMount `json:"data,omitempty"`
}
@ -97,7 +106,7 @@ type PodVolumeMount struct {
//+kubebuilder:validation:MaxLength:=63
//+kubebuilder:validation:Pattern:=^[a-z0-9][-a-z0-9]*[a-z0-9]$
//+kubebuilder:example="my-data-pvc"
Name string `json:"name"`
PVCName string `json:"pvcName"`
// the mount path for the PVC
//+kubebuilder:validation:MinLength:=2
@ -105,18 +114,27 @@ type PodVolumeMount struct {
//+kubebuilder:validation:Pattern:=^/[^/].*$
//+kubebuilder:example="/data/my-data"
MountPath string `json:"mountPath"`
// if the PVC should be mounted as ReadOnly
//+kubebuilder:validation:Optional
//+kubebuilder:default=false
ReadOnly *bool `json:"readOnly,omitempty"`
}
type WorkspacePodOptions struct {
// the id of an imageConfig option
// - options are defined in WorkspaceKind under
// `spec.podTemplate.options.imageConfig.values[]`
//+kubebuilder:example="jupyter_scipy_170"
//+kubebuilder:validation:MinLength:=1
//+kubebuilder:validation:MaxLength:=256
//+kubebuilder:example="jupyterlab_scipy_190"
ImageConfig string `json:"imageConfig"`
// the id of a podConfig option
// - options are defined in WorkspaceKind under
// `spec.podTemplate.options.podConfig.values[]`
//+kubebuilder:validation:MinLength:=1
//+kubebuilder:validation:MaxLength:=256
//+kubebuilder:example="big_gpu"
PodConfig string `json:"podConfig"`
}
@ -129,11 +147,12 @@ type WorkspacePodOptions struct {
// WorkspaceStatus defines the observed state of Workspace
type WorkspaceStatus struct {
// activity information for the Workspace, used to determine when to cull
Activity WorkspaceActivity `json:"activity"`
// the time when the Workspace was paused, 0 if the Workspace is not paused
// the time when the Workspace was paused (UNIX epoch)
// - set to 0 when the Workspace is NOT paused
//+kubebuilder:default=0
//+kubebuilder:example=1704067200
PauseTime int64 `json:"pauseTime"`
@ -142,32 +161,66 @@ type WorkspaceStatus struct {
// and so will be patched on the next restart
// - true if the WorkspaceKind has changed one of its common `podTemplate` fields
// like `podMetadata`, `probes`, `extraEnv`, or `containerSecurityContext`
//+kubebuilder:example=false
//+kubebuilder:default=false
PendingRestart bool `json:"pendingRestart"`
// the `spec.podTemplate.options` which will take effect after the next restart
PodTemplateOptions WorkspacePodOptions `json:"podTemplateOptions"`
// information about the current podTemplate options
PodTemplateOptions WorkspacePodOptionsStatus `json:"podTemplateOptions"`
// the current state of the Workspace
//+kubebuilder:example="Running"
//+kubebuilder:default="Unknown"
State WorkspaceState `json:"state"`
// a human-readable message about the state of the Workspace
// - WARNING: this field is NOT FOR MACHINE USE, subject to change without notice
//+kubebuilder:example="Pod is not ready"
//+kubebuilder:default=""
StateMessage string `json:"stateMessage"`
}
type WorkspaceActivity struct {
// the last time activity was observed on the Workspace (UNIX epoch)
//+kubebuilder:default=0
//+kubebuilder:example=1704067200
LastActivity int64 `json:"lastActivity"`
// the last time we checked for activity on the Workspace (UNIX epoch)
//+kubebuilder:default=0
//+kubebuilder:example=1704067200
LastUpdate int64 `json:"lastUpdate"`
}
type WorkspacePodOptionsStatus struct {
// info about the current imageConfig option
ImageConfig WorkspacePodOptionInfo `json:"imageConfig"`
// info about the current podConfig option
PodConfig WorkspacePodOptionInfo `json:"podConfig"`
}
type WorkspacePodOptionInfo struct {
// the option id which will take effect after the next restart
//+kubebuilder:validation:Optional
//+kubebuilder:validation:MinLength:=1
//+kubebuilder:validation:MaxLength:=256
Desired string `json:"desired,omitempty"`
// the chain from the current option to the desired option
//+kubebuilder:validation:Optional
RedirectChain []WorkspacePodOptionRedirectStep `json:"redirectChain,omitempty"`
}
type WorkspacePodOptionRedirectStep struct {
// the source option id
//+kubebuilder:validation:MinLength:=1
//+kubebuilder:validation:MaxLength:=256
Source string `json:"source"`
// the target option id
//+kubebuilder:validation:MinLength:=1
//+kubebuilder:validation:MaxLength:=256
Target string `json:"target"`
}
// +kubebuilder:validation:Enum:={"Running","Terminating","Paused","Pending","Error","Unknown"}
type WorkspaceState string

View File

@ -98,7 +98,6 @@ type WorkspaceKindConfigMap struct {
type WorkspaceKindPodTemplate struct {
// metadata for Workspace Pods (MUTABLE)
// - changes are applied the NEXT time each Workspace is PAUSED
//+kubebuilder:validation:Optional
PodMetadata *WorkspaceKindPodMetadata `json:"podMetadata,omitempty"`
@ -110,7 +109,6 @@ type WorkspaceKindPodTemplate struct {
Culling *WorkspaceKindCullingConfig `json:"culling,omitempty"`
// standard probes to determine Container health (MUTABLE)
// - changes are applied the NEXT time each Workspace is PAUSED
//+kubebuilder:validation:Optional
Probes *WorkspaceKindProbes `json:"probes,omitempty"`
@ -122,16 +120,30 @@ type WorkspaceKindPodTemplate struct {
HTTPProxy *HTTPProxy `json:"httpProxy,omitempty"`
// environment variables for Workspace Pods (MUTABLE)
// - changes are applied the NEXT time each Workspace is PAUSED
// - the following string templates are available:
// - `.PathPrefix`: the path prefix of the Workspace (e.g. '/workspace/{profile_name}/{workspace_name}/')
// - the following go template functions are available:
// - `httpPathPrefix(portId string)`: returns the HTTP path prefix of the specified port
//+kubebuilder:validation:Optional
//+listType:="map"
//+listMapKey:="name"
ExtraEnv []v1.EnvVar `json:"extraEnv,omitempty"`
// container SecurityContext for Workspace Pods (MUTABLE)
// - changes are applied the NEXT time each Workspace is PAUSED
// extra volume mounts for Workspace Pods (MUTABLE)
//+kubebuilder:validation:Optional
//+listType:="map"
//+listMapKey:="mountPath"
ExtraVolumeMounts []v1.VolumeMount `json:"extraVolumeMounts,omitempty"`
// extra volumes for Workspace Pods (MUTABLE)
//+kubebuilder:validation:Optional
//+listType:="map"
//+listMapKey:="name"
ExtraVolumes []v1.Volume `json:"extraVolumes,omitempty"`
// security context for Workspace Pods (MUTABLE)
//+kubebuilder:validation:Optional
SecurityContext *v1.PodSecurityContext `json:"securityContext,omitempty"`
// container security context for Workspace Pods (MUTABLE)
//+kubebuilder:validation:Optional
ContainerSecurityContext *v1.SecurityContext `json:"containerSecurityContext,omitempty"`
@ -170,7 +182,7 @@ type WorkspaceKindCullingConfig struct {
//+kubebuilder:validation:Optional
//+kubebuilder:validation:Minimum:=60
//+kubebuilder:default=86400
MaxInactiveSeconds *int64 `json:"maxInactiveSeconds,omitempty"`
MaxInactiveSeconds *int32 `json:"maxInactiveSeconds,omitempty"`
// the probe used to determine if the Workspace is active
ActivityProbe ActivityProbe `json:"activityProbe"`
@ -275,9 +287,8 @@ type WorkspaceKindPodOptions struct {
}
type ImageConfig struct {
// the id of the default image config
//+kubebuilder:example:="jupyter_scipy_171"
Default string `json:"default"`
// spawner ui configs
Spawner OptionsSpawnerConfig `json:"spawner"`
// the list of image configs that are available
//+kubebuilder:validation:MinItems:=1
@ -288,7 +299,9 @@ type ImageConfig struct {
type ImageConfigValue struct {
// the id of this image config
//+kubebuilder:example:="jupyter_scipy_171"
//+kubebuilder:validation:MinLength:=1
//+kubebuilder:validation:MaxLength:=256
//+kubebuilder:example:="jupyterlab_scipy_190"
Id string `json:"id"`
// information for the spawner ui
@ -319,15 +332,19 @@ type ImageConfigSpec struct {
// - if multiple ports are defined, the user will see multiple "Connect" buttons
// in a dropdown menu on the Workspace overview page
//+kubebuilder:validation:MinItems:=1
//+listType:="map"
//+listMapKey:="id"
Ports []ImagePort `json:"ports"`
}
type ImagePort struct {
// the display name of the port
//+kubebuilder:validation:MinLength:=2
//+kubebuilder:validation:MaxLength:=64
//+kubebuilder:example:="JupyterLab"
DisplayName string `json:"displayName"`
// the id of the port
// - this is NOT used as the Container or Service port name, but as part of the HTTP path
//+kubebuilder:validation:MinLength:=1
//+kubebuilder:validation:MaxLength:=32
//+kubebuilder:validation:Pattern:=^[a-z0-9][a-z0-9_-]*[a-z0-9]$
//+kubebuilder:example="jupyterlab"
Id string `json:"id"`
// the port number
//+kubebuilder:validation:Minimum:=1
@ -335,6 +352,12 @@ type ImagePort struct {
//+kubebuilder:example:=8888
Port int32 `json:"port"`
// the display name of the port
//+kubebuilder:validation:MinLength:=2
//+kubebuilder:validation:MaxLength:=64
//+kubebuilder:example:="JupyterLab"
DisplayName string `json:"displayName"`
// the protocol of the port
//+kubebuilder:example:="HTTP"
Protocol ImagePortProtocol `json:"protocol"`
@ -348,9 +371,8 @@ const (
)
type PodConfig struct {
// the id of the default pod config
//+kubebuilder:example="big_gpu"
Default string `json:"default"`
// spawner ui configs
Spawner OptionsSpawnerConfig `json:"spawner"`
// the list of pod configs that are available
//+kubebuilder:validation:MinItems:=1
@ -361,6 +383,8 @@ type PodConfig struct {
type PodConfigValue struct {
// the id of this pod config
//+kubebuilder:validation:MinLength:=1
//+kubebuilder:validation:MaxLength:=256
//+kubebuilder:example="big_gpu"
Id string `json:"id"`
@ -394,6 +418,15 @@ type PodConfigSpec struct {
Resources *v1.ResourceRequirements `json:"resources,omitempty"`
}
type OptionsSpawnerConfig struct {
// the id of the default option
// - this will be selected by default in the spawner ui
//+kubebuilder:validation:MinLength:=1
//+kubebuilder:validation:MaxLength:=256
//+kubebuilder:example="jupyterlab_scipy_190"
Default string `json:"default"`
}
type OptionSpawnerInfo struct {
// the display name of the option
//+kubebuilder:validation:MinLength:=2
@ -426,20 +459,18 @@ type OptionSpawnerLabel struct {
Key string `json:"key"`
// the value of the label
//+kubebuilder:validation:MinLength:=2
//+kubebuilder:validation:MinLength:=1
//+kubebuilder:validation:MaxLength:=64
Value string `json:"value"`
}
type OptionRedirect struct {
// the id of the option to redirect to
//+kubebuilder:example:="jupyter_scipy_171"
//+kubebuilder:validation:MinLength:=1
//+kubebuilder:validation:MaxLength:=256
//+kubebuilder:example:="jupyterlab_scipy_190"
To string `json:"to"`
// if the redirect will be applied after the next restart of the Workspace
//+kubebuilder:example:=true
WaitForRestart bool `json:"waitForRestart"`
// information about the redirect
//+kubebuilder:validation:Optional
Message *RedirectMessage `json:"message,omitempty"`
@ -476,8 +507,8 @@ const (
type WorkspaceKindStatus struct {
// the number of Workspaces that are using this WorkspaceKind
//+kubebuilder:example=3
Workspaces int64 `json:"workspaces"`
//+kubebuilder:default=0
Workspaces int32 `json:"workspaces"`
// metrics for podTemplate options
PodTemplateOptions PodTemplateOptionsMetrics `json:"podTemplateOptions"`
@ -497,12 +528,14 @@ type PodTemplateOptionsMetrics struct {
type OptionMetric struct {
// the id of the option
//+kubebuilder:validation:MinLength:=1
//+kubebuilder:validation:MaxLength:=256
//+kubebuilder:example="big_gpu"
Id string `json:"id"`
// the number of Workspaces currently using the option
//+kubebuilder:example=3
Workspaces int64 `json:"workspaces"`
Workspaces int32 `json:"workspaces"`
}
/*

View File

@ -113,6 +113,7 @@ func (in *HTTPProxy) DeepCopy() *HTTPProxy {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageConfig) DeepCopyInto(out *ImageConfig) {
*out = *in
out.Spawner = in.Spawner
if in.Values != nil {
in, out := &in.Values, &out.Values
*out = make([]ImageConfigValue, len(*in))
@ -308,9 +309,25 @@ func (in *OptionSpawnerLabel) DeepCopy() *OptionSpawnerLabel {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OptionsSpawnerConfig) DeepCopyInto(out *OptionsSpawnerConfig) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OptionsSpawnerConfig.
func (in *OptionsSpawnerConfig) DeepCopy() *OptionsSpawnerConfig {
if in == nil {
return nil
}
out := new(OptionsSpawnerConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodConfig) DeepCopyInto(out *PodConfig) {
*out = *in
out.Spawner = in.Spawner
if in.Values != nil {
in, out := &in.Values, &out.Values
*out = make([]PodConfigValue, len(*in))
@ -419,6 +436,11 @@ func (in *PodTemplateOptionsMetrics) DeepCopy() *PodTemplateOptionsMetrics {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodVolumeMount) DeepCopyInto(out *PodVolumeMount) {
*out = *in
if in.ReadOnly != nil {
in, out := &in.ReadOnly, &out.ReadOnly
*out = new(bool)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodVolumeMount.
@ -452,7 +474,7 @@ func (in *Workspace) DeepCopyInto(out *Workspace) {
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
out.Status = in.Status
in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Workspace.
@ -540,7 +562,7 @@ func (in *WorkspaceKindCullingConfig) DeepCopyInto(out *WorkspaceKindCullingConf
}
if in.MaxInactiveSeconds != nil {
in, out := &in.MaxInactiveSeconds, &out.MaxInactiveSeconds
*out = new(int64)
*out = new(int32)
**out = **in
}
in.ActivityProbe.DeepCopyInto(&out.ActivityProbe)
@ -691,6 +713,25 @@ func (in *WorkspaceKindPodTemplate) DeepCopyInto(out *WorkspaceKindPodTemplate)
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.ExtraVolumeMounts != nil {
in, out := &in.ExtraVolumeMounts, &out.ExtraVolumeMounts
*out = make([]v1.VolumeMount, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.ExtraVolumes != nil {
in, out := &in.ExtraVolumes, &out.ExtraVolumes
*out = make([]v1.Volume, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.SecurityContext != nil {
in, out := &in.SecurityContext, &out.SecurityContext
*out = new(v1.PodSecurityContext)
(*in).DeepCopyInto(*out)
}
if in.ContainerSecurityContext != nil {
in, out := &in.ContainerSecurityContext, &out.ContainerSecurityContext
*out = new(v1.SecurityContext)
@ -895,6 +936,41 @@ func (in *WorkspacePodMetadata) DeepCopy() *WorkspacePodMetadata {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkspacePodOptionInfo) DeepCopyInto(out *WorkspacePodOptionInfo) {
*out = *in
if in.RedirectChain != nil {
in, out := &in.RedirectChain, &out.RedirectChain
*out = make([]WorkspacePodOptionRedirectStep, len(*in))
copy(*out, *in)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspacePodOptionInfo.
func (in *WorkspacePodOptionInfo) DeepCopy() *WorkspacePodOptionInfo {
if in == nil {
return nil
}
out := new(WorkspacePodOptionInfo)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkspacePodOptionRedirectStep) DeepCopyInto(out *WorkspacePodOptionRedirectStep) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspacePodOptionRedirectStep.
func (in *WorkspacePodOptionRedirectStep) DeepCopy() *WorkspacePodOptionRedirectStep {
if in == nil {
return nil
}
out := new(WorkspacePodOptionRedirectStep)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkspacePodOptions) DeepCopyInto(out *WorkspacePodOptions) {
*out = *in
@ -910,6 +986,23 @@ func (in *WorkspacePodOptions) DeepCopy() *WorkspacePodOptions {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkspacePodOptionsStatus) DeepCopyInto(out *WorkspacePodOptionsStatus) {
*out = *in
in.ImageConfig.DeepCopyInto(&out.ImageConfig)
in.PodConfig.DeepCopyInto(&out.PodConfig)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspacePodOptionsStatus.
func (in *WorkspacePodOptionsStatus) DeepCopy() *WorkspacePodOptionsStatus {
if in == nil {
return nil
}
out := new(WorkspacePodOptionsStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkspacePodTemplate) DeepCopyInto(out *WorkspacePodTemplate) {
*out = *in
@ -935,10 +1028,17 @@ func (in *WorkspacePodTemplate) DeepCopy() *WorkspacePodTemplate {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkspacePodVolumes) DeepCopyInto(out *WorkspacePodVolumes) {
*out = *in
if in.Home != nil {
in, out := &in.Home, &out.Home
*out = new(string)
**out = **in
}
if in.Data != nil {
in, out := &in.Data, &out.Data
*out = make([]PodVolumeMount, len(*in))
copy(*out, *in)
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
@ -960,6 +1060,11 @@ func (in *WorkspaceSpec) DeepCopyInto(out *WorkspaceSpec) {
*out = new(bool)
**out = **in
}
if in.DeferUpdates != nil {
in, out := &in.DeferUpdates, &out.DeferUpdates
*out = new(bool)
**out = **in
}
in.PodTemplate.DeepCopyInto(&out.PodTemplate)
}
@ -977,7 +1082,7 @@ func (in *WorkspaceSpec) DeepCopy() *WorkspaceSpec {
func (in *WorkspaceStatus) DeepCopyInto(out *WorkspaceStatus) {
*out = *in
out.Activity = in.Activity
out.PodTemplateOptions = in.PodTemplateOptions
in.PodTemplateOptions.DeepCopyInto(&out.PodTemplateOptions)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceStatus.

View File

@ -44,6 +44,12 @@ spec:
spec:
description: WorkspaceSpec defines the desired state of Workspace
properties:
deferUpdates:
default: false
description: |-
if true, pending updates are NOT applied when the Workspace is paused
if false, pending updates are applied when the Workspace is paused
type: boolean
kind:
description: the WorkspaceKind to use
example: jupyterlab
@ -69,7 +75,9 @@ spec:
the id of an imageConfig option
- options are defined in WorkspaceKind under
`spec.podTemplate.options.imageConfig.values[]`
example: jupyter_scipy_170
example: jupyterlab_scipy_190
maxLength: 256
minLength: 1
type: string
podConfig:
description: |-
@ -77,6 +85,8 @@ spec:
- options are defined in WorkspaceKind under
`spec.podTemplate.options.podConfig.values[]`
example: big_gpu
maxLength: 256
minLength: 1
type: string
required:
- imageConfig
@ -102,8 +112,10 @@ spec:
data:
description: |-
additional PVCs to mount
- these PVCs must already exist in the Namespace
- these PVCs must be RWX (ReadWriteMany, ReadWriteOnce)
- these PVCs must already exist in the Namespace
- the same PVC can be mounted multiple times with different `mountPaths`
- if `readOnly` is false, the PVC must be RWX (ReadWriteMany, ReadWriteOnce)
- if `readOnly` is true, the PVC must be ReadOnlyMany
items:
properties:
mountPath:
@ -113,20 +125,24 @@ spec:
minLength: 2
pattern: ^/[^/].*$
type: string
name:
pvcName:
description: the name of the PVC to mount
example: my-data-pvc
maxLength: 63
minLength: 2
pattern: ^[a-z0-9][-a-z0-9]*[a-z0-9]$
type: string
readOnly:
default: false
description: if the PVC should be mounted as ReadOnly
type: boolean
required:
- mountPath
- name
- pvcName
type: object
type: array
x-kubernetes-list-map-keys:
- name
- mountPath
x-kubernetes-list-type: map
home:
description: |-
@ -140,8 +156,6 @@ spec:
minLength: 2
pattern: ^[a-z0-9][-a-z0-9]*[a-z0-9]$
type: string
required:
- home
type: object
required:
- options
@ -159,12 +173,14 @@ spec:
when to cull
properties:
lastActivity:
default: 0
description: the last time activity was observed on the Workspace
(UNIX epoch)
example: 1704067200
format: int64
type: integer
lastUpdate:
default: 0
description: the last time we checked for activity on the Workspace
(UNIX epoch)
example: 1704067200
@ -175,43 +191,91 @@ spec:
- lastUpdate
type: object
pauseTime:
description: the time when the Workspace was paused, 0 if the Workspace
is not paused
default: 0
description: |-
the time when the Workspace was paused (UNIX epoch)
- set to 0 when the Workspace is NOT paused
example: 1704067200
format: int64
type: integer
pendingRestart:
default: false
description: |-
if the current Pod does not reflect the current "desired" state
- true if any `spec.podTemplate.options` have a redirect
and so will be patched on the next restart
- true if the WorkspaceKind has changed one of its common `podTemplate` fields
like `podMetadata`, `probes`, `extraEnv`, or `containerSecurityContext`
example: false
type: boolean
podTemplateOptions:
description: the `spec.podTemplate.options` which will take effect
after the next restart
description: information about the current podTemplate options
properties:
imageConfig:
description: |-
the id of an imageConfig option
- options are defined in WorkspaceKind under
`spec.podTemplate.options.imageConfig.values[]`
example: jupyter_scipy_170
type: string
description: info about the current imageConfig option
properties:
desired:
description: the option id which will take effect after the
next restart
maxLength: 256
minLength: 1
type: string
redirectChain:
description: the chain from the current option to the desired
option
items:
properties:
source:
description: the source option id
maxLength: 256
minLength: 1
type: string
target:
description: the target option id
maxLength: 256
minLength: 1
type: string
required:
- source
- target
type: object
type: array
type: object
podConfig:
description: |-
the id of a podConfig option
- options are defined in WorkspaceKind under
`spec.podTemplate.options.podConfig.values[]`
example: big_gpu
type: string
description: info about the current podConfig option
properties:
desired:
description: the option id which will take effect after the
next restart
maxLength: 256
minLength: 1
type: string
redirectChain:
description: the chain from the current option to the desired
option
items:
properties:
source:
description: the source option id
maxLength: 256
minLength: 1
type: string
target:
description: the target option id
maxLength: 256
minLength: 1
type: string
required:
- source
- target
type: object
type: array
type: object
required:
- imageConfig
- podConfig
type: object
state:
default: Unknown
description: the current state of the Workspace
enum:
- Running
@ -220,13 +284,12 @@ spec:
- Pending
- Error
- Unknown
example: Running
type: string
stateMessage:
default: ""
description: |-
a human-readable message about the state of the Workspace
- WARNING: this field is NOT FOR MACHINE USE, subject to change without notice
example: Pod is not ready
type: string
required:
- activity

View File

@ -4,6 +4,46 @@ kind: ClusterRole
metadata:
name: manager-role
rules:
- apiGroups:
- apps
resources:
- statefulsets
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- services
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- kubeflow.org
resources:
@ -56,3 +96,15 @@ rules:
- get
- patch
- update
- apiGroups:
- networking.istio.io
resources:
- virtualservices
verbs:
- create
- delete
- get
- list
- patch
- update
- watch

View File

@ -1,9 +1,74 @@
apiVersion: kubeflow.org/v1beta1
kind: Workspace
metadata:
labels:
app.kubernetes.io/name: workspace-controller
app.kubernetes.io/managed-by: kustomize
name: workspace-sample
name: my-workspace
namespace: default
spec:
# TODO(user): Add fields here
## if the workspace is paused (no pods running)
paused: false
## if true, pending updates are NOT applied when the Workspace is paused
## if false, pending updates are applied when the Workspace is paused
deferUpdates: false
## the WorkspaceKind to use
kind: "jupyterlab"
## options for "podTemplate"-type WorkspaceKinds
##
podTemplate:
## metadata to be applied to the Pod resource
##
podMetadata:
## labels to be applied to the Pod resource
## - labels are required to support integration with the PodDefault resource:
## https://github.com/kubeflow/kubeflow/blob/master/components/admission-webhook/pkg/apis/settings/v1alpha1/poddefault_types.go
##
labels: {}
## annotations to be applied to the Pod resource
##
annotations: {}
## volume configs
##
volumes:
## the name of the PVC to mount as the home volume
## - this PVC must already exist in the Namespace
## - this PVC must be RWX (ReadWriteMany, ReadWriteOnce)
## - the mount path is defined in the WorkspaceKind under
## `spec.podTemplate.volumeMounts.home`
##
home: "my-home-pvc"
## additional PVCs to mount
  ## - these PVCs must already exist in the Namespace
## - the same PVC can be mounted multiple times with different `mountPaths`
## - if `readOnly` is false, the PVC must be RWX (ReadWriteMany, ReadWriteOnce)
## - if `readOnly` is true, the PVC must be ReadOnlyMany
##
data:
- pvcName: "my-data-pvc"
mountPath: "/data/my-data"
readOnly: false
## the selected podTemplate options
## - these are the user-selected options from the Workspace Spawner UI
## which determine the PodSpec of the Workspace Pod
##
options:
## the id of an imageConfig option
## - options are defined in WorkspaceKind under
## `spec.podTemplate.options.imageConfig.values[]`
##
imageConfig: "jupyterlab_scipy_180"
## the id of a podConfig option
## - options are defined in WorkspaceKind under
## `spec.podTemplate.options.podConfig.values[]`
##
podConfig: "tiny_cpu"

View File

@ -1,9 +1,401 @@
apiVersion: kubeflow.org/v1beta1
kind: WorkspaceKind
metadata:
labels:
app.kubernetes.io/name: workspace-controller
app.kubernetes.io/managed-by: kustomize
name: workspacekind-sample
name: jupyterlab
spec:
# TODO(user): Add fields here
## ================================================================
## SPAWNER CONFIGS
## - how the WorkspaceKind is displayed in the Workspace Spawner UI
## ================================================================
spawner:
## the display name of the WorkspaceKind
displayName: "JupyterLab Notebook"
## the description of the WorkspaceKind
description: "A Workspace which runs JupyterLab in a Pod"
## if this WorkspaceKind should be hidden from the Workspace Spawner UI
hidden: false
## if this WorkspaceKind is deprecated
deprecated: false
## a message to show in Workspace Spawner UI when the WorkspaceKind is deprecated
deprecationMessage: "This WorkspaceKind will be removed on 20XX-XX-XX, please use another WorkspaceKind."
## the icon of the WorkspaceKind
## - a small (favicon-sized) icon used in the Workspace Spawner UI
##
icon:
url: "https://jupyter.org/assets/favicons/apple-touch-icon-152x152.png"
#configMap:
# name: "my-logos"
# key: "apple-touch-icon-152x152.png"
## the logo of the WorkspaceKind
## - a 1:1 (card size) logo used in the Workspace Spawner UI
##
logo:
url: "https://upload.wikimedia.org/wikipedia/commons/3/38/Jupyter_logo.svg"
#configMap:
# name: "my-logos"
# key: "Jupyter_logo.svg"
## ================================================================
## DEFINITION CONFIGS
## - currently the only supported type is `podTemplate`
## - in the future, there will be MORE types like `virtualMachine`
## to run the Workspace on systems like KubeVirt/EC2 rather than in a Pod
## ================================================================
podTemplate:
## metadata for Workspace Pods (MUTABLE)
##
podMetadata:
labels:
my-workspace-kind-label: "my-value"
annotations:
my-workspace-kind-annotation: "my-value"
## service account configs for Workspace Pods
##
serviceAccount:
## the name of the ServiceAccount (NOT MUTABLE)
## - this Service Account MUST already exist in the Namespace
## of the Workspace, the controller will NOT create it
## - we will not show this WorkspaceKind in the Spawner UI
## if the SA does not exist in the Namespace
##
name: "default-editor"
## activity culling configs (MUTABLE)
## - for pausing inactive Workspaces
##
culling:
## if the culling feature is enabled
##
enabled: true
## the maximum number of seconds a Workspace can be inactive
##
maxInactiveSeconds: 86400
## the probe used to determine if the Workspace is active
##
activityProbe:
## OPTION 1: a shell command probe
## - if the Workspace had activity in the last 60 seconds this command
## should return status 0, otherwise it should return status 1
##
#exec:
# command:
# - "bash"
# - "-c"
# - "exit 0"
## OPTION 2: a Jupyter-specific probe
## - will poll the `/api/status` endpoint of the Jupyter API, and use the `last_activity` field
## https://github.com/jupyter-server/jupyter_server/blob/v2.13.0/jupyter_server/services/api/handlers.py#L62-L67
## - note, users need to be careful that their other probes don't trigger a "last_activity" update
## e.g. they should only check the health of Jupyter using the `/api/status` endpoint
##
jupyter:
lastActivity: true
## standard probes to determine Container health (MUTABLE)
## - spec for Probe:
## https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#probe-v1-core
##
probes:
startupProbe: {}
livenessProbe: {}
readinessProbe: {}
## volume mount paths
##
volumeMounts:
## the path to mount the home PVC (NOT MUTABLE)
##
home: "/home/jovyan"
## http proxy configs (MUTABLE)
##
httpProxy:
## if the path prefix is stripped from incoming HTTP requests
## - if true, the '/workspace/{profile_name}/{workspace_name}/' path prefix
## is stripped from incoming requests, the application sees the request
## as if it was made to '/...'
## - this only works if the application serves RELATIVE URLs for its assets
##
removePathPrefix: false
## header manipulation rules for incoming HTTP requests
## - sets the `spec.http[].headers.request` of the Istio VirtualService
## https://istio.io/latest/docs/reference/config/networking/virtual-service/#Headers-HeaderOperations
## - the following string templates are available:
## - `.PathPrefix`: the path prefix of the Workspace (e.g. '/workspace/{profile_name}/{workspace_name}/')
##
requestHeaders: {}
#set: { "X-RStudio-Root-Path": "{{ .PathPrefix }}" } # for RStudio
#add: {}
#remove: []
## environment variables for Workspace Pods (MUTABLE)
## - spec for EnvVar:
## https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#envvar-v1-core
## - the following go template functions are available:
## - `httpPathPrefix(portId string)`: returns the HTTP path prefix of the specified port
##
extraEnv:
## to enable backwards compatibility with old Jupyter images from Kubeflow Notebooks V1
## https://github.com/kubeflow/kubeflow/blob/v1.8.0/components/example-notebook-servers/jupyter/s6/services.d/jupyterlab/run#L12
- name: "NB_PREFIX"
value: |-
{{ httpPathPrefix "jupyterlab" }}
## extra volume mounts for Workspace Pods (MUTABLE)
## - spec for VolumeMount:
## https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#volumemount-v1-core
##
extraVolumeMounts:
## frameworks like PyTorch use shared memory for inter-process communication and expect a tmpfs at /dev/shm
## https://en.wikipedia.org/wiki/Shared_memory
- name: "dshm"
mountPath: "/dev/shm"
## extra volumes for Workspace Pods (MUTABLE)
## - spec for Volume:
## https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#volume-v1-core
##
extraVolumes:
- name: "dshm"
emptyDir:
medium: "Memory"
## security context for Workspace Pods (MUTABLE)
## - spec for PodSecurityContext:
## https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#podsecuritycontext-v1-core
##
securityContext:
fsGroup: 100
## container SecurityContext for Workspace Pods (MUTABLE)
## - spec for SecurityContext:
## https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#securitycontext-v1-core
##
containerSecurityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
runAsNonRoot: true
## ==============================================================
## WORKSPACE OPTIONS
## - options are the user-selectable fields,
## they determine the PodSpec of the Workspace
## ==============================================================
options:
##
## About the `values` fields:
## - the `values` field is a list of options that the user can select
## - elements of `values` can NOT be removed, only HIDDEN or REDIRECTED
## - this prevents options being removed that are still in use by existing Workspaces
## - this limitation may be removed in the future
## - options may be "hidden" by setting `spawner.hidden` to `true`
## - hidden options are NOT selectable in the Spawner UI
## - hidden options are still available to the controller and manually created Workspace resources
## - options may be "redirected" by setting `redirect.to` to another option:
## - redirected options are NOT shown in the Spawner UI
## - redirected options are like an HTTP 302 redirect, the controller will use the target option
## without actually changing the `spec.podTemplate.options` field of the Workspace
## - the Spawner UI will warn users about Workspaces with pending restarts
##
## ============================================================
## IMAGE CONFIG OPTIONS
## - SETS: image, imagePullPolicy, ports
## ============================================================
imageConfig:
## spawner ui configs
##
spawner:
## the id of the default option
## - this will be selected by default in the spawner ui
##
default: "jupyterlab_scipy_190"
## the list of image configs that are available
##
values:
## ================================
## EXAMPLE 1: a hidden option
## ================================
- id: "jupyterlab_scipy_180"
spawner:
displayName: "jupyter-scipy:v1.8.0"
description: "JupyterLab, with SciPy Packages"
labels:
- key: "python_version"
value: "3.11"
hidden: true
redirect:
to: "jupyterlab_scipy_190"
message:
level: "Info" # "Info" | "Warning" | "Danger"
text: "This update will change..."
spec:
## the container image to use
##
image: "docker.io/kubeflownotebookswg/jupyter-scipy:v1.8.0"
## the pull policy for the container image
## - default: "IfNotPresent"
##
imagePullPolicy: "IfNotPresent"
## ports that the container listens on
## - currently, only HTTP is supported for `protocol`
## - currently, all ports use the same `httpProxy` settings
## - if multiple ports are defined, the user will see multiple "Connect" buttons
## in a dropdown menu on the Workspace overview page
##
ports:
- id: "jupyterlab"
displayName: "JupyterLab"
port: 8888
protocol: "HTTP"
## ================================
## EXAMPLE 2: a visible option
## ================================
- id: "jupyterlab_scipy_190"
spawner:
displayName: "jupyter-scipy:v1.9.0"
description: "JupyterLab, with SciPy Packages"
labels:
- key: "python_version"
value: "3.11"
spec:
image: "docker.io/kubeflownotebookswg/jupyter-scipy:v1.9.0"
imagePullPolicy: "IfNotPresent"
ports:
- id: "jupyterlab"
displayName: "JupyterLab"
port: 8888
protocol: "HTTP"
## ============================================================
## POD CONFIG OPTIONS
## - SETS: affinity, nodeSelector, tolerations, resources
## ============================================================
podConfig:
## spawner ui configs
##
spawner:
## the id of the default option
## - this will be selected by default in the spawner ui
##
default: "tiny_cpu"
## the list of pod configs that are available
##
values:
## ================================
## EXAMPLE 1: a tiny CPU pod
## ================================
- id: "tiny_cpu"
spawner:
displayName: "Tiny CPU"
description: "Pod with 0.1 CPU, 128 MB RAM"
labels:
- key: "cpu"
value: "100m"
- key: "memory"
value: "128Mi"
spec:
resources:
requests:
cpu: 100m
memory: 128Mi
## ================================
## EXAMPLE 2: a small CPU pod
## ================================
- id: "small_cpu"
spawner:
displayName: "Small CPU"
description: "Pod with 1 CPU, 2 GB RAM"
labels:
- key: "cpu"
value: "1000m"
- key: "memory"
value: "2Gi"
hidden: false
spec:
## affinity configs for the pod
## - https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#affinity-v1-core
##
affinity: {}
## node selector configs for the pod
## - https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
##
nodeSelector: {}
## toleration configs for the pod
## - https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#toleration-v1-core
##
tolerations: []
## resource configs for the "main" container in the pod
## - https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#resourcerequirements-v1-core
##
resources:
requests:
cpu: 1000m
memory: 2Gi
## ================================
## EXAMPLE 3: a big GPU pod
## ================================
- id: "big_gpu"
spawner:
displayName: "Big GPU"
description: "Pod with 4 CPU, 16 GB RAM, and 1 GPU"
labels:
- key: "cpu"
value: "4000m"
- key: "memory"
value: "16Gi"
- key: "gpu"
value: "1"
hidden: false
spec:
affinity: {}
nodeSelector: {}
tolerations:
- key: "nvidia.com/gpu"
operator: "Exists"
effect: "NoSchedule"
resources:
requests:
cpu: 4000m
memory: 16Gi
limits:
nvidia.com/gpu: 1

View File

@ -17,11 +17,19 @@ limitations under the License.
package controller
import (
"context"
"fmt"
"path/filepath"
"runtime"
"testing"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/ptr"
ctrl "sigs.k8s.io/controller-runtime"
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@ -39,9 +47,15 @@ import (
// These tests use Ginkgo (BDD-style Go testing framework). Refer to
// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
var cfg *rest.Config
var k8sClient client.Client
var testEnv *envtest.Environment
var (
testEnv *envtest.Environment
cfg *rest.Config
k8sClient client.Client
ctx context.Context
cancel context.CancelFunc
)
func TestControllers(t *testing.T) {
RegisterFailHandler(Fail)
@ -51,40 +65,355 @@ func TestControllers(t *testing.T) {
// BeforeSuite bootstraps an envtest control plane, registers the CRD scheme,
// creates a client, and starts a controller manager running both the Workspace
// and WorkspaceKind reconcilers in a background goroutine.
// FIX: a bad merge duplicated the BinaryAssetsDirectory field (and its comment
// block) inside the envtest.Environment literal, which does not compile — the
// duplicate has been removed, keeping the newer single-line form.
var _ = BeforeSuite(func() {
	logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))

	ctx, cancel = context.WithCancel(context.Background())

	By("bootstrapping test environment")
	testEnv = &envtest.Environment{
		CRDDirectoryPaths:     []string{filepath.Join("..", "..", "config", "crd", "bases")},
		ErrorIfCRDPathMissing: true,

		// The BinaryAssetsDirectory is only required if you want to run the tests directly without call the makefile target test.
		// If not informed it will look for the default path defined in controller-runtime which is /usr/local/kubebuilder/.
		// Note that you must have the required binaries setup under the bin directory to perform the tests directly.
		// When we run make test it will be setup and used automatically.
		BinaryAssetsDirectory: filepath.Join("..", "..", "bin", "k8s", fmt.Sprintf("1.29.0-%s-%s", runtime.GOOS, runtime.GOARCH)),
	}

	var err error

	// cfg is defined in this file globally.
	cfg, err = testEnv.Start()
	Expect(err).NotTo(HaveOccurred())
	Expect(cfg).NotTo(BeNil())

	By("setting up the scheme")
	err = kubefloworgv1beta1.AddToScheme(scheme.Scheme)
	Expect(err).NotTo(HaveOccurred())

	//+kubebuilder:scaffold:scheme

	By("creating the k8s client")
	k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
	Expect(err).NotTo(HaveOccurred())
	Expect(k8sClient).NotTo(BeNil())

	By("setting up the controller manager")
	k8sManager, err := ctrl.NewManager(cfg, ctrl.Options{
		Scheme: scheme.Scheme,
		Metrics: metricsserver.Options{
			BindAddress: "0", // disable metrics serving
		},
	})
	Expect(err).ToNot(HaveOccurred())

	By("setting up the Workspace controller")
	err = (&WorkspaceReconciler{
		Client: k8sManager.GetClient(),
		Scheme: k8sManager.GetScheme(),
	}).SetupWithManager(k8sManager)
	Expect(err).ToNot(HaveOccurred())

	By("setting up the WorkspaceKind controller")
	err = (&WorkspaceKindReconciler{
		Client: k8sManager.GetClient(),
		Scheme: k8sManager.GetScheme(),
	}).SetupWithManager(k8sManager)
	Expect(err).ToNot(HaveOccurred())

	// run the manager until AfterSuite calls cancel()
	go func() {
		defer GinkgoRecover()
		err = k8sManager.Start(ctx)
		Expect(err).ToNot(HaveOccurred(), "failed to run manager")
	}()
})
// AfterSuite stops the controller manager (by cancelling its context) and then
// tears down the envtest control plane started in BeforeSuite.
var _ = AfterSuite(func() {
	By("stopping the manager")
	cancel()

	By("tearing down the test environment")
	Expect(testEnv.Stop()).NotTo(HaveOccurred())
})
// NewExampleWorkspace1 returns the common "Workspace 1" object used in tests.
// The fixture selects the "jupyterlab_scipy_180" imageConfig and "tiny_cpu"
// podConfig options, with a home PVC and a single data PVC mount.
func NewExampleWorkspace1(name string, namespace string, workspaceKind string) *kubefloworgv1beta1.Workspace {
	// a single read-write data volume, mounted alongside the home volume
	dataMounts := []kubefloworgv1beta1.PodVolumeMount{
		{
			PVCName:   "my-data-pvc",
			MountPath: "/data/my-data",
			ReadOnly:  ptr.To(false),
		},
	}

	podTemplate := kubefloworgv1beta1.WorkspacePodTemplate{
		PodMetadata: &kubefloworgv1beta1.WorkspacePodMetadata{
			Labels:      nil,
			Annotations: nil,
		},
		Volumes: kubefloworgv1beta1.WorkspacePodVolumes{
			Home: ptr.To("my-home-pvc"),
			Data: dataMounts,
		},
		Options: kubefloworgv1beta1.WorkspacePodOptions{
			ImageConfig: "jupyterlab_scipy_180",
			PodConfig:   "tiny_cpu",
		},
	}

	return &kubefloworgv1beta1.Workspace{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
		},
		Spec: kubefloworgv1beta1.WorkspaceSpec{
			Paused:       ptr.To(false),
			DeferUpdates: ptr.To(false),
			Kind:         workspaceKind,
			PodTemplate:  podTemplate,
		},
	}
}
// NewExampleWorkspaceKind1 returns the common "WorkspaceKind 1" object used in tests.
// It mirrors the `jupyterlab` WorkspaceKind sample manifest: two imageConfig values
// (one hidden and redirected to the other) and three podConfig values
// (tiny CPU, small CPU, big GPU).
func NewExampleWorkspaceKind1(name string) *kubefloworgv1beta1.WorkspaceKind {
	return &kubefloworgv1beta1.WorkspaceKind{
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
		},
		Spec: kubefloworgv1beta1.WorkspaceKindSpec{
			// spawner configs: how the WorkspaceKind is displayed in the Spawner UI
			Spawner: kubefloworgv1beta1.WorkspaceKindSpawner{
				DisplayName:        "JupyterLab Notebook",
				Description:        "A Workspace which runs JupyterLab in a Pod",
				Hidden:             ptr.To(false),
				Deprecated:         ptr.To(false),
				DeprecationMessage: ptr.To("This WorkspaceKind will be removed on 20XX-XX-XX, please use another WorkspaceKind."),
				// icon uses a URL source, while logo uses a ConfigMap source,
				// so the fixture exercises both variants of WorkspaceKindIcon
				Icon: kubefloworgv1beta1.WorkspaceKindIcon{
					Url: ptr.To("https://jupyter.org/assets/favicons/apple-touch-icon-152x152.png"),
				},
				Logo: kubefloworgv1beta1.WorkspaceKindIcon{
					ConfigMap: &kubefloworgv1beta1.WorkspaceKindConfigMap{
						Name: "my-logos",
						Key:  "apple-touch-icon-152x152.png",
					},
				},
			},
			PodTemplate: kubefloworgv1beta1.WorkspaceKindPodTemplate{
				PodMetadata: &kubefloworgv1beta1.WorkspaceKindPodMetadata{},
				// NOTE(review): this ServiceAccount must already exist in the Workspace
				// namespace — the controller does not create it (per the sample manifest)
				ServiceAccount: kubefloworgv1beta1.WorkspaceKindServiceAccount{
					Name: "default-editor",
				},
				// culling: pause Workspaces after 86400s (24h) of inactivity,
				// using the Jupyter `last_activity` API as the activity probe
				Culling: &kubefloworgv1beta1.WorkspaceKindCullingConfig{
					Enabled:            ptr.To(true),
					MaxInactiveSeconds: ptr.To(int32(86400)),
					ActivityProbe: kubefloworgv1beta1.ActivityProbe{
						Jupyter: &kubefloworgv1beta1.ActivityProbeJupyter{
							LastActivity: true,
						},
					},
				},
				Probes: &kubefloworgv1beta1.WorkspaceKindProbes{},
				VolumeMounts: kubefloworgv1beta1.WorkspaceKindVolumeMounts{
					Home: "/home/jovyan",
				},
				// header templates may reference `.PathPrefix` (rendered by the controller)
				HTTPProxy: &kubefloworgv1beta1.HTTPProxy{
					RemovePathPrefix: ptr.To(false),
					RequestHeaders: &kubefloworgv1beta1.IstioHeaderOperations{
						Set:    map[string]string{"X-RStudio-Root-Path": "{{ .PathPrefix }}"},
						Add:    map[string]string{},
						Remove: []string{},
					},
				},
				// NB_PREFIX uses the `httpPathPrefix` template function with the
				// port id "jupyterlab" defined in the imageConfig values below
				ExtraEnv: []v1.EnvVar{
					{
						Name:  "NB_PREFIX",
						Value: `{{ httpPathPrefix "jupyterlab" }}`,
					},
				},
				// tmpfs at /dev/shm for frameworks (e.g. PyTorch) that use shared memory
				ExtraVolumeMounts: []v1.VolumeMount{
					{
						Name:      "dshm",
						MountPath: "/dev/shm",
					},
				},
				ExtraVolumes: []v1.Volume{
					{
						Name: "dshm",
						VolumeSource: v1.VolumeSource{
							EmptyDir: &v1.EmptyDirVolumeSource{
								Medium: v1.StorageMediumMemory,
							},
						},
					},
				},
				SecurityContext: &v1.PodSecurityContext{
					FSGroup: ptr.To(int64(100)),
				},
				ContainerSecurityContext: &v1.SecurityContext{
					AllowPrivilegeEscalation: ptr.To(false),
					Capabilities: &v1.Capabilities{
						Drop: []v1.Capability{"ALL"},
					},
					RunAsNonRoot: ptr.To(true),
				},
				// user-selectable options determining the PodSpec of the Workspace
				Options: kubefloworgv1beta1.WorkspaceKindPodOptions{
					ImageConfig: kubefloworgv1beta1.ImageConfig{
						Spawner: kubefloworgv1beta1.OptionsSpawnerConfig{
							Default: "jupyterlab_scipy_190",
						},
						Values: []kubefloworgv1beta1.ImageConfigValue{
							// a hidden option, redirected to "jupyterlab_scipy_190"
							{
								Id: "jupyterlab_scipy_180",
								Spawner: kubefloworgv1beta1.OptionSpawnerInfo{
									DisplayName: "jupyter-scipy:v1.8.0",
									Description: ptr.To("JupyterLab, with SciPy Packages"),
									Labels: []kubefloworgv1beta1.OptionSpawnerLabel{
										{
											Key:   "python_version",
											Value: "3.11",
										},
									},
									Hidden: ptr.To(true),
								},
								Redirect: &kubefloworgv1beta1.OptionRedirect{
									To: "jupyterlab_scipy_190",
									Message: &kubefloworgv1beta1.RedirectMessage{
										Level: "Info",
										Text:  "This update will change...",
									},
								},
								Spec: kubefloworgv1beta1.ImageConfigSpec{
									Image: "docker.io/kubeflownotebookswg/jupyter-scipy:v1.8.0",
									Ports: []kubefloworgv1beta1.ImagePort{
										{
											Id:          "jupyterlab",
											DisplayName: "JupyterLab",
											Port:        8888,
											Protocol:    "HTTP",
										},
									},
								},
							},
							// a visible option (also the spawner default)
							{
								Id: "jupyterlab_scipy_190",
								Spawner: kubefloworgv1beta1.OptionSpawnerInfo{
									DisplayName: "jupyter-scipy:v1.9.0",
									Description: ptr.To("JupyterLab, with SciPy Packages"),
									Labels: []kubefloworgv1beta1.OptionSpawnerLabel{
										{
											Key:   "python_version",
											Value: "3.11",
										},
									},
								},
								Spec: kubefloworgv1beta1.ImageConfigSpec{
									Image: "docker.io/kubeflownotebookswg/jupyter-scipy:v1.9.0",
									Ports: []kubefloworgv1beta1.ImagePort{
										{
											Id:          "jupyterlab",
											DisplayName: "JupyterLab",
											Port:        8888,
											Protocol:    "HTTP",
										},
									},
								},
							},
						},
					},
					PodConfig: kubefloworgv1beta1.PodConfig{
						Spawner: kubefloworgv1beta1.OptionsSpawnerConfig{
							Default: "tiny_cpu",
						},
						Values: []kubefloworgv1beta1.PodConfigValue{
							// tiny CPU pod: requests only
							{
								Id: "tiny_cpu",
								Spawner: kubefloworgv1beta1.OptionSpawnerInfo{
									DisplayName: "Tiny CPU",
									Description: ptr.To("Pod with 0.1 CPU, 128 MB RAM"),
									Labels: []kubefloworgv1beta1.OptionSpawnerLabel{
										{
											Key:   "cpu",
											Value: "100m",
										},
										{
											Key:   "memory",
											Value: "128Mi",
										},
									},
								},
								Spec: kubefloworgv1beta1.PodConfigSpec{
									Resources: &v1.ResourceRequirements{
										Requests: map[v1.ResourceName]resource.Quantity{
											v1.ResourceCPU:    resource.MustParse("100m"),
											v1.ResourceMemory: resource.MustParse("128Mi"),
										},
									},
								},
							},
							// small CPU pod: requests only
							{
								Id: "small_cpu",
								Spawner: kubefloworgv1beta1.OptionSpawnerInfo{
									DisplayName: "Small CPU",
									Description: ptr.To("Pod with 1 CPU, 2 GB RAM"),
									Labels: []kubefloworgv1beta1.OptionSpawnerLabel{
										{
											Key:   "cpu",
											Value: "1000m",
										},
										{
											Key:   "memory",
											Value: "2Gi",
										},
									},
								},
								Spec: kubefloworgv1beta1.PodConfigSpec{
									Resources: &v1.ResourceRequirements{
										Requests: map[v1.ResourceName]resource.Quantity{
											v1.ResourceCPU:    resource.MustParse("1000m"),
											v1.ResourceMemory: resource.MustParse("2Gi"),
										},
									},
								},
							},
							// big GPU pod: GPU toleration, GPU resource limit
							{
								Id: "big_gpu",
								Spawner: kubefloworgv1beta1.OptionSpawnerInfo{
									DisplayName: "Big GPU",
									Description: ptr.To("Pod with 4 CPU, 16 GB RAM, and 1 GPU"),
									Labels: []kubefloworgv1beta1.OptionSpawnerLabel{
										{
											Key:   "cpu",
											Value: "4000m",
										},
										{
											Key:   "memory",
											Value: "16Gi",
										},
										{
											Key:   "gpu",
											Value: "1",
										},
									},
								},
								Spec: kubefloworgv1beta1.PodConfigSpec{
									Affinity:     nil,
									NodeSelector: nil,
									Tolerations: []v1.Toleration{
										{
											Key:      "nvidia.com/gpu",
											Operator: v1.TolerationOpExists,
											Effect:   v1.TaintEffectNoSchedule,
										},
									},
									Resources: &v1.ResourceRequirements{
										Requests: map[v1.ResourceName]resource.Quantity{
											v1.ResourceCPU:    resource.MustParse("4000m"),
											v1.ResourceMemory: resource.MustParse("16Gi"),
										},
										Limits: map[v1.ResourceName]resource.Quantity{
											"nvidia.com/gpu": resource.MustParse("1"),
										},
									},
								},
							},
						},
					},
				},
			},
		},
	}
}

View File

@ -17,14 +17,72 @@ limitations under the License.
package controller
import (
"bytes"
"context"
"fmt"
"reflect"
"strings"
"text/template"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log"
"k8s.io/apimachinery/pkg/util/intstr"
"github.com/kubeflow/notebooks/workspaces/controller/internal/helper"
"github.com/go-logr/logr"
kubefloworgv1beta1 "github.com/kubeflow/notebooks/workspaces/controller/api/v1beta1"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
const (
	// label keys applied to resources managed by the Workspace controller
	workspaceNameLabel     = "notebooks.kubeflow.org/workspace-name"
	workspaceSelectorLabel = "statefulset"

	// KubeBuilder cache fields
	// - used as field selectors when listing objects from the manager's cache,
	//   e.g. `fields.OneTermEqualSelector(kbCacheWorkspaceOwnerKey, workspace.Name)`
	kbCacheWorkspaceOwnerKey  = ".metadata.controller"
	kbCacheWorkspaceKindField = ".spec.kind"

	// lengths for resource names
	generateNameSuffixLength = 6
	maxServiceNameLength     = 63
	maxStatefulSetNameLength = 52 // https://github.com/kubernetes/kubernetes/issues/64023

	// state message formats for Workspace status
	// - messages containing `%s` are fmt.Sprintf formats, filled in before being
	//   passed to `updateWorkspaceState`
	stateMsgError                      = "Workspace has error"
	stateMsgErrorUnknownWorkspaceKind  = "Workspace references unknown WorkspaceKind: %s"
	stateMsgErrorInvalidImageConfig    = "Workspace has invalid imageConfig: %s"
	stateMsgErrorInvalidPodConfig      = "Workspace has invalid podConfig: %s"
	stateMsgErrorGenFailureStatefulSet = "Workspace failed to generate StatefulSet with error: %s"
	stateMsgErrorGenFailureService     = "Workspace failed to generate Service with error: %s"
	stateMsgErrorMultipleStatefulSets  = "Workspace owns multiple StatefulSets: %s"
	stateMsgErrorMultipleServices      = "Workspace owns multiple Services: %s"
	stateMsgErrorPodCrashLoopBackOff   = "Workspace Pod is not running (CrashLoopBackOff)"
	stateMsgErrorPodImagePullBackOff   = "Workspace Pod is not running (ImagePullBackOff)"
	stateMsgPaused                     = "Workspace is paused"
	stateMsgPending                    = "Workspace is pending"
	stateMsgRunning                    = "Workspace is running"
	stateMsgTerminating                = "Workspace is terminating"
	stateMsgUnknown                    = "Workspace is in an unknown state"
)
var (
	// apiGroupVersionStr is the "group/version" string of the kubefloworgv1beta1 API
	// (presumably compared against ownerReference apiVersion fields — confirm at call sites)
	apiGroupVersionStr = kubefloworgv1beta1.GroupVersion.String()
)
// WorkspaceReconciler reconciles a Workspace object
@ -33,30 +91,899 @@ type WorkspaceReconciler struct {
Scheme *runtime.Scheme
}
// +kubebuilder:rbac:groups=kubeflow.org,resources=workspaces,verbs=create;delete;get;list;patch;update;watch
// +kubebuilder:rbac:groups=kubeflow.org,resources=workspaces/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=kubeflow.org,resources=workspaces/finalizers,verbs=update
// +kubebuilder:rbac:groups=kubeflow.org,resources=workspacekinds,verbs=get;list;watch
// +kubebuilder:rbac:groups=kubeflow.org,resources=workspacekinds/finalizers,verbs=update
// +kubebuilder:rbac:groups=apps,resources=statefulsets,verbs=create;delete;get;list;patch;update;watch
// +kubebuilder:rbac:groups=core,resources=configmaps,verbs=get;list;watch
// +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch
// +kubebuilder:rbac:groups=core,resources=services,verbs=create;delete;get;list;patch;update;watch
// +kubebuilder:rbac:groups=networking.istio.io,resources=virtualservices,verbs=create;delete;get;list;patch;update;watch
// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
//
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.17.3/pkg/reconcile
func (r *WorkspaceReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
_ = log.FromContext(ctx)
func (r *WorkspaceReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { // nolint:gocyclo
log := log.FromContext(ctx)
log.V(2).Info("reconciling Workspace")
// TODO(user): your logic here
// fetch the Workspace
workspace := &kubefloworgv1beta1.Workspace{}
if err := r.Get(ctx, req.NamespacedName, workspace); err != nil {
if client.IgnoreNotFound(err) == nil {
// Request object not found, could have been deleted after reconcile request.
// Owned objects are automatically garbage collected.
// For additional cleanup logic use finalizers.
// Return and don't requeue.
return ctrl.Result{}, nil
}
log.Error(err, "unable to fetch Workspace")
return ctrl.Result{}, err
}
if !workspace.GetDeletionTimestamp().IsZero() {
log.V(2).Info("Workspace is being deleted")
return ctrl.Result{}, nil
}
// fetch the WorkspaceKind
workspaceKindName := workspace.Spec.Kind
log = log.WithValues("workspaceKind", workspaceKindName)
workspaceKind := &kubefloworgv1beta1.WorkspaceKind{}
if err := r.Get(ctx, client.ObjectKey{Name: workspaceKindName}, workspaceKind); err != nil {
if apierrors.IsNotFound(err) {
log.V(0).Info("Workspace references unknown WorkspaceKind")
return r.updateWorkspaceState(ctx, log, workspace,
kubefloworgv1beta1.WorkspaceStateError,
fmt.Sprintf(stateMsgErrorUnknownWorkspaceKind, workspaceKindName),
)
}
log.Error(err, "unable to fetch WorkspaceKind for Workspace")
return ctrl.Result{}, err
}
// add finalizer to WorkspaceKind
// NOTE: finalizers can only be added to non-deleted objects
if workspaceKind.GetDeletionTimestamp().IsZero() {
if !controllerutil.ContainsFinalizer(workspaceKind, workspaceKindFinalizer) {
controllerutil.AddFinalizer(workspaceKind, workspaceKindFinalizer)
if err := r.Update(ctx, workspaceKind); err != nil {
if apierrors.IsConflict(err) {
log.V(2).Info("update conflict while adding finalizer to WorkspaceKind, will requeue")
return ctrl.Result{Requeue: true}, nil
}
log.Error(err, "unable to add finalizer to WorkspaceKind")
return ctrl.Result{}, err
}
}
}
// get the current and desired (after redirects) imageConfig
currentImageConfig, desiredImageConfig, imageConfigRedirectChain, err := getImageConfig(workspace, workspaceKind)
if err != nil {
log.V(0).Info("failed to get imageConfig for Workspace", "error", err.Error())
return r.updateWorkspaceState(ctx, log, workspace,
kubefloworgv1beta1.WorkspaceStateError,
fmt.Sprintf(stateMsgErrorInvalidImageConfig, err.Error()),
)
}
if desiredImageConfig != nil {
workspace.Status.PendingRestart = true
workspace.Status.PodTemplateOptions.ImageConfig.Desired = desiredImageConfig.Id
workspace.Status.PodTemplateOptions.ImageConfig.RedirectChain = imageConfigRedirectChain
} else {
workspace.Status.PodTemplateOptions.ImageConfig.Desired = currentImageConfig.Id
workspace.Status.PodTemplateOptions.ImageConfig.RedirectChain = nil
}
// get the current and desired (after redirects) podConfig
currentPodConfig, desiredPodConfig, podConfigRedirectChain, err := getPodConfig(workspace, workspaceKind)
if err != nil {
log.V(0).Info("failed to get podConfig for Workspace", "error", err.Error())
return r.updateWorkspaceState(ctx, log, workspace,
kubefloworgv1beta1.WorkspaceStateError,
fmt.Sprintf(stateMsgErrorInvalidPodConfig, err.Error()),
)
}
if desiredPodConfig != nil {
workspace.Status.PendingRestart = true
workspace.Status.PodTemplateOptions.PodConfig.Desired = desiredPodConfig.Id
workspace.Status.PodTemplateOptions.PodConfig.RedirectChain = podConfigRedirectChain
} else {
workspace.Status.PodTemplateOptions.PodConfig.Desired = currentPodConfig.Id
workspace.Status.PodTemplateOptions.PodConfig.RedirectChain = nil
}
//
// TODO: in the future, we might want to use "pendingRestart" for other changes to WorkspaceKind that update the PodTemplate
// like `podMetadata`, `probes`, `extraEnv`, or `containerSecurityContext`. But for now, changes to these fields
// will result in a forced restart of all Workspaces using the WorkspaceKind.
//
// if the Workspace is paused and a restart is pending, update the Workspace with the new options
if *workspace.Spec.Paused && workspace.Status.PendingRestart && !*workspace.Spec.DeferUpdates {
workspace.Spec.PodTemplate.Options.ImageConfig = workspace.Status.PodTemplateOptions.ImageConfig.Desired
workspace.Spec.PodTemplate.Options.PodConfig = workspace.Status.PodTemplateOptions.PodConfig.Desired
if err := r.Update(ctx, workspace); err != nil {
if apierrors.IsConflict(err) {
log.V(2).Info("update conflict while updating Workspace, will requeue")
return ctrl.Result{Requeue: true}, nil
}
log.Error(err, "unable to update Workspace")
return ctrl.Result{}, err
}
workspace.Status.PendingRestart = false
if err := r.Status().Update(ctx, workspace); err != nil {
if apierrors.IsConflict(err) {
log.V(2).Info("update conflict while updating Workspace status, will requeue")
return ctrl.Result{Requeue: true}, nil
}
log.Error(err, "unable to update Workspace status")
return ctrl.Result{}, err
}
}
// generate StatefulSet
statefulSet, err := generateStatefulSet(workspace, workspaceKind, currentImageConfig.Spec, currentPodConfig.Spec)
if err != nil {
log.V(0).Info("failed to generate StatefulSet for Workspace", "error", err.Error())
return r.updateWorkspaceState(ctx, log, workspace,
kubefloworgv1beta1.WorkspaceStateError,
fmt.Sprintf(stateMsgErrorGenFailureStatefulSet, err.Error()),
)
}
if err := ctrl.SetControllerReference(workspace, statefulSet, r.Scheme); err != nil {
log.Error(err, "unable to set controller reference on StatefulSet")
return ctrl.Result{}, err
}
// fetch StatefulSets
// NOTE: we filter by StatefulSets that are owned by the Workspace, not by name
// this allows us to generate a random name for the StatefulSet with `metadata.generateName`
var statefulSetName string
ownedStatefulSets := &appsv1.StatefulSetList{}
listOpts := &client.ListOptions{
FieldSelector: fields.OneTermEqualSelector(kbCacheWorkspaceOwnerKey, workspace.Name),
Namespace: req.Namespace,
}
if err := r.List(ctx, ownedStatefulSets, listOpts); err != nil {
log.Error(err, "unable to list StatefulSets")
return ctrl.Result{}, err
}
// reconcile StatefulSet
if len(ownedStatefulSets.Items) > 1 {
statefulSetList := make([]string, len(ownedStatefulSets.Items))
for i, sts := range ownedStatefulSets.Items {
statefulSetList[i] = sts.Name
}
statefulSetListString := strings.Join(statefulSetList, ", ")
log.Error(nil, "Workspace owns multiple StatefulSets", "statefulSets", statefulSetListString)
return r.updateWorkspaceState(ctx, log, workspace,
kubefloworgv1beta1.WorkspaceStateError,
fmt.Sprintf(stateMsgErrorMultipleStatefulSets, statefulSetListString),
)
} else if len(ownedStatefulSets.Items) == 0 {
if err := r.Create(ctx, statefulSet); err != nil {
log.Error(err, "unable to create StatefulSet")
return ctrl.Result{}, err
}
statefulSetName = statefulSet.ObjectMeta.Name
log.V(2).Info("StatefulSet created", "statefulSet", statefulSetName)
} else {
foundStatefulSet := &ownedStatefulSets.Items[0]
statefulSetName = foundStatefulSet.ObjectMeta.Name
if helper.CopyStatefulSetFields(statefulSet, foundStatefulSet) {
if err := r.Update(ctx, foundStatefulSet); err != nil {
if apierrors.IsConflict(err) {
log.V(2).Info("update conflict while updating StatefulSet, will requeue")
return ctrl.Result{Requeue: true}, nil
}
log.Error(err, "unable to update StatefulSet")
return ctrl.Result{}, err
}
log.V(2).Info("StatefulSet updated", "statefulSet", statefulSetName)
}
}
// generate Service
service, err := generateService(workspace, currentImageConfig.Spec)
if err != nil {
log.V(0).Info("failed to generate Service for Workspace", "error", err.Error())
return r.updateWorkspaceState(ctx, log, workspace,
kubefloworgv1beta1.WorkspaceStateError,
fmt.Sprintf(stateMsgErrorGenFailureService, err.Error()),
)
}
if err := ctrl.SetControllerReference(workspace, service, r.Scheme); err != nil {
log.Error(err, "unable to set controller reference on Service")
return ctrl.Result{}, err
}
// fetch Services
// NOTE: we filter by Services that are owned by the Workspace, not by name
// this allows us to generate a random name for the Service with `metadata.generateName`
var serviceName string
ownedServices := &corev1.ServiceList{}
listOpts = &client.ListOptions{
FieldSelector: fields.OneTermEqualSelector(kbCacheWorkspaceOwnerKey, workspace.Name),
Namespace: req.Namespace,
}
if err := r.List(ctx, ownedServices, listOpts); err != nil {
log.Error(err, "unable to list Services")
return ctrl.Result{}, err
}
// reconcile Service
if len(ownedServices.Items) > 1 {
serviceList := make([]string, len(ownedServices.Items))
for i, sts := range ownedServices.Items {
serviceList[i] = sts.Name
}
serviceListString := strings.Join(serviceList, ", ")
log.Error(nil, "Workspace owns multiple Services", "services", serviceListString)
return r.updateWorkspaceState(ctx, log, workspace,
kubefloworgv1beta1.WorkspaceStateError,
fmt.Sprintf(stateMsgErrorMultipleServices, serviceListString),
)
} else if len(ownedServices.Items) == 0 {
if err := r.Create(ctx, service); err != nil {
log.Error(err, "unable to create Service")
return ctrl.Result{}, err
}
serviceName = service.ObjectMeta.Name
log.V(2).Info("Service created", "service", serviceName)
} else {
foundService := &ownedServices.Items[0]
serviceName = foundService.ObjectMeta.Name
if helper.CopyServiceFields(service, foundService) {
if err := r.Update(ctx, foundService); err != nil {
if apierrors.IsConflict(err) {
log.V(2).Info("update conflict while updating Service, will requeue")
return ctrl.Result{Requeue: true}, nil
}
log.Error(err, "unable to update Service")
return ctrl.Result{}, err
}
log.V(2).Info("Service updated", "service", serviceName)
}
}
//
// TODO: reconcile the Istio VirtualService to expose the Workspace
// and implement the `spec.podTemplate.httpProxy` options
//
// fetch Pod
// NOTE: the first StatefulSet Pod is always called "{statefulSetName}-0"
podName := fmt.Sprintf("%s-0", statefulSetName)
pod := &corev1.Pod{}
if err := r.Get(ctx, client.ObjectKey{Name: podName, Namespace: req.Namespace}, pod); err != nil {
if apierrors.IsNotFound(err) {
pod = nil
} else {
log.Error(err, "unable to fetch Pod")
return ctrl.Result{}, err
}
}
//
// TODO: figure out how to set `status.pauseTime`, it will probably have to be done in a webhook
//
// update Workspace status
workspaceStatus := generateWorkspaceStatus(workspace, pod)
if !reflect.DeepEqual(workspace.Status, workspaceStatus) {
workspace.Status = workspaceStatus
if err := r.Status().Update(ctx, workspace); err != nil {
if apierrors.IsConflict(err) {
log.V(2).Info("update conflict while updating Workspace status, will requeue")
return ctrl.Result{Requeue: true}, nil
}
log.Error(err, "unable to update Workspace status")
return ctrl.Result{}, err
}
}
return ctrl.Result{}, nil
}
// SetupWithManager sets up the controller with the Manager.
func (r *WorkspaceReconciler) SetupWithManager(mgr ctrl.Manager) error {
	// indexer: StatefulSet -> name of the Workspace that controller-owns it
	indexStatefulSetOwner := func(rawObj client.Object) []string {
		statefulSet := rawObj.(*appsv1.StatefulSet)
		ownerRef := metav1.GetControllerOf(statefulSet)
		if ownerRef == nil || ownerRef.APIVersion != apiGroupVersionStr || ownerRef.Kind != "Workspace" {
			return nil
		}
		return []string{ownerRef.Name}
	}

	// indexer: Service -> name of the Workspace that controller-owns it
	indexServiceOwner := func(rawObj client.Object) []string {
		service := rawObj.(*corev1.Service)
		ownerRef := metav1.GetControllerOf(service)
		if ownerRef == nil || ownerRef.APIVersion != apiGroupVersionStr || ownerRef.Kind != "Workspace" {
			return nil
		}
		return []string{ownerRef.Name}
	}

	// indexer: Workspace -> name of the WorkspaceKind it references
	indexWorkspaceKind := func(rawObj client.Object) []string {
		ws := rawObj.(*kubefloworgv1beta1.Workspace)
		if ws.Spec.Kind == "" {
			return nil
		}
		return []string{ws.Spec.Kind}
	}

	// register the cache field indexes
	fieldIndexer := mgr.GetFieldIndexer()
	if err := fieldIndexer.IndexField(context.Background(), &appsv1.StatefulSet{}, kbCacheWorkspaceOwnerKey, indexStatefulSetOwner); err != nil {
		return err
	}
	if err := fieldIndexer.IndexField(context.Background(), &corev1.Service{}, kbCacheWorkspaceOwnerKey, indexServiceOwner); err != nil {
		return err
	}
	if err := fieldIndexer.IndexField(context.Background(), &kubefloworgv1beta1.Workspace{}, kbCacheWorkspaceKindField, indexWorkspaceKind); err != nil {
		return err
	}

	// map Pod events onto the Workspace named by the pod's "workspace-name" label
	podToWorkspaceRequest := func(ctx context.Context, object client.Object) []reconcile.Request {
		request := reconcile.Request{
			NamespacedName: types.NamespacedName{
				Name:      object.GetLabels()[workspaceNameLabel],
				Namespace: object.GetNamespace(),
			},
		}
		return []reconcile.Request{request}
	}

	// only react to pods that actually carry the "workspace-name" label key
	podHasWorkspaceLabel := predicate.NewPredicateFuncs(func(object client.Object) bool {
		_, ok := object.GetLabels()[workspaceNameLabel]
		return ok
	})

	return ctrl.NewControllerManagedBy(mgr).
		For(&kubefloworgv1beta1.Workspace{}).
		Owns(&appsv1.StatefulSet{}).
		Owns(&corev1.Service{}).
		Watches(
			&kubefloworgv1beta1.WorkspaceKind{},
			handler.EnqueueRequestsFromMapFunc(r.mapWorkspaceKindToRequest),
			builder.WithPredicates(predicate.GenerationChangedPredicate{}),
		).
		Watches(
			&corev1.Pod{},
			handler.EnqueueRequestsFromMapFunc(podToWorkspaceRequest),
			builder.WithPredicates(predicate.ResourceVersionChangedPredicate{}, podHasWorkspaceLabel),
		).
		Complete(r)
}
// updateWorkspaceState attempts to immediately update the Workspace status with the provided state and message
func (r *WorkspaceReconciler) updateWorkspaceState(ctx context.Context, log logr.Logger, workspace *kubefloworgv1beta1.Workspace, state kubefloworgv1beta1.WorkspaceState, message string) (ctrl.Result, error) { // nolint:unparam
	if workspace == nil {
		return ctrl.Result{}, fmt.Errorf("provided Workspace was nil")
	}

	// avoid an API call when the status already matches
	if workspace.Status.State == state && workspace.Status.StateMessage == message {
		return ctrl.Result{}, nil
	}

	workspace.Status.State = state
	workspace.Status.StateMessage = message
	err := r.Status().Update(ctx, workspace)
	if err == nil {
		return ctrl.Result{}, nil
	}
	if apierrors.IsConflict(err) {
		log.V(2).Info("update conflict while updating Workspace status, will requeue")
		return ctrl.Result{Requeue: true}, nil
	}
	log.Error(err, "unable to update Workspace status")
	return ctrl.Result{}, err
}
// mapWorkspaceKindToRequest converts WorkspaceKind events to reconcile requests for Workspaces
func (r *WorkspaceReconciler) mapWorkspaceKindToRequest(ctx context.Context, workspaceKind client.Object) []reconcile.Request {
	// find all Workspaces that reference this WorkspaceKind (via the cache index)
	workspaces := &kubefloworgv1beta1.WorkspaceList{}
	listOptions := &client.ListOptions{
		FieldSelector: fields.OneTermEqualSelector(kbCacheWorkspaceKindField, workspaceKind.GetName()),
		Namespace:     "", // fetch Workspaces in all namespaces
	}
	if err := r.List(ctx, workspaces, listOptions); err != nil {
		return []reconcile.Request{}
	}

	// enqueue one reconcile request per attached Workspace
	requests := make([]reconcile.Request, 0, len(workspaces.Items))
	for _, workspace := range workspaces.Items {
		requests = append(requests, reconcile.Request{
			NamespacedName: types.NamespacedName{
				Name:      workspace.GetName(),
				Namespace: workspace.GetNamespace(),
			},
		})
	}
	return requests
}
// getImageConfig returns the current and desired (after redirects) ImageConfigValues for the Workspace
// the desired value is nil when no redirect applies; the redirect chain records each hop taken
func getImageConfig(workspace *kubefloworgv1beta1.Workspace, workspaceKind *kubefloworgv1beta1.WorkspaceKind) (*kubefloworgv1beta1.ImageConfigValue, *kubefloworgv1beta1.ImageConfigValue, []kubefloworgv1beta1.WorkspacePodOptionRedirectStep, error) {
	// index the WorkspaceKind's imageConfig values by id
	valuesById := make(map[string]kubefloworgv1beta1.ImageConfigValue, len(workspaceKind.Spec.PodTemplate.Options.ImageConfig.Values))
	for _, value := range workspaceKind.Spec.PodTemplate.Options.ImageConfig.Values {
		valuesById[value.Id] = value
	}

	// resolve the imageConfig currently selected by the Workspace (ignoring any redirects)
	selectedId := workspace.Spec.PodTemplate.Options.ImageConfig
	current, found := valuesById[selectedId]
	if !found {
		return nil, nil, nil, fmt.Errorf("imageConfig with id '%s' not found", selectedId)
	}

	// walk the redirect chain to find the desired imageConfig, guarding against cycles
	var redirectChain []kubefloworgv1beta1.WorkspacePodOptionRedirectStep
	seen := map[string]bool{current.Id: true}
	desired := current
	for desired.Redirect != nil {
		targetId := desired.Redirect.To
		if seen[targetId] {
			return nil, nil, nil, fmt.Errorf("imageConfig with id '%s' has a circular redirect", desired.Id)
		}
		next, found := valuesById[targetId]
		if !found {
			return nil, nil, nil, fmt.Errorf("imageConfig with id '%s' not found, was redirected from '%s'", targetId, desired.Id)
		}
		redirectChain = append(redirectChain, kubefloworgv1beta1.WorkspacePodOptionRedirectStep{
			Source: desired.Id,
			Target: next.Id,
		})
		desired = next
		seen[desired.Id] = true
	}

	// only report a desired imageConfig when it differs from the current one
	if desired.Id == current.Id {
		return &current, nil, nil, nil
	}
	return &current, &desired, redirectChain, nil
}
// getPodConfig returns the current and desired (after redirects) PodConfigValues for the Workspace
// the desired value is nil when no redirect applies; the redirect chain records each hop taken
func getPodConfig(workspace *kubefloworgv1beta1.Workspace, workspaceKind *kubefloworgv1beta1.WorkspaceKind) (*kubefloworgv1beta1.PodConfigValue, *kubefloworgv1beta1.PodConfigValue, []kubefloworgv1beta1.WorkspacePodOptionRedirectStep, error) {
	// index the WorkspaceKind's podConfig values by id
	valuesById := make(map[string]kubefloworgv1beta1.PodConfigValue, len(workspaceKind.Spec.PodTemplate.Options.PodConfig.Values))
	for _, value := range workspaceKind.Spec.PodTemplate.Options.PodConfig.Values {
		valuesById[value.Id] = value
	}

	// resolve the podConfig currently selected by the Workspace (ignoring any redirects)
	selectedId := workspace.Spec.PodTemplate.Options.PodConfig
	current, found := valuesById[selectedId]
	if !found {
		return nil, nil, nil, fmt.Errorf("podConfig with id '%s' not found", selectedId)
	}

	// walk the redirect chain to find the desired podConfig, guarding against cycles
	var redirectChain []kubefloworgv1beta1.WorkspacePodOptionRedirectStep
	seen := map[string]bool{current.Id: true}
	desired := current
	for desired.Redirect != nil {
		targetId := desired.Redirect.To
		if seen[targetId] {
			return nil, nil, nil, fmt.Errorf("podConfig with id '%s' has a circular redirect", desired.Id)
		}
		next, found := valuesById[targetId]
		if !found {
			return nil, nil, nil, fmt.Errorf("podConfig with id '%s' not found, was redirected from '%s'", targetId, desired.Id)
		}
		redirectChain = append(redirectChain, kubefloworgv1beta1.WorkspacePodOptionRedirectStep{
			Source: desired.Id,
			Target: next.Id,
		})
		desired = next
		seen[desired.Id] = true
	}

	// only report a desired podConfig when it differs from the current one
	if desired.Id == current.Id {
		return &current, nil, nil, nil
	}
	return &current, &desired, redirectChain, nil
}
// generateNamePrefix generates a name prefix for a Workspace
// the format is "ws-{WORKSPACE_NAME}-" the workspace name is truncated to fit within the max length
func generateNamePrefix(workspaceName string, maxLength int) string {
	namePrefix := fmt.Sprintf("ws-%s", workspaceName)

	// reserve space for the `metadata.generateName` suffix and the trailing "-"
	maxLength -= generateNameSuffixLength + 1

	// clamp to avoid a negative slice bound when maxLength is very small
	if maxLength < 0 {
		maxLength = 0
	}

	// truncate the prefix so the generated name fits within the max length
	// NOTE: the previous inner min() was redundant — len(namePrefix) > maxLength is already known here
	if len(namePrefix) > maxLength {
		namePrefix = namePrefix[:maxLength]
	}

	// ensure the prefix ends with a "-" to separate it from the random suffix
	// NOTE: HasSuffix is safe for an empty prefix, unlike indexing the last byte
	if !strings.HasSuffix(namePrefix, "-") {
		namePrefix += "-"
	}
	return namePrefix
}
// generateStatefulSet generates a StatefulSet for a Workspace
//
// The StatefulSet is assembled from the WorkspaceKind's pod template combined with
// the Workspace's selected imageConfig and podConfig values.
// An error is returned if the imageConfig defines duplicate port numbers, or if a
// `spec.podTemplate.extraEnv[].value` go-template fails to parse or execute.
func generateStatefulSet(workspace *kubefloworgv1beta1.Workspace, workspaceKind *kubefloworgv1beta1.WorkspaceKind, imageConfigSpec kubefloworgv1beta1.ImageConfigSpec, podConfigSpec kubefloworgv1beta1.PodConfigSpec) (*appsv1.StatefulSet, error) {
	// generate name prefix
	namePrefix := generateNamePrefix(workspace.Name, maxStatefulSetNameLength)

	// generate replica count
	// NOTE: a paused Workspace is scaled to 0 replicas
	//       (nil-safe in case the `spec.paused` default has not been applied)
	replicas := int32(1)
	if workspace.Spec.Paused != nil && *workspace.Spec.Paused {
		replicas = int32(0)
	}

	// generate pod metadata
	// NOTE: Workspace values take precedence over WorkspaceKind values on key conflicts
	podAnnotations := labels.Merge(workspaceKind.Spec.PodTemplate.PodMetadata.Annotations, workspace.Spec.PodTemplate.PodMetadata.Annotations)
	podLabels := labels.Merge(workspaceKind.Spec.PodTemplate.PodMetadata.Labels, workspace.Spec.PodTemplate.PodMetadata.Labels)

	// generate container imagePullPolicy, defaulting to IfNotPresent
	imagePullPolicy := corev1.PullIfNotPresent
	if imageConfigSpec.ImagePullPolicy != nil {
		imagePullPolicy = *imageConfigSpec.ImagePullPolicy
	}

	// define go string template functions
	// NOTE: these are used in places like the `extraEnv` values
	containerPortsIdMap := make(map[string]kubefloworgv1beta1.ImagePort)
	httpPathPrefixFunc := func(portId string) string {
		// returns the HTTP path prefix for a known port id, or "" for an unknown id
		port, ok := containerPortsIdMap[portId]
		if ok {
			return fmt.Sprintf("/workspace/%s/%s/%s/", workspace.Namespace, workspace.Name, port.Id)
		} else {
			return ""
		}
	}

	// generate container ports, rejecting duplicate port numbers
	containerPorts := make([]corev1.ContainerPort, len(imageConfigSpec.Ports))
	seenPorts := make(map[int32]bool)
	for i, port := range imageConfigSpec.Ports {
		if seenPorts[port.Port] {
			return nil, fmt.Errorf("duplicate port number %d in imageConfig", port.Port)
		}
		containerPorts[i] = corev1.ContainerPort{
			Name:          fmt.Sprintf("http-%d", port.Port),
			ContainerPort: port.Port,
			Protocol:      corev1.ProtocolTCP,
		}
		seenPorts[port.Port] = true

		// NOTE: we construct this map for use in the go string templates
		containerPortsIdMap[port.Id] = port
	}

	// generate container env
	// NOTE: each `extraEnv[].value` is rendered as a go template with `httpPathPrefix` available
	containerEnv := make([]corev1.EnvVar, len(workspaceKind.Spec.PodTemplate.ExtraEnv))
	for i, env := range workspaceKind.Spec.PodTemplate.ExtraEnv {
		if env.Value != "" {
			rawValue := env.Value
			tmpl, err := template.New("value").
				Funcs(template.FuncMap{"httpPathPrefix": httpPathPrefixFunc}).
				Parse(rawValue)
			if err != nil {
				err = fmt.Errorf("failed to parse template for extraEnv '%s': %w", env.Name, err)
				return nil, err
			}
			var buf bytes.Buffer
			err = tmpl.Execute(&buf, nil)
			if err != nil {
				err = fmt.Errorf("failed to execute template for extraEnv '%s': %w", env.Name, err)
				return nil, err
			}
			// NOTE: `env` is a loop copy, so mutating it does not affect the WorkspaceKind
			env.Value = buf.String()
		}
		containerEnv[i] = env
	}

	// generate container resources from the podConfig (empty when unset)
	containerResources := corev1.ResourceRequirements{}
	if podConfigSpec.Resources != nil {
		containerResources = *podConfigSpec.Resources
	}

	// generate container probes from the WorkspaceKind (all optional)
	var readinessProbe *corev1.Probe
	var livenessProbe *corev1.Probe
	var startupProbe *corev1.Probe
	if workspaceKind.Spec.PodTemplate.Probes != nil {
		if workspaceKind.Spec.PodTemplate.Probes.ReadinessProbe != nil {
			readinessProbe = workspaceKind.Spec.PodTemplate.Probes.ReadinessProbe
		}
		if workspaceKind.Spec.PodTemplate.Probes.LivenessProbe != nil {
			livenessProbe = workspaceKind.Spec.PodTemplate.Probes.LivenessProbe
		}
		if workspaceKind.Spec.PodTemplate.Probes.StartupProbe != nil {
			startupProbe = workspaceKind.Spec.PodTemplate.Probes.StartupProbe
		}
	}

	// generate volumes and volumeMounts
	// NOTE: the seen-maps enforce unique volume names and unique mount paths
	volumes := make([]corev1.Volume, 0)
	volumeMounts := make([]corev1.VolumeMount, 0)
	seenVolumeNames := make(map[string]bool)
	seenVolumeMountPaths := make(map[string]bool)

	// add home volume
	if workspace.Spec.PodTemplate.Volumes.Home != nil {
		homeVolume := corev1.Volume{
			Name: "home-volume",
			VolumeSource: corev1.VolumeSource{
				PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
					ClaimName: *workspace.Spec.PodTemplate.Volumes.Home,
				},
			},
		}
		homeVolumeMount := corev1.VolumeMount{
			Name:      homeVolume.Name,
			MountPath: workspaceKind.Spec.PodTemplate.VolumeMounts.Home,
		}
		seenVolumeNames[homeVolume.Name] = true
		seenVolumeMountPaths[homeVolumeMount.MountPath] = true
		volumes = append(volumes, homeVolume)
		volumeMounts = append(volumeMounts, homeVolumeMount)
	}

	// add data volumes
	for i, data := range workspace.Spec.PodTemplate.Volumes.Data {
		dataVolume := corev1.Volume{
			Name: fmt.Sprintf("data-volume-%d", i),
			VolumeSource: corev1.VolumeSource{
				PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
					ClaimName: data.PVCName,
				},
			},
		}
		dataVolumeMount := corev1.VolumeMount{
			Name:      dataVolume.Name,
			MountPath: data.MountPath,
		}
		// NOTE: nil-safe in case the `readOnly` default has not been applied
		if data.ReadOnly != nil && *data.ReadOnly {
			dataVolume.PersistentVolumeClaim.ReadOnly = true
			dataVolumeMount.ReadOnly = true
		}
		if seenVolumeNames[dataVolume.Name] {
			// silently skip duplicate volume names
			// NOTE: should not be possible because the home volume uses a different name structure
			continue
		}
		if seenVolumeMountPaths[dataVolumeMount.MountPath] {
			// silently skip duplicate mount paths
			// NOTE: this will only happen if the user tries to mount a data volume at the same path as the home
			continue
		}
		seenVolumeNames[dataVolume.Name] = true
		seenVolumeMountPaths[dataVolumeMount.MountPath] = true
		volumes = append(volumes, dataVolume)
		volumeMounts = append(volumeMounts, dataVolumeMount)
	}

	// add extra volumes
	for _, extraVolume := range workspaceKind.Spec.PodTemplate.ExtraVolumes {
		if seenVolumeNames[extraVolume.Name] {
			// silently skip duplicate volume names
			continue
		}
		volumes = append(volumes, extraVolume)
		seenVolumeNames[extraVolume.Name] = true
	}

	// add extra volumeMounts
	for _, extraVolumeMount := range workspaceKind.Spec.PodTemplate.ExtraVolumeMounts {
		if seenVolumeMountPaths[extraVolumeMount.MountPath] {
			// silently skip duplicate mount paths
			continue
		}
		if !seenVolumeNames[extraVolumeMount.Name] {
			// silently skip mount paths that reference non-existent volume names
			continue
		}
		volumeMounts = append(volumeMounts, extraVolumeMount)
		seenVolumeMountPaths[extraVolumeMount.MountPath] = true
	}

	// generate StatefulSet
	statefulSet := &appsv1.StatefulSet{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: namePrefix,
			Namespace:    workspace.Namespace,
			Labels: map[string]string{
				workspaceNameLabel: workspace.Name,
			},
		},
		//
		// NOTE: if you add new fields, ensure they are reflected in `helper.CopyStatefulSetFields()`
		//
		Spec: appsv1.StatefulSetSpec{
			Replicas: &replicas,
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{
					workspaceNameLabel:     workspace.Name,
					workspaceSelectorLabel: workspace.Name,
				},
			},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Annotations: podAnnotations,
					Labels: labels.Merge(
						podLabels,
						map[string]string{
							workspaceNameLabel:     workspace.Name,
							workspaceSelectorLabel: workspace.Name,
						},
					),
				},
				Spec: corev1.PodSpec{
					Affinity: podConfigSpec.Affinity,
					Containers: []corev1.Container{
						{
							Name:            "main",
							Image:           imageConfigSpec.Image,
							ImagePullPolicy: imagePullPolicy,
							Ports:           containerPorts,
							ReadinessProbe:  readinessProbe,
							LivenessProbe:   livenessProbe,
							StartupProbe:    startupProbe,
							SecurityContext: workspaceKind.Spec.PodTemplate.ContainerSecurityContext,
							VolumeMounts:    volumeMounts,
							Env:             containerEnv,
							Resources:       containerResources,
						},
					},
					NodeSelector:       podConfigSpec.NodeSelector,
					SecurityContext:    workspaceKind.Spec.PodTemplate.SecurityContext,
					ServiceAccountName: workspaceKind.Spec.PodTemplate.ServiceAccount.Name,
					Tolerations:        podConfigSpec.Tolerations,
					Volumes:            volumes,
				},
			},
		},
	}
	return statefulSet, nil
}
// generateService generates a Service for a Workspace
// an error is returned if the imageConfig defines duplicate port numbers
func generateService(workspace *kubefloworgv1beta1.Workspace, imageConfigSpec kubefloworgv1beta1.ImageConfigSpec) (*corev1.Service, error) {
	// generate name prefix
	namePrefix := generateNamePrefix(workspace.Name, maxServiceNameLength)

	// build one ServicePort per imageConfig port, rejecting duplicate port numbers
	usedPorts := make(map[int32]bool, len(imageConfigSpec.Ports))
	servicePorts := make([]corev1.ServicePort, 0, len(imageConfigSpec.Ports))
	for _, imagePort := range imageConfigSpec.Ports {
		if usedPorts[imagePort.Port] {
			return nil, fmt.Errorf("duplicate port number %d in imageConfig", imagePort.Port)
		}
		usedPorts[imagePort.Port] = true
		servicePorts = append(servicePorts, corev1.ServicePort{
			Name:       fmt.Sprintf("http-%d", imagePort.Port),
			TargetPort: intstr.FromInt32(imagePort.Port),
			Port:       imagePort.Port,
			Protocol:   corev1.ProtocolTCP,
		})
	}

	// generate Service
	service := &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: namePrefix,
			Namespace:    workspace.Namespace,
			Labels: map[string]string{
				workspaceNameLabel: workspace.Name,
			},
		},
		//
		// NOTE: if you add new fields, ensure they are reflected in `helper.CopyServiceFields()`
		//
		Spec: corev1.ServiceSpec{
			Ports: servicePorts,
			Selector: map[string]string{
				workspaceNameLabel:     workspace.Name,
				workspaceSelectorLabel: workspace.Name,
			},
			Type: corev1.ServiceTypeClusterIP,
		},
	}
	return service, nil
}
// generateWorkspaceStatus generates a WorkspaceStatus for a Workspace
// based on the current state of its Pod (`pod` may be nil if no Pod exists)
func generateWorkspaceStatus(workspace *kubefloworgv1beta1.Workspace, pod *corev1.Pod) kubefloworgv1beta1.WorkspaceStatus {
	// start from the existing status so unrelated fields are preserved
	status := workspace.Status

	// cases where the Pod exists
	if pod != nil {
		// STATUS: Terminating
		if pod.GetDeletionTimestamp() != nil {
			status.State = kubefloworgv1beta1.WorkspaceStateTerminating
			status.StateMessage = stateMsgTerminating
			return status
		}

		// get the pod phase
		// https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase
		podPhase := pod.Status.Phase

		// get the pod conditions
		// https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-conditions
		podReady := false
		for _, condition := range pod.Status.Conditions {
			if condition.Type == corev1.PodReady {
				podReady = condition.Status == corev1.ConditionTrue
			}
		}

		// get the status of the "main" container
		// https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-states
		var containerStatus corev1.ContainerStatus
		for _, container := range pod.Status.ContainerStatuses {
			if container.Name == "main" {
				containerStatus = container
				break
			}
		}

		// get the container state
		// NOTE: zero value (all fields nil) when the "main" container status was not found
		containerState := containerStatus.State

		// STATUS: Running
		if podPhase == corev1.PodRunning && podReady {
			status.State = kubefloworgv1beta1.WorkspaceStateRunning
			status.StateMessage = stateMsgRunning
			return status
		}

		// STATUS: Error
		// NOTE: only back-off waiting reasons are treated as errors
		if containerState.Waiting != nil {
			if containerState.Waiting.Reason == "CrashLoopBackOff" {
				status.State = kubefloworgv1beta1.WorkspaceStateError
				status.StateMessage = stateMsgErrorPodCrashLoopBackOff
				return status
			}
			if containerState.Waiting.Reason == "ImagePullBackOff" {
				status.State = kubefloworgv1beta1.WorkspaceStateError
				status.StateMessage = stateMsgErrorPodImagePullBackOff
				return status
			}
		}

		// STATUS: Pending
		if podPhase == corev1.PodPending {
			status.State = kubefloworgv1beta1.WorkspaceStatePending
			status.StateMessage = stateMsgPending
			return status
		}
	}

	// cases where the Pod does not exist
	// STATUS: Paused
	// NOTE: nil-safe in case the `spec.paused` default has not been applied
	if pod == nil && workspace.Spec.Paused != nil && *workspace.Spec.Paused {
		status.State = kubefloworgv1beta1.WorkspaceStatePaused
		status.StateMessage = stateMsgPaused
		return status
	}

	// STATUS: Unknown
	status.State = kubefloworgv1beta1.WorkspaceStateUnknown
	status.StateMessage = stateMsgUnknown
	return status
}

View File

@ -17,101 +17,85 @@ limitations under the License.
package controller
import (
"context"
"k8s.io/utils/ptr"
"fmt"
"time"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kubefloworgv1beta1 "github.com/kubeflow/notebooks/workspaces/controller/api/v1beta1"
)
var _ = Describe("Workspace Controller", func() {
// Define variables to store common objects for tests.
var (
testResource1 *kubefloworgv1beta1.Workspace
)
// Define utility constants and variables for object names and testing.
// Define utility constants for object names and testing timeouts/durations and intervals.
const (
testResourceName1 = "workspace-test"
testResourceNamespace = "default"
namespaceName = "default"
// how long to wait in "Eventually" blocks
timeout = time.Second * 10
// how long to wait in "Consistently" blocks
duration = time.Second * 10
// how frequently to poll for conditions
interval = time.Millisecond * 250
)
BeforeEach(func() {
testResource1 = &kubefloworgv1beta1.Workspace{
ObjectMeta: metav1.ObjectMeta{
Name: testResourceName1,
Namespace: "default",
},
Spec: kubefloworgv1beta1.WorkspaceSpec{
Paused: ptr.To(false),
Kind: "juptyerlab",
PodTemplate: kubefloworgv1beta1.WorkspacePodTemplate{
PodMetadata: &kubefloworgv1beta1.WorkspacePodMetadata{
Labels: nil,
Annotations: nil,
},
Volumes: kubefloworgv1beta1.WorkspacePodVolumes{
Home: "my-home-pvc",
Data: []kubefloworgv1beta1.PodVolumeMount{
{
Name: "my-data-pvc",
MountPath: "/data/my-data",
},
},
},
Options: kubefloworgv1beta1.WorkspacePodOptions{
ImageConfig: "jupyter_scipy_170",
PodConfig: "big_gpu",
},
},
},
}
})
Context("When updating a Workspace", Ordered, func() {
Context("When reconciling a Workspace", func() {
ctx := context.Background()
// Define utility variables for object names.
// NOTE: to avoid conflicts between parallel tests, resource names are unique to each test
var (
workspaceName string
workspaceKindName string
workspaceKey types.NamespacedName
)
typeNamespacedName := types.NamespacedName{
Name: testResourceName1,
Namespace: testResourceNamespace,
}
BeforeAll(func() {
uniqueName := "ws-update-test"
workspaceName = fmt.Sprintf("workspace-%s", uniqueName)
workspaceKindName = fmt.Sprintf("workspacekind-%s", uniqueName)
workspaceKey = types.NamespacedName{Name: workspaceName, Namespace: namespaceName}
workspace := &kubefloworgv1beta1.Workspace{}
By("creating the WorkspaceKind")
workspaceKind := NewExampleWorkspaceKind1(workspaceKindName)
Expect(k8sClient.Create(ctx, workspaceKind)).To(Succeed())
BeforeEach(func() {
By("creating the custom resource for the Kind Workspace")
err := k8sClient.Get(ctx, typeNamespacedName, workspace)
if err != nil && errors.IsNotFound(err) {
resource := testResource1.DeepCopy()
Expect(k8sClient.Create(ctx, resource)).To(Succeed())
} else {
Expect(err).NotTo(HaveOccurred())
}
By("checking if the Workspace exists")
Expect(k8sClient.Get(ctx, typeNamespacedName, workspace)).To(Succeed())
By("creating the Workspace")
workspace := NewExampleWorkspace1(workspaceName, namespaceName, workspaceKindName)
Expect(k8sClient.Create(ctx, workspace)).To(Succeed())
})
AfterEach(func() {
By("checking if the Workspace still exists")
resource := &kubefloworgv1beta1.Workspace{}
err := k8sClient.Get(ctx, typeNamespacedName, resource)
Expect(err).NotTo(HaveOccurred())
AfterAll(func() {
By("deleting the Workspace")
Expect(k8sClient.Delete(ctx, resource)).To(Succeed())
workspace := &kubefloworgv1beta1.Workspace{
ObjectMeta: metav1.ObjectMeta{
Name: workspaceName,
Namespace: namespaceName,
},
}
Expect(k8sClient.Delete(ctx, workspace)).To(Succeed())
By("deleting the WorkspaceKind")
workspaceKind := &kubefloworgv1beta1.WorkspaceKind{
ObjectMeta: metav1.ObjectMeta{
Name: workspaceKindName,
},
}
Expect(k8sClient.Delete(ctx, workspaceKind)).To(Succeed())
})
It("should not allow updating immutable fields", func() {
By("getting the Workspace")
workspace := &kubefloworgv1beta1.Workspace{}
Expect(k8sClient.Get(ctx, workspaceKey, workspace)).To(Succeed())
patch := client.MergeFrom(workspace.DeepCopy())
By("failing to update the `spec.kind` field")
@ -119,20 +103,95 @@ var _ = Describe("Workspace Controller", func() {
newWorkspace.Spec.Kind = "new-kind"
Expect(k8sClient.Patch(ctx, newWorkspace, patch)).NotTo(Succeed())
})
})
It("should successfully reconcile the resource", func() {
By("Reconciling the created resource")
controllerReconciler := &WorkspaceReconciler{
Client: k8sClient,
Scheme: k8sClient.Scheme(),
}
Context("When reconciling a Workspace", Serial, Ordered, func() {
_, err := controllerReconciler.Reconcile(ctx, reconcile.Request{
NamespacedName: typeNamespacedName,
})
Expect(err).NotTo(HaveOccurred())
// TODO(user): Add more specific assertions depending on your controller's reconciliation logic.
// Example: If you expect a certain status condition after reconciliation, verify it here.
// Define utility variables for object names.
// NOTE: to avoid conflicts between parallel tests, resource names are unique to each test
var (
workspaceName string
workspaceKindName string
)
BeforeAll(func() {
uniqueName := "ws-reconcile-test"
workspaceName = fmt.Sprintf("workspace-%s", uniqueName)
workspaceKindName = fmt.Sprintf("workspacekind-%s", uniqueName)
})
It("should successfully reconcile the Workspace", func() {
By("creating a WorkspaceKind")
workspaceKind := NewExampleWorkspaceKind1(workspaceKindName)
Expect(k8sClient.Create(ctx, workspaceKind)).To(Succeed())
By("creating a Workspace")
workspace := NewExampleWorkspace1(workspaceName, namespaceName, workspaceKindName)
Expect(k8sClient.Create(ctx, workspace)).To(Succeed())
By("creating a StatefulSet")
statefulSetList := &appsv1.StatefulSetList{}
Eventually(func() ([]appsv1.StatefulSet, error) {
err := k8sClient.List(ctx, statefulSetList, client.InNamespace(namespaceName), client.MatchingLabels{workspaceNameLabel: workspaceName})
if err != nil {
return nil, err
}
return statefulSetList.Items, nil
}).Should(HaveLen(1))
// TODO: use this to get the StatefulSet
//statefulSet := statefulSetList.Items[0]
By("creating a Service")
serviceList := &corev1.ServiceList{}
Eventually(func() ([]corev1.Service, error) {
err := k8sClient.List(ctx, serviceList, client.InNamespace(namespaceName), client.MatchingLabels{workspaceNameLabel: workspaceName})
if err != nil {
return nil, err
}
return serviceList.Items, nil
}).Should(HaveLen(1))
// TODO: use this to get the Service
//service := serviceList.Items[0]
//
// TODO: populate these tests
// - use the CronJob controller tests as a reference
// https://github.com/kubernetes-sigs/kubebuilder/blob/master/docs/book/src/cronjob-tutorial/testdata/project/internal/controller/cronjob_controller_test.go
// - notes:
// - it may make sense to split some of these up into at least separate `It(` specs
// or even separate `Context(` scopes so we can run them in parallel
// - key things to test:
// - core behaviour:
// - resources like Service/StatefulSet/VirtualService/etc are created when the Workspace is created
// - even if the Workspace has a >64 character name, everything still works
// - deleting the reconciled resources, and ensuring they are recreated
// - updating the reconciled resources, and ensuring they are reverted
// - the go templates in WorkspaceKind `spec.podTemplate.extraEnv[].value` should work properly
// - succeed for valid portID
// - return empty string for invalid portID
// - set Workspace to error state for invalid template format (e.g. single quote for portID string)
// - workspace update behaviour:
// - pausing the Workspace results in the StatefulSet being scaled to 0
// - updating the selected options results in the correct resources being updated:
// - imageConfig - updates the StatefulSet and possibly the Service
// - podConfig - updates the StatefulSet
// - workspaceKind redirect behaviour:
// - when adding a redirect to the currently selected `imageConfig` or `podConfig`
// - if the workspace is NOT paused, NO resource changes are made except setting `status.pendingRestart`
// and `status.podTemplateOptions` (`desired` along with `redirectChain`)
// - if the workspace IS paused, but `deferUpdates` is true, the same as above
// - if the workspace IS paused and `deferUpdates` is false:
// - the selected options (under `spec`) should be changed to the redirect
// and `status.pendingRestart` should become false, and `podTemplateOptions` should be empty
// - the new options should be applied to the StatefulSet
// - error states:
// - referencing a missing WorkspaceKind results in error state
// - invalid WorkspaceKind (with bad option redirect - circular / missing) results in error state
// - multiple owned StatefulSets / Services results in error state
//
})
})
})

View File

@ -19,6 +19,16 @@ package controller
import (
"context"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
@ -27,6 +37,10 @@ import (
kubefloworgv1beta1 "github.com/kubeflow/notebooks/workspaces/controller/api/v1beta1"
)
// workspaceKindFinalizer protects a WorkspaceKind from deletion while one or
// more Workspaces still reference it; Reconcile removes the finalizer once no
// Workspace uses the WorkspaceKind.
const (
	workspaceKindFinalizer = "notebooks.kubeflow.org/workspacekind-protection"
)
// WorkspaceKindReconciler reconciles a WorkspaceKind object
type WorkspaceKindReconciler struct {
client.Client
@ -36,27 +50,124 @@ type WorkspaceKindReconciler struct {
//+kubebuilder:rbac:groups=kubeflow.org,resources=workspacekinds,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=kubeflow.org,resources=workspacekinds/status,verbs=get;update;patch
//+kubebuilder:rbac:groups=kubeflow.org,resources=workspacekinds/finalizers,verbs=update
//+kubebuilder:rbac:groups=kubeflow.org,resources=workspaces,verbs=get;list;watch
// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
// For a WorkspaceKind this means updating its status (workspace count and
// per-option usage metrics) and removing the protection finalizer once no
// Workspace references this WorkspaceKind anymore.
//
// For more details, check Reconcile and its Result here:
//   - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.17.3/pkg/reconcile
func (r *WorkspaceKindReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	log := log.FromContext(ctx)
	log.V(2).Info("reconciling WorkspaceKind")

	// fetch the WorkspaceKind
	workspaceKind := &kubefloworgv1beta1.WorkspaceKind{}
	if err := r.Get(ctx, req.NamespacedName, workspaceKind); err != nil {
		if client.IgnoreNotFound(err) == nil {
			// Request object not found, could have been deleted after reconcile request.
			// Owned objects are automatically garbage collected.
			// For additional cleanup logic use finalizers.
			// Return and don't requeue.
			return ctrl.Result{}, nil
		}
		log.Error(err, "unable to fetch WorkspaceKind")
		return ctrl.Result{}, err
	}

	// fetch all Workspaces that are using this WorkspaceKind
	// NOTE: this relies on the `kbCacheWorkspaceKindField` cache index,
	//       which is registered by the WorkspaceReconciler
	workspaces := &kubefloworgv1beta1.WorkspaceList{}
	listOpts := &client.ListOptions{
		FieldSelector: fields.OneTermEqualSelector(kbCacheWorkspaceKindField, workspaceKind.Name),
		Namespace:     "", // fetch Workspaces in all namespaces
	}
	if err := r.List(ctx, workspaces, listOpts); err != nil {
		log.Error(err, "unable to list Workspaces")
		return ctrl.Result{}, err
	}

	// if no Workspaces are using this WorkspaceKind, remove the protection
	// finalizer so the WorkspaceKind can be deleted
	numWorkspaces := len(workspaces.Items)
	if numWorkspaces == 0 {
		if controllerutil.ContainsFinalizer(workspaceKind, workspaceKindFinalizer) {
			controllerutil.RemoveFinalizer(workspaceKind, workspaceKindFinalizer)
			if err := r.Update(ctx, workspaceKind); err != nil {
				if apierrors.IsConflict(err) {
					log.V(2).Info("update conflict while removing finalizer from WorkspaceKind, will requeue")
					return ctrl.Result{Requeue: true}, nil
				}
				log.Error(err, "unable to remove finalizer from WorkspaceKind")
				return ctrl.Result{}, err
			}
		}
	}

	// count the number of Workspaces using each option
	// NOTE: the maps are seeded with every option id from the spec so that
	//       options with zero users still appear in the metrics
	imageConfigCount := make(map[string]int32, len(workspaceKind.Spec.PodTemplate.Options.ImageConfig.Values))
	podConfigCount := make(map[string]int32, len(workspaceKind.Spec.PodTemplate.Options.PodConfig.Values))
	for _, imageConfig := range workspaceKind.Spec.PodTemplate.Options.ImageConfig.Values {
		imageConfigCount[imageConfig.Id] = 0
	}
	for _, podConfig := range workspaceKind.Spec.PodTemplate.Options.PodConfig.Values {
		podConfigCount[podConfig.Id] = 0
	}
	for _, ws := range workspaces.Items {
		imageConfigCount[ws.Spec.PodTemplate.Options.ImageConfig]++
		podConfigCount[ws.Spec.PodTemplate.Options.PodConfig]++
	}

	// calculate the metrics for the WorkspaceKind, in spec order
	imageConfigMetrics := make([]kubefloworgv1beta1.OptionMetric, len(workspaceKind.Spec.PodTemplate.Options.ImageConfig.Values))
	podConfigMetrics := make([]kubefloworgv1beta1.OptionMetric, len(workspaceKind.Spec.PodTemplate.Options.PodConfig.Values))
	for i, imageConfig := range workspaceKind.Spec.PodTemplate.Options.ImageConfig.Values {
		imageConfigMetrics[i] = kubefloworgv1beta1.OptionMetric{
			Id:         imageConfig.Id,
			Workspaces: imageConfigCount[imageConfig.Id],
		}
	}
	for i, podConfig := range workspaceKind.Spec.PodTemplate.Options.PodConfig.Values {
		podConfigMetrics[i] = kubefloworgv1beta1.OptionMetric{
			Id:         podConfig.Id,
			Workspaces: podConfigCount[podConfig.Id],
		}
	}

	// update the WorkspaceKind status
	workspaceKind.Status.Workspaces = int32(numWorkspaces)
	workspaceKind.Status.PodTemplateOptions.ImageConfig = imageConfigMetrics
	workspaceKind.Status.PodTemplateOptions.PodConfig = podConfigMetrics
	if err := r.Status().Update(ctx, workspaceKind); err != nil {
		if apierrors.IsConflict(err) {
			log.V(2).Info("update conflict while updating WorkspaceKind status, will requeue")
			return ctrl.Result{Requeue: true}, nil
		}
		log.Error(err, "unable to update WorkspaceKind status")
		return ctrl.Result{}, err
	}

	return ctrl.Result{}, nil
}
// SetupWithManager sets up the controller with the Manager, watching
// WorkspaceKinds directly and Workspaces indirectly (Workspace events are
// mapped back to the WorkspaceKind they reference so its status stays current).
func (r *WorkspaceKindReconciler) SetupWithManager(mgr ctrl.Manager) error {
	// NOTE: the Workspace-by-WorkspaceKind cache index is defined in the SetupWithManager
	//       function of the WorkspaceReconciler. These controllers always share a manager
	//       (in both `main.go` and `suite_test.go`), so initializing the same index twice
	//       would result in a conflict.

	// mapWorkspaceToRequest converts Workspace events to reconcile requests
	// for the WorkspaceKind referenced by the Workspace.
	mapWorkspaceToRequest := func(ctx context.Context, object client.Object) []reconcile.Request {
		workspace, ok := object.(*kubefloworgv1beta1.Workspace)
		if !ok {
			// should never happen because the watch below only delivers Workspace
			// objects, but a checked assertion avoids panicking the manager
			return nil
		}
		return []reconcile.Request{
			{
				NamespacedName: types.NamespacedName{
					Name: workspace.Spec.Kind,
				},
			},
		}
	}

	return ctrl.NewControllerManagedBy(mgr).
		For(&kubefloworgv1beta1.WorkspaceKind{}).
		Watches(
			&kubefloworgv1beta1.Workspace{},
			handler.EnqueueRequestsFromMapFunc(mapWorkspaceToRequest),
			builder.WithPredicates(predicate.GenerationChangedPredicate{}),
		).
		Complete(r)
}

View File

@ -17,247 +17,116 @@ limitations under the License.
package controller
import (
"context"
"fmt"
"time"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/utils/ptr"
kubefloworgv1beta1 "github.com/kubeflow/notebooks/workspaces/controller/api/v1beta1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kubefloworgv1beta1 "github.com/kubeflow/notebooks/workspaces/controller/api/v1beta1"
)
var _ = Describe("WorkspaceKind Controller", func() {
// Define variables to store common objects for tests.
var (
testResource1 *kubefloworgv1beta1.WorkspaceKind
)
// Define utility constants and variables for object names and testing.
// Define utility constants for object names and testing timeouts/durations and intervals.
const (
testResourceName1 = "jupyterlab"
namespaceName = "default"
// how long to wait in "Eventually" blocks
timeout = time.Second * 10
// how long to wait in "Consistently" blocks
duration = time.Second * 10
// how frequently to poll for conditions
interval = time.Millisecond * 250
)
BeforeEach(func() {
testResource1 = &kubefloworgv1beta1.WorkspaceKind{
ObjectMeta: metav1.ObjectMeta{
Name: testResourceName1,
},
Spec: kubefloworgv1beta1.WorkspaceKindSpec{
Spawner: kubefloworgv1beta1.WorkspaceKindSpawner{
DisplayName: "JupyterLab Notebook",
Description: "A Workspace which runs JupyterLab in a Pod",
Hidden: ptr.To(false),
Deprecated: ptr.To(false),
DeprecationMessage: ptr.To("This WorkspaceKind will be removed on 20XX-XX-XX, please use another WorkspaceKind."),
Icon: kubefloworgv1beta1.WorkspaceKindIcon{
Url: ptr.To("https://jupyter.org/assets/favicons/apple-touch-icon-152x152.png"),
},
Logo: kubefloworgv1beta1.WorkspaceKindIcon{
ConfigMap: &kubefloworgv1beta1.WorkspaceKindConfigMap{
Name: "my-logos",
Key: "apple-touch-icon-152x152.png",
},
},
},
PodTemplate: kubefloworgv1beta1.WorkspaceKindPodTemplate{
PodMetadata: &kubefloworgv1beta1.WorkspaceKindPodMetadata{},
ServiceAccount: kubefloworgv1beta1.WorkspaceKindServiceAccount{
Name: "default-editor",
},
Culling: &kubefloworgv1beta1.WorkspaceKindCullingConfig{
Enabled: ptr.To(true),
MaxInactiveSeconds: ptr.To(int64(86400)),
ActivityProbe: kubefloworgv1beta1.ActivityProbe{
Exec: &kubefloworgv1beta1.ActivityProbeExec{
Command: []string{"bash", "-c", "exit 0"},
},
},
},
Probes: &kubefloworgv1beta1.WorkspaceKindProbes{},
VolumeMounts: kubefloworgv1beta1.WorkspaceKindVolumeMounts{
Home: "/home/jovyan",
},
HTTPProxy: &kubefloworgv1beta1.HTTPProxy{
RemovePathPrefix: ptr.To(false),
RequestHeaders: &kubefloworgv1beta1.IstioHeaderOperations{
Set: map[string]string{"X-RStudio-Root-Path": "{{ .PathPrefix }}"},
Add: map[string]string{},
Remove: []string{},
},
},
ExtraEnv: []v1.EnvVar{
{
Name: "NB_PREFIX",
Value: "{{ .PathPrefix }}",
},
},
ContainerSecurityContext: &v1.SecurityContext{
AllowPrivilegeEscalation: ptr.To(false),
Capabilities: &v1.Capabilities{
Drop: []v1.Capability{"ALL"},
},
RunAsNonRoot: ptr.To(true),
},
Options: kubefloworgv1beta1.WorkspaceKindPodOptions{
ImageConfig: kubefloworgv1beta1.ImageConfig{
Default: "jupyter_scipy_171",
Values: []kubefloworgv1beta1.ImageConfigValue{
{
Id: "jupyter_scipy_170",
Spawner: kubefloworgv1beta1.OptionSpawnerInfo{
DisplayName: "jupyter-scipy:v1.7.0",
Description: ptr.To("JupyterLab 1.7.0, with SciPy Packages"),
Hidden: ptr.To(true),
},
Redirect: &kubefloworgv1beta1.OptionRedirect{
To: "jupyter_scipy_171",
WaitForRestart: true,
Message: &kubefloworgv1beta1.RedirectMessage{
Level: "Info",
Text: "This update will increase the version of JupyterLab to v1.7.1",
},
},
Spec: kubefloworgv1beta1.ImageConfigSpec{
Image: "docker.io/kubeflownotebookswg/jupyter-scipy:v1.7.0",
Ports: []kubefloworgv1beta1.ImagePort{
{
DisplayName: "JupyterLab",
Port: 8888,
Protocol: "HTTP",
},
},
},
},
},
},
PodConfig: kubefloworgv1beta1.PodConfig{
Default: "small_cpu",
Values: []kubefloworgv1beta1.PodConfigValue{
{
Id: "small_cpu",
Spawner: kubefloworgv1beta1.OptionSpawnerInfo{
DisplayName: "Small CPU",
Description: ptr.To("Pod with 1 CPU, 2 GB RAM, and 1 GPU"),
Hidden: ptr.To(false),
},
Redirect: nil,
Spec: kubefloworgv1beta1.PodConfigSpec{
Resources: &v1.ResourceRequirements{
Requests: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("1"),
v1.ResourceMemory: resource.MustParse("2Gi"),
},
},
},
},
{
Id: "big_gpu",
Spawner: kubefloworgv1beta1.OptionSpawnerInfo{
DisplayName: "Big GPU",
Description: ptr.To("Pod with 4 CPUs, 16 GB RAM, and 1 GPU"),
Hidden: ptr.To(false),
},
Redirect: nil,
Spec: kubefloworgv1beta1.PodConfigSpec{
Affinity: nil,
NodeSelector: nil,
Tolerations: []v1.Toleration{
{
Key: "nvidia.com/gpu",
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoSchedule,
},
},
Resources: &v1.ResourceRequirements{
Requests: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("4"),
v1.ResourceMemory: resource.MustParse("16Gi"),
},
Limits: map[v1.ResourceName]resource.Quantity{
"nvidia.com/gpu": resource.MustParse("1"),
},
},
},
},
},
},
},
},
},
}
})
Context("When updating a WorkspaceKind", Ordered, func() {
Context("When reconciling a WorkspaceKind", func() {
ctx := context.Background()
// Define utility variables for object names.
// NOTE: to avoid conflicts between parallel tests, resource names are unique to each test
var (
workspaceName string
workspaceKindName string
workspaceKindKey types.NamespacedName
)
typeNamespacedName := types.NamespacedName{
Name: testResourceName1,
}
BeforeAll(func() {
uniqueName := "wsk-update-test"
workspaceName = fmt.Sprintf("workspace-%s", uniqueName)
workspaceKindName = fmt.Sprintf("workspacekind-%s", uniqueName)
workspaceKindKey = types.NamespacedName{Name: workspaceKindName}
workspacekind := &kubefloworgv1beta1.WorkspaceKind{}
By("creating the WorkspaceKind")
workspaceKind := NewExampleWorkspaceKind1(workspaceKindName)
Expect(k8sClient.Create(ctx, workspaceKind)).To(Succeed())
BeforeEach(func() {
By("creating a new WorkspaceKind")
err := k8sClient.Get(ctx, typeNamespacedName, workspacekind)
if err != nil && errors.IsNotFound(err) {
resource := testResource1.DeepCopy()
Expect(k8sClient.Create(ctx, resource)).To(Succeed())
} else {
Expect(err).NotTo(HaveOccurred())
}
By("checking if the WorkspaceKind exists")
Expect(k8sClient.Get(ctx, typeNamespacedName, workspacekind)).To(Succeed())
By("creating the Workspace")
workspace := NewExampleWorkspace1(workspaceName, namespaceName, workspaceKindName)
Expect(k8sClient.Create(ctx, workspace)).To(Succeed())
})
AfterEach(func() {
By("checking if the WorkspaceKind still exists")
resource := &kubefloworgv1beta1.WorkspaceKind{}
err := k8sClient.Get(ctx, typeNamespacedName, resource)
Expect(err).NotTo(HaveOccurred())
AfterAll(func() {
By("deleting the Workspace")
workspace := &kubefloworgv1beta1.Workspace{
ObjectMeta: metav1.ObjectMeta{
Name: workspaceName,
Namespace: namespaceName,
},
}
Expect(k8sClient.Delete(ctx, workspace)).To(Succeed())
By("deleting the WorkspaceKind")
Expect(k8sClient.Delete(ctx, resource)).To(Succeed())
workspaceKind := &kubefloworgv1beta1.WorkspaceKind{
ObjectMeta: metav1.ObjectMeta{
Name: workspaceKindName,
},
}
Expect(k8sClient.Delete(ctx, workspaceKind)).To(Succeed())
})
It("should not allow updating immutable fields", func() {
patch := client.MergeFrom(workspacekind.DeepCopy())
By("getting the WorkspaceKind")
workspaceKind := &kubefloworgv1beta1.WorkspaceKind{}
Expect(k8sClient.Get(ctx, workspaceKindKey, workspaceKind)).To(Succeed())
patch := client.MergeFrom(workspaceKind.DeepCopy())
By("failing to update the `spec.podTemplate.serviceAccount.name` field")
newWorkspaceKind := workspacekind.DeepCopy()
newWorkspaceKind := workspaceKind.DeepCopy()
newWorkspaceKind.Spec.PodTemplate.ServiceAccount.Name = "new-editor"
Expect(k8sClient.Patch(ctx, newWorkspaceKind, patch)).NotTo(Succeed())
By("failing to update the `spec.podTemplate.volumeMounts.home` field")
newWorkspaceKind = workspacekind.DeepCopy()
newWorkspaceKind = workspaceKind.DeepCopy()
newWorkspaceKind.Spec.PodTemplate.VolumeMounts.Home = "/home/jovyan/new"
Expect(k8sClient.Patch(ctx, newWorkspaceKind, patch)).NotTo(Succeed())
By("failing to update the `spec.podTemplate.options.imageConfig.values[0].spec` field")
newWorkspaceKind = workspacekind.DeepCopy()
newWorkspaceKind = workspaceKind.DeepCopy()
newWorkspaceKind.Spec.PodTemplate.Options.ImageConfig.Values[0].Spec.Image = "new-image:latest"
Expect(k8sClient.Patch(ctx, newWorkspaceKind, patch)).NotTo(Succeed())
By("failing to update the `spec.podTemplate.options.podConfig.values[0].spec` field")
newWorkspaceKind = workspacekind.DeepCopy()
newWorkspaceKind = workspaceKind.DeepCopy()
newWorkspaceKind.Spec.PodTemplate.Options.PodConfig.Values[0].Spec.Resources.Requests[v1.ResourceCPU] = resource.MustParse("99")
Expect(k8sClient.Patch(ctx, newWorkspaceKind, patch)).NotTo(Succeed())
})
It("should not allow mutually exclusive fields to be set", func() {
patch := client.MergeFrom(workspacekind.DeepCopy())
By("getting the WorkspaceKind")
workspaceKind := &kubefloworgv1beta1.WorkspaceKind{}
Expect(k8sClient.Get(ctx, workspaceKindKey, workspaceKind)).To(Succeed())
patch := client.MergeFrom(workspaceKind.DeepCopy())
By("only allowing one of `spec.spawner.icon.{url,configMap}` to be set")
newWorkspaceKind := workspacekind.DeepCopy()
newWorkspaceKind := workspaceKind.DeepCopy()
newWorkspaceKind.Spec.Spawner.Icon = kubefloworgv1beta1.WorkspaceKindIcon{
Url: ptr.To("https://example.com/icon.png"),
ConfigMap: &kubefloworgv1beta1.WorkspaceKindConfigMap{
@ -268,7 +137,7 @@ var _ = Describe("WorkspaceKind Controller", func() {
Expect(k8sClient.Patch(ctx, newWorkspaceKind, patch)).NotTo(Succeed())
By("only allowing one of `spec.podTemplate.culling.activityProbe.{exec,jupyter}` to be set")
newWorkspaceKind = workspacekind.DeepCopy()
newWorkspaceKind = workspaceKind.DeepCopy()
newWorkspaceKind.Spec.PodTemplate.Culling.ActivityProbe = kubefloworgv1beta1.ActivityProbe{
Exec: &kubefloworgv1beta1.ActivityProbeExec{
Command: []string{"bash", "-c", "exit 0"},
@ -279,20 +148,121 @@ var _ = Describe("WorkspaceKind Controller", func() {
}
Expect(k8sClient.Patch(ctx, newWorkspaceKind, patch)).NotTo(Succeed())
})
})
It("should successfully reconcile the resource", func() {
By("Reconciling the created resource")
controllerReconciler := &WorkspaceKindReconciler{
Client: k8sClient,
Scheme: k8sClient.Scheme(),
Context("When reconciling a WorkspaceKind", Serial, Ordered, func() {
// Define utility variables for object names.
// NOTE: to avoid conflicts between parallel tests, resource names are unique to each test
var (
workspaceName string
workspaceKindName string
workspaceKindKey types.NamespacedName
)
BeforeAll(func() {
uniqueName := "wsk-reconcile-test"
workspaceName = fmt.Sprintf("workspace-%s", uniqueName)
workspaceKindName = fmt.Sprintf("workspacekind-%s", uniqueName)
workspaceKindKey = types.NamespacedName{Name: workspaceKindName}
})
It("should update the WorkspaceKind status", func() {
By("creating a WorkspaceKind")
workspaceKind := NewExampleWorkspaceKind1(workspaceKindName)
Expect(k8sClient.Create(ctx, workspaceKind)).To(Succeed())
By("creating a Workspace")
workspace := NewExampleWorkspace1(workspaceName, namespaceName, workspaceKindName)
Expect(k8sClient.Create(ctx, workspace)).To(Succeed())
By("reconciling the WorkspaceKind status")
expectedStatus := &kubefloworgv1beta1.WorkspaceKindStatus{
Workspaces: 1,
PodTemplateOptions: kubefloworgv1beta1.PodTemplateOptionsMetrics{
ImageConfig: []kubefloworgv1beta1.OptionMetric{
{
Id: "jupyterlab_scipy_180",
Workspaces: 1,
},
{
Id: "jupyterlab_scipy_190",
Workspaces: 0,
},
},
PodConfig: []kubefloworgv1beta1.OptionMetric{
{
Id: "tiny_cpu",
Workspaces: 1,
},
{
Id: "small_cpu",
Workspaces: 0,
},
{
Id: "big_gpu",
Workspaces: 0,
},
},
},
}
Eventually(func() *kubefloworgv1beta1.WorkspaceKindStatus {
if err := k8sClient.Get(ctx, workspaceKindKey, workspaceKind); err != nil {
return nil
}
return &workspaceKind.Status
}, timeout, interval).Should(Equal(expectedStatus))
_, err := controllerReconciler.Reconcile(ctx, reconcile.Request{
NamespacedName: typeNamespacedName,
})
Expect(err).NotTo(HaveOccurred())
// TODO(user): Add more specific assertions depending on your controller's reconciliation logic.
// Example: If you expect a certain status condition after reconciliation, verify it here.
By("having a finalizer set on the WorkspaceKind")
Expect(workspaceKind.GetFinalizers()).To(ContainElement(workspaceKindFinalizer))
By("deleting the Workspace")
Expect(k8sClient.Delete(ctx, workspace)).To(Succeed())
By("reconciling the WorkspaceKind status")
expectedStatus = &kubefloworgv1beta1.WorkspaceKindStatus{
Workspaces: 0,
PodTemplateOptions: kubefloworgv1beta1.PodTemplateOptionsMetrics{
ImageConfig: []kubefloworgv1beta1.OptionMetric{
{
Id: "jupyterlab_scipy_180",
Workspaces: 0,
},
{
Id: "jupyterlab_scipy_190",
Workspaces: 0,
},
},
PodConfig: []kubefloworgv1beta1.OptionMetric{
{
Id: "tiny_cpu",
Workspaces: 0,
},
{
Id: "small_cpu",
Workspaces: 0,
},
{
Id: "big_gpu",
Workspaces: 0,
},
},
},
}
Eventually(func() *kubefloworgv1beta1.WorkspaceKindStatus {
if err := k8sClient.Get(ctx, workspaceKindKey, workspaceKind); err != nil {
return nil
}
return &workspaceKind.Status
}, timeout, interval).Should(Equal(expectedStatus))
By("having no finalizer set on the WorkspaceKind")
Expect(workspaceKind.GetFinalizers()).To(BeEmpty())
By("deleting the WorkspaceKind")
Expect(k8sClient.Delete(ctx, workspaceKind)).To(Succeed())
Expect(k8sClient.Get(ctx, workspaceKindKey, workspaceKind)).ToNot(Succeed())
})
})
})

View File

@ -0,0 +1,100 @@
package helper
import (
"reflect"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
)
// CopyStatefulSetFields updates a target StatefulSet with the fields from a desired
// StatefulSet, returning true if an update is required.
//
// NOTE: only fields owned by the controller are copied, so server-populated or
// defaulted fields on the target are left untouched.
func CopyStatefulSetFields(desired *appsv1.StatefulSet, target *appsv1.StatefulSet) bool {
	requireUpdate := false

	// copy `metadata.labels`
	// NOTE: DeepEqual (rather than scanning only the target's keys) also detects
	//       labels present in `desired` but missing from `target`, which the
	//       one-way scan would silently drop without flagging an update
	if !reflect.DeepEqual(target.Labels, desired.Labels) {
		requireUpdate = true
	}
	target.Labels = desired.Labels

	// copy `metadata.annotations`
	if !reflect.DeepEqual(target.Annotations, desired.Annotations) {
		requireUpdate = true
	}
	target.Annotations = desired.Annotations

	// copy `spec.replicas`
	// NOTE: guard against nil pointers rather than dereferencing unconditionally
	if desired.Spec.Replicas != nil && (target.Spec.Replicas == nil || *target.Spec.Replicas != *desired.Spec.Replicas) {
		replicas := *desired.Spec.Replicas
		target.Spec.Replicas = &replicas
		requireUpdate = true
	}

	// copy `spec.selector`
	//
	// TODO: confirm if StatefulSets support updates to the selector
	//       if not, we might need to recreate the StatefulSet
	//
	if !reflect.DeepEqual(target.Spec.Selector, desired.Spec.Selector) {
		target.Spec.Selector = desired.Spec.Selector
		requireUpdate = true
	}

	// copy `spec.template`
	//
	// TODO: confirm if there is a problem with doing the update at the `spec.template` level
	//       or if only `spec.template.spec` should be updated
	//
	if !reflect.DeepEqual(target.Spec.Template, desired.Spec.Template) {
		target.Spec.Template = desired.Spec.Template
		requireUpdate = true
	}

	return requireUpdate
}
// CopyServiceFields updates a target Service with the fields from a desired Service, returning true if an update is required.
func CopyServiceFields(desired *corev1.Service, target *corev1.Service) bool {
requireUpdate := false
// copy `metadata.labels`
for k, v := range target.Labels {
if desired.Labels[k] != v {
requireUpdate = true
}
}
target.Labels = desired.Labels
// copy `metadata.annotations`
for k, v := range target.Annotations {
if desired.Annotations[k] != v {
requireUpdate = true
}
}
target.Annotations = desired.Annotations
// NOTE: we don't copy the entire `spec` because we can't overwrite the `spec.clusterIp` and similar fields
// copy `spec.ports`
if !reflect.DeepEqual(target.Spec.Ports, desired.Spec.Ports) {
target.Spec.Ports = desired.Spec.Ports
requireUpdate = true
}
// copy `spec.selector`
if !reflect.DeepEqual(target.Spec.Selector, desired.Spec.Selector) {
target.Spec.Selector = desired.Spec.Selector
requireUpdate = true
}
// copy `spec.type`
if target.Spec.Type != desired.Spec.Type {
target.Spec.Type = desired.Spec.Type
requireUpdate = true
}
return requireUpdate
}