chore: update controller manifests to follow conventions

This commit brings about various changes to improve naming consistency across the `workspaces/` components.

The original `controller/` manifests were generated by kubebuilder and were not aligned with the conventions we have established in the `backend` and `frontend` components. Changing the manifests also had a "ripple effect" of requiring minor modifications to the `Makefile` as well as some e2e tests and documentation.

Key manifest changes:
- dropped `namePrefix` transformation
- standardized on `kubeflow-workspaces` namespace
- leveraged the name `workspaces-controller` where sensible
- standardized `app.kubernetes.io/*` labels across all resources
    - leveraging `labels` transformation as much as possible
        - selectively applied `includeSelectors: true` when appropriate
- removed legacy `control-plane=controller-manager` labels
    - replaced with appropriate `app.kubernetes.io/*` labels to keep behavioral parity

Signed-off-by: Andy Stoneberg <astonebe@redhat.com>
This commit is contained in:
Andy Stoneberg 2025-09-12 14:17:06 -04:00
parent 5d91ee05b2
commit aa26b048b9
No known key found for this signature in database
28 changed files with 73 additions and 120 deletions

View File

@ -1,5 +1,8 @@
# Image URL to use all building/pushing image targets
IMG ?= controller:latest
TAG ?= $(shell git describe --tags --always --dirty)
IMG ?= ghcr.io/kubeflow/notebooks/workspaces-controller:$(TAG)
# ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary.
ENVTEST_K8S_VERSION = 1.31.0
@ -128,7 +131,7 @@ docker-buildx: ## Build and push docker image for the manager for cross-platform
.PHONY: build-installer
build-installer: manifests generate kustomize ## Generate a consolidated YAML with CRDs and deployment.
mkdir -p dist
cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
cd config/manager && $(KUSTOMIZE) edit set image workspaces-controller=${IMG}
$(KUSTOMIZE) build config/default > dist/install.yaml
##@ Deployment

View File

@ -5,7 +5,7 @@
domain: kubeflow.org
layout:
- go.kubebuilder.io/v4
projectName: workspace-controller
projectName: workspaces-controller
repo: github.com/kubeflow/notebooks/workspaces/controller
resources:
- api:

View File

@ -18,7 +18,7 @@ The Kubeflow Workspace Controller is responsible for reconciling the `Workspace`
**Build and push your image to the location specified by `IMG`:**
```sh
make docker-build docker-push IMG=<some-registry>/workspace-controller:tag
make docker-build docker-push IMG=<some-registry>/workspaces-controller:tag
```
**NOTE:** This image ought to be published in the personal registry you specified.
@ -34,7 +34,7 @@ make install
**Deploy the Manager to the cluster with the image specified by `IMG`:**
```sh
make deploy IMG=<some-registry>/workspace-controller:tag
make deploy IMG=<some-registry>/workspaces-controller:tag
```
> **NOTE**: If you encounter RBAC errors, you may need to grant yourself cluster-admin
@ -75,7 +75,7 @@ Following are the steps to build the installer and distribute this project to us
1. Build the installer for the image built and published in the registry:
```sh
make build-installer IMG=<some-registry>/workspace-controller:tag
make build-installer IMG=<some-registry>/workspaces-controller:tag
```
NOTE: The makefile target mentioned above generates an 'install.yaml'
@ -88,5 +88,5 @@ its dependencies.
Users can just run kubectl apply -f <URL for YAML BUNDLE> to install the project, i.e.:
```sh
kubectl apply -f https://raw.githubusercontent.com/<org>/workspace-controller/<tag or branch>/dist/install.yaml
kubectl apply -f https://raw.githubusercontent.com/<org>/workspaces-controller/<tag or branch>/dist/install.yaml
```

View File

@ -5,14 +5,8 @@ apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
labels:
app.kubernetes.io/name: certificate
app.kubernetes.io/instance: serving-cert
app.kubernetes.io/component: certificate
app.kubernetes.io/created-by: workspace-controller
app.kubernetes.io/part-of: workspace-controller
app.kubernetes.io/managed-by: kustomize
name: selfsigned-issuer
namespace: system
spec:
selfSigned: {}
---
@ -20,14 +14,8 @@ apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
labels:
app.kubernetes.io/name: certificate
app.kubernetes.io/instance: serving-cert
app.kubernetes.io/component: certificate
app.kubernetes.io/created-by: workspace-controller
app.kubernetes.io/part-of: workspace-controller
app.kubernetes.io/managed-by: kustomize
name: serving-cert # this name should match the one appeared in kustomizeconfig.yaml
namespace: system
spec:
# SERVICE_NAME and SERVICE_NAMESPACE will be substituted by kustomize
dnsNames:

View File

@ -1,3 +1,7 @@
labels:
- pairs:
app.kubernetes.io/component: webhook
resources:
- certificate.yaml

View File

@ -1,18 +1,12 @@
# Adds namespace to all resources.
namespace: workspace-controller-system
namespace: kubeflow-workspaces
# Value of this field is prepended to the
# names of all resources, e.g. a deployment named
# "wordpress" becomes "alices-wordpress".
# Note that it should also match with the prefix (text before '-') of the namespace
# field above.
namePrefix: workspace-controller-
# Labels to add to all resources and selectors.
#labels:
#- includeSelectors: true
# pairs:
# someName: someValue
labels:
- includeSelectors: true
pairs:
app.kubernetes.io/managed-by: kustomize
app.kubernetes.io/name: workspaces-controller
app.kubernetes.io/part-of: kubeflow-workspaces
resources:
- ../crd

View File

@ -1,8 +1,7 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: controller-manager
namespace: system
name: workspaces-controller
spec:
template:
spec:

View File

@ -2,8 +2,7 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: controller-manager
namespace: system
name: workspaces-controller
spec:
template:
spec:

View File

@ -1,8 +1,7 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: controller-manager
namespace: system
name: workspaces-controller
spec:
template:
spec:

View File

@ -4,12 +4,7 @@ apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
labels:
app.kubernetes.io/name: validatingwebhookconfiguration
app.kubernetes.io/instance: validating-webhook-configuration
app.kubernetes.io/component: webhook
app.kubernetes.io/created-by: workspace-controller
app.kubernetes.io/part-of: workspace-controller
app.kubernetes.io/managed-by: kustomize
name: validating-webhook-configuration
annotations:
cert-manager.io/inject-ca-from: CERTIFICATE_NAMESPACE/CERTIFICATE_NAME

View File

@ -2,7 +2,16 @@ apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- manager.yaml
labels:
- includeSelectors: true
pairs:
app.kubernetes.io/component: controller-manager
images:
- name: controller
newName: ghcr.io/kubeflow/notebooks/workspace-controller
newName: ghcr.io/kubeflow/notebooks/workspaces-controller
newTag: latest
- name: workspaces-controller
newName: ghcr.io/kubeflow/notebooks/workspaces-controller
newTag: latest

View File

@ -1,32 +1,21 @@
apiVersion: v1
kind: Namespace
metadata:
labels:
control-plane: controller-manager
app.kubernetes.io/name: workspace-controller
app.kubernetes.io/managed-by: kustomize
name: system
name: kubeflow-workspaces
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: controller-manager
namespace: system
labels:
control-plane: controller-manager
app.kubernetes.io/name: workspace-controller
app.kubernetes.io/managed-by: kustomize
name: workspaces-controller
spec:
selector:
matchLabels:
control-plane: controller-manager
matchLabels: {}
replicas: 1
template:
metadata:
annotations:
kubectl.kubernetes.io/default-container: manager
labels:
control-plane: controller-manager
labels: {}
spec:
# TODO(user): Uncomment the following code to configure the nodeAffinity expression
# according to the platforms which are supported by your solution.
@ -64,7 +53,7 @@ spec:
- --leader-elect
- --health-probe-bind-address=:8081
- --metrics-bind-address=0
image: controller:latest
image: workspaces-controller:latest
imagePullPolicy: IfNotPresent
name: manager
securityContext:
@ -93,5 +82,5 @@ spec:
requests:
cpu: 10m
memory: 64Mi
serviceAccountName: controller-manager
serviceAccountName: workspaces-controller
terminationGracePeriodSeconds: 10

View File

@ -1,2 +1,6 @@
resources:
- monitor.yaml
labels:
- pairs:
app.kubernetes.io/component: metrics

View File

@ -2,12 +2,7 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
labels:
control-plane: controller-manager
app.kubernetes.io/name: workspace-controller
app.kubernetes.io/managed-by: kustomize
name: controller-manager-metrics-monitor
namespace: system
name: workspaces-controller-metrics-monitor
spec:
endpoints:
- path: /metrics
@ -15,4 +10,4 @@ spec:
scheme: http
selector:
matchLabels:
control-plane: controller-manager
app.kubernetes.io/component: controller-manager

View File

@ -1,3 +1,7 @@
labels:
- pairs:
app.kubernetes.io/component: controller-manager
resources:
# All RBAC will be applied under this service account in
# the deployment namespace. You may comment out this resource
@ -18,3 +22,4 @@ resources:
- workspacekind_viewer_role.yaml
- workspace_editor_role.yaml
- workspace_viewer_role.yaml

View File

@ -2,9 +2,6 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
app.kubernetes.io/name: workspace-controller
app.kubernetes.io/managed-by: kustomize
name: leader-election-role
rules:
- apiGroups:

View File

@ -1,9 +1,6 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app.kubernetes.io/name: workspace-controller
app.kubernetes.io/managed-by: kustomize
name: leader-election-rolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
@ -11,5 +8,5 @@ roleRef:
name: leader-election-role
subjects:
- kind: ServiceAccount
name: controller-manager
namespace: system
name: workspaces-controller
namespace: kubeflow-workspaces

View File

@ -1,12 +1,7 @@
apiVersion: v1
kind: Service
metadata:
labels:
control-plane: controller-manager
app.kubernetes.io/name: workspace-controller
app.kubernetes.io/managed-by: kustomize
name: controller-manager-metrics-service
namespace: system
name: workspaces-controller-metrics-service
spec:
ports:
- name: http
@ -14,4 +9,4 @@ spec:
protocol: TCP
targetPort: 8080
selector:
control-plane: controller-manager
app.kubernetes.io/component: controller-manager

View File

@ -1,9 +1,6 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/name: workspace-controller
app.kubernetes.io/managed-by: kustomize
name: manager-rolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
@ -11,5 +8,5 @@ roleRef:
name: manager-role
subjects:
- kind: ServiceAccount
name: controller-manager
namespace: system
name: workspaces-controller
namespace: kubeflow-workspaces

View File

@ -1,8 +1,4 @@
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app.kubernetes.io/name: workspace-controller
app.kubernetes.io/managed-by: kustomize
name: controller-manager
namespace: system
name: workspaces-controller

View File

@ -2,9 +2,6 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/name: workspace-controller
app.kubernetes.io/managed-by: kustomize
name: workspace-editor-role
rules:
- apiGroups:

View File

@ -2,9 +2,6 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/name: workspace-controller
app.kubernetes.io/managed-by: kustomize
name: workspace-viewer-role
rules:
- apiGroups:

View File

@ -2,9 +2,6 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/name: workspace-controller
app.kubernetes.io/managed-by: kustomize
name: workspacekind-editor-role
rules:
- apiGroups:

View File

@ -2,9 +2,6 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/name: workspace-controller
app.kubernetes.io/managed-by: kustomize
name: workspacekind-viewer-role
rules:
- apiGroups:

View File

@ -1,3 +1,7 @@
labels:
- pairs:
app.kubernetes.io/component: webhook
resources:
- manifests.yaml
- service.yaml

View File

@ -1,15 +1,11 @@
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/name: workspace-controller
app.kubernetes.io/managed-by: kustomize
name: webhook-service
namespace: system
spec:
ports:
- port: 443
protocol: TCP
targetPort: 9443
selector:
control-plane: controller-manager
app.kubernetes.io/component: controller-manager

View File

@ -6,6 +6,7 @@ require (
github.com/go-logr/logr v1.4.2
github.com/onsi/ginkgo/v2 v2.19.0
github.com/onsi/gomega v1.33.1
golang.org/x/time v0.3.0
k8s.io/api v0.31.0
k8s.io/apimachinery v0.31.0
k8s.io/client-go v0.31.0
@ -56,7 +57,6 @@ require (
golang.org/x/sys v0.21.0 // indirect
golang.org/x/term v0.21.0 // indirect
golang.org/x/text v0.16.0 // indirect
golang.org/x/time v0.3.0 // indirect
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
google.golang.org/protobuf v1.34.2 // indirect

View File

@ -34,8 +34,8 @@ import (
const (
// controller configs
controllerNamespace = "workspace-controller-system"
controllerImage = "ghcr.io/kubeflow/notebooks/workspace-controller:latest"
controllerNamespace = "kubeflow-workspaces"
controllerImage = "ghcr.io/kubeflow/notebooks/workspaces-controller:latest"
// workspace configs
workspaceNamespace = "workspace-test"
@ -89,17 +89,17 @@ var _ = Describe("controller", Ordered, func() {
_, err = utils.Run(cmd)
ExpectWithOffset(1, err).NotTo(HaveOccurred())
By("deploying the controller-manager")
By("deploying the workspaces-controller")
cmd = exec.Command("make", "deploy", fmt.Sprintf("IMG=%s", controllerImage))
_, err = utils.Run(cmd)
ExpectWithOffset(1, err).NotTo(HaveOccurred())
By("validating that the controller-manager pod is running as expected")
By("validating that the workspaces-controller pod is running as expected")
var controllerPodName string
verifyControllerUp := func(g Gomega) {
// Get controller pod name
cmd := exec.Command("kubectl", "get", "pods",
"-l", "control-plane=controller-manager",
"-l", "app.kubernetes.io/component=controller-manager",
"-n", controllerNamespace,
"-o", "go-template={{ range .items }}"+
"{{ if not .metadata.deletionTimestamp }}"+
@ -107,13 +107,13 @@ var _ = Describe("controller", Ordered, func() {
"{{ \"\\n\" }}{{ end }}{{ end }}",
)
podOutput, err := utils.Run(cmd)
g.Expect(err).NotTo(HaveOccurred(), "failed to get controller-manager pod")
g.Expect(err).NotTo(HaveOccurred(), "failed to get workspaces-controller pod")
// Ensure only 1 controller pod is running
podNames := utils.GetNonEmptyLines(podOutput)
g.Expect(podNames).To(HaveLen(1), "expected 1 controller pod running")
controllerPodName = podNames[0]
g.Expect(controllerPodName).To(ContainSubstring("controller-manager"))
g.Expect(controllerPodName).To(ContainSubstring("workspaces-controller"))
// Validate controller pod status
cmd = exec.Command("kubectl", "get", "pods",
@ -123,7 +123,7 @@ var _ = Describe("controller", Ordered, func() {
)
statusPhase, err := utils.Run(cmd)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(statusPhase).To(BeEquivalentTo(corev1.PodRunning), "Incorrect controller-manager pod phase")
g.Expect(statusPhase).To(BeEquivalentTo(corev1.PodRunning), "Incorrect workspaces-controller pod phase")
}
Eventually(verifyControllerUp, timeout, interval).Should(Succeed())