Implements server-side apply

This commit is contained in:
Sean Sullivan 2020-10-19 13:07:45 -07:00
parent 91bebf1ab3
commit 99285377ba
18 changed files with 229 additions and 32 deletions

View File

@ -35,9 +35,15 @@ func GetApplyRunner(provider provider.Provider, ioStreams genericclioptions.IOSt
RunE: r.RunE,
}
cmd.Flags().BoolVar(&r.serverSideOptions.ServerSideApply, "server-side", false,
"If true, apply merge patch is calculated on API server instead of client.")
cmd.Flags().BoolVar(&r.serverSideOptions.ForceConflicts, "force-conflicts", false,
"If true, overwrite applied fields on server if field manager conflict.")
cmd.Flags().StringVar(&r.serverSideOptions.FieldManager, "field-manager", common.DefaultFieldManager,
"The client owner of the fields being applied on the server-side.")
cmd.Flags().StringVar(&r.output, "output", printers.DefaultPrinter(),
fmt.Sprintf("Output format, must be one of %s", strings.Join(printers.SupportedPrinters(), ",")))
cmd.Flags().DurationVar(&r.period, "poll-period", 2*time.Second,
"Polling period for resource statuses.")
cmd.Flags().DurationVar(&r.reconcileTimeout, "reconcile-timeout", time.Duration(0),
@ -64,6 +70,7 @@ type ApplyRunner struct {
Applier *apply.Applier
provider provider.Provider
serverSideOptions common.ServerSideOptions
output string
period time.Duration
reconcileTimeout time.Duration
@ -111,8 +118,9 @@ func (r *ApplyRunner) RunE(cmd *cobra.Command, args []string) error {
return err
}
ch := r.Applier.Run(context.Background(), object.InfosToUnstructureds(infos), apply.Options{
PollInterval: r.period,
ReconcileTimeout: r.reconcileTimeout,
ServerSideOptions: r.serverSideOptions,
PollInterval: r.period,
ReconcileTimeout: r.reconcileTimeout,
// If we are not waiting for status, tell the applier to not
// emit the events.
EmitStatusEvents: emitStatusEvents,

View File

@ -113,10 +113,16 @@ func (r *PreviewRunner) RunE(cmd *cobra.Command, args []string) error {
// Run the applier. It will return a channel where we can receive updates
// to keep track of progress and any issues.
serverSideOptions := common.ServerSideOptions{
ServerSideApply: false,
ForceConflicts: false,
FieldManager: common.DefaultFieldManager,
}
ch = r.Applier.Run(ctx, object.InfosToUnstructureds(infos), apply.Options{
EmitStatusEvents: false,
NoPrune: noPrune,
DryRunStrategy: drs,
EmitStatusEvents: false,
NoPrune: noPrune,
DryRunStrategy: drs,
ServerSideOptions: serverSideOptions,
})
} else {
inv, _, err := inventory.SplitInfos(infos)

View File

@ -0,0 +1,128 @@
[kind]: https://github.com/kubernetes-sigs/kind
# Demo: Server Side Apply
This demo shows how to invoke server-side apply instead of the default client-side apply.
First define a place to work:
<!-- @makeWorkplace @testE2EAgainstLatestRelease -->
```
DEMO_HOME=$(mktemp -d)
```
Alternatively, use
> ```
> DEMO_HOME=~/hello
> ```
## Establish the base
<!-- @createBase @testE2EAgainstLatestRelease -->
```
BASE=$DEMO_HOME/base
mkdir -p $BASE
OUTPUT=$DEMO_HOME/output
mkdir -p $OUTPUT
function expectedOutputLine() {
  test 1 == \
  $(grep "$@" $OUTPUT/status | wc -l); \
  echo $?
}
```
## Create the first "app"
Create the config YAML for two config maps (cm-a and cm-b).
<!-- @createFirstConfigMaps @testE2EAgainstLatestRelease-->
```
cat <<EOF >$BASE/config-map-a.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: cm-a
labels:
name: test-config-map-label
EOF
cat <<EOF >$BASE/config-map-b.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: cm-b
labels:
name: test-config-map-label
data:
foo: sean
EOF
```
## Run end-to-end tests
The following requires installation of [kind].
Delete any existing kind cluster and create a new one. By default, the name of the cluster is "kind".
<!-- @deleteAndCreateKindCluster @testE2EAgainstLatestRelease -->
```
kind delete cluster
kind create cluster
```
Use the kapply init command to generate the inventory template. This contains
the namespace and inventory ID used by apply to create inventory objects.
<!-- @createInventoryTemplate @testE2EAgainstLatestRelease-->
```
kapply init $BASE > $OUTPUT/status
expectedOutputLine "namespace: default is used for inventory object"
```
Apply the "app" to the cluster. All the config maps should be created, and
no resources should be pruned.
<!-- @runServerSideApply @testE2EAgainstLatestRelease -->
```
kapply apply $BASE --server-side --reconcile-timeout=1m > $OUTPUT/status
expectedOutputLine "configmap/cm-a serversideapplied"
expectedOutputLine "configmap/cm-b serversideapplied"
expectedOutputLine "2 serverside applied"
# There should be only one inventory object
kubectl get cm --selector='cli-utils.sigs.k8s.io/inventory-id' --no-headers | wc -l > $OUTPUT/status
expectedOutputLine "1"
# There should be two config maps that are not inventory objects (cm-a and cm-b)
kubectl get cm --selector='!cli-utils.sigs.k8s.io/inventory-id' --no-headers | wc -l > $OUTPUT/status
expectedOutputLine "2"
# ConfigMap cm-a has been created in the cluster
kubectl get configmap/cm-a --no-headers | wc -l > $OUTPUT/status
expectedOutputLine "1"
# ConfigMap cm-b has been created in the cluster
kubectl get configmap/cm-b --no-headers | wc -l > $OUTPUT/status
expectedOutputLine "1"
```
Update config map cm-b to change a field owned by the default field manager.
Then apply both config maps using a different field manager, which creates a
conflict, and pass the --force-conflicts flag to overwrite it successfully.
The conflicting field is "data.foo".
<!-- @runServerSideApplyWithForceConflicts @testE2EAgainstLatestRelease -->
```
cat <<EOF >$BASE/config-map-b.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: cm-b
labels:
name: test-config-map-label
data:
foo: baz
EOF
kapply apply $BASE --server-side --field-manager=sean --force-conflicts --reconcile-timeout=1m > $OUTPUT/status
expectedOutputLine "configmap/cm-a serversideapplied"
expectedOutputLine "configmap/cm-b serversideapplied"
expectedOutputLine "2 serverside applied"
```

View File

@ -198,6 +198,7 @@ func (a *Applier) Run(ctx context.Context, objects []*unstructured.Unstructured,
InfoHelper: a.infoHelper,
Mapper: mapper,
}).BuildTaskQueue(resourceObjects, solver.Options{
ServerSideOptions: options.ServerSideOptions,
ReconcileTimeout: options.ReconcileTimeout,
Prune: !options.NoPrune,
DryRunStrategy: options.DryRunStrategy,
@ -238,6 +239,9 @@ func (a *Applier) Run(ctx context.Context, objects []*unstructured.Unstructured,
}
type Options struct {
// Encapsulates the fields for server-side apply.
ServerSideOptions common.ServerSideOptions
// ReconcileTimeout defines whether the applier should wait
// until all applied resources have been reconciled, and if so,
// how long to wait.
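For context, here is a minimal sketch (not part of this commit) of how a caller could enable server-side apply through the new ServerSideOptions field on apply.Options; the import paths, the resource.Info parameter, and the runServerSide helper are assumptions based on the cli-utils repo layout:
```
// Sketch only: demonstrates wiring the new ServerSideOptions field into
// apply.Options. Import paths and the runServerSide helper are assumptions.
package example

import (
	"context"
	"time"

	"k8s.io/cli-runtime/pkg/resource"
	"sigs.k8s.io/cli-utils/pkg/apply"
	"sigs.k8s.io/cli-utils/pkg/common"
	"sigs.k8s.io/cli-utils/pkg/object"
)

func runServerSide(applier *apply.Applier, infos []*resource.Info) {
	ch := applier.Run(context.Background(), object.InfosToUnstructureds(infos), apply.Options{
		ServerSideOptions: common.ServerSideOptions{
			ServerSideApply: true,                       // --server-side
			ForceConflicts:  false,                      // --force-conflicts
			FieldManager:    common.DefaultFieldManager, // --field-manager
		},
		PollInterval:     2 * time.Second,
		ReconcileTimeout: time.Minute,
		EmitStatusEvents: true,
	})
	for e := range ch {
		_ = e // consume the apply, prune, and status events emitted by the applier
	}
}
```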

View File

@ -1,3 +1,6 @@
// Copyright 2020 The Kubernetes Authors.
// SPDX-License-Identifier: Apache-2.0
// Code generated by "stringer -type=ApplyEventOperation"; DO NOT EDIT.
package event

View File

@ -1,3 +1,6 @@
// Copyright 2020 The Kubernetes Authors.
// SPDX-License-Identifier: Apache-2.0
// Code generated by "stringer -type=ApplyEventType"; DO NOT EDIT.
package event

View File

@ -1,3 +1,6 @@
// Copyright 2020 The Kubernetes Authors.
// SPDX-License-Identifier: Apache-2.0
// Code generated by "stringer -type=DeleteEventOperation"; DO NOT EDIT.
package event

View File

@ -1,3 +1,6 @@
// Copyright 2020 The Kubernetes Authors.
// SPDX-License-Identifier: Apache-2.0
// Code generated by "stringer -type=DeleteEventType"; DO NOT EDIT.
package event

View File

@ -1,3 +1,6 @@
// Copyright 2020 The Kubernetes Authors.
// SPDX-License-Identifier: Apache-2.0
// Code generated by "stringer -type=PruneEventOperation"; DO NOT EDIT.
package event

View File

@ -1,3 +1,6 @@
// Copyright 2020 The Kubernetes Authors.
// SPDX-License-Identifier: Apache-2.0
// Code generated by "stringer -type=PruneEventType"; DO NOT EDIT.
package event

View File

@ -1,3 +1,6 @@
// Copyright 2020 The Kubernetes Authors.
// SPDX-License-Identifier: Apache-2.0
// Code generated by "stringer -type=ResourceAction"; DO NOT EDIT.
package event

View File

@ -1,3 +1,6 @@
// Copyright 2020 The Kubernetes Authors.
// SPDX-License-Identifier: Apache-2.0
// Code generated by "stringer -type=StatusEventType"; DO NOT EDIT.
package event

View File

@ -1,3 +1,6 @@
// Copyright 2020 The Kubernetes Authors.
// SPDX-License-Identifier: Apache-2.0
// Code generated by "stringer -type=Type"; DO NOT EDIT.
package event

View File

@ -41,6 +41,7 @@ type TaskQueueSolver struct {
}
type Options struct {
ServerSideOptions common.ServerSideOptions
ReconcileTimeout time.Duration
Prune bool
DryRunStrategy common.DryRunStrategy
@ -66,12 +67,13 @@ func (t *TaskQueueSolver) BuildTaskQueue(ro resourceObjects,
crdSplitRes, hasCRDs := splitAfterCRDs(remainingInfos)
if hasCRDs {
tasks = append(tasks, &task.ApplyTask{
Objects: append(crdSplitRes.before, crdSplitRes.crds...),
CRDs: crdSplitRes.crds,
DryRunStrategy: o.DryRunStrategy,
InfoHelper: t.InfoHelper,
Factory: t.Factory,
Mapper: t.Mapper,
Objects: append(crdSplitRes.before, crdSplitRes.crds...),
CRDs: crdSplitRes.crds,
ServerSideOptions: o.ServerSideOptions,
DryRunStrategy: o.DryRunStrategy,
InfoHelper: t.InfoHelper,
Factory: t.Factory,
Mapper: t.Mapper,
})
if !o.DryRunStrategy.ClientOrServerDryRun() {
objs := object.UnstructuredsToObjMetas(crdSplitRes.crds)
@ -88,12 +90,13 @@ func (t *TaskQueueSolver) BuildTaskQueue(ro resourceObjects,
tasks = append(tasks,
&task.ApplyTask{
Objects: remainingInfos,
CRDs: crdSplitRes.crds,
DryRunStrategy: o.DryRunStrategy,
InfoHelper: t.InfoHelper,
Factory: t.Factory,
Mapper: t.Mapper,
Objects: remainingInfos,
CRDs: crdSplitRes.crds,
ServerSideOptions: o.ServerSideOptions,
DryRunStrategy: o.DryRunStrategy,
InfoHelper: t.InfoHelper,
Factory: t.Factory,
Mapper: t.Mapper,
},
&task.SendEventTask{
Event: event.Event{

View File

@ -38,12 +38,13 @@ type applyOptions interface {
// ApplyTask applies the given Objects to the cluster
// by using the ApplyOptions.
type ApplyTask struct {
Factory util.Factory
InfoHelper info.InfoHelper
Mapper meta.RESTMapper
Objects []*unstructured.Unstructured
CRDs []*unstructured.Unstructured
DryRunStrategy common.DryRunStrategy
Factory util.Factory
InfoHelper info.InfoHelper
Mapper meta.RESTMapper
Objects []*unstructured.Unstructured
CRDs []*unstructured.Unstructured
DryRunStrategy common.DryRunStrategy
ServerSideOptions common.ServerSideOptions
}
// applyOptionsFactoryFunc is a factory function for creating a new
@ -110,7 +111,8 @@ func (a *ApplyTask) Start(taskContext *taskrunner.TaskContext) {
// Create a new instance of the applyOptions interface and use it
// to apply the objects.
ao, err := applyOptionsFactoryFunc(taskContext.EventChannel(), a.DryRunStrategy, a.Factory)
ao, err := applyOptionsFactoryFunc(taskContext.EventChannel(),
a.ServerSideOptions, a.DryRunStrategy, a.Factory)
if err != nil {
a.sendTaskResult(taskContext, err)
return
@ -142,7 +144,8 @@ func (a *ApplyTask) Start(taskContext *taskrunner.TaskContext) {
}()
}
func newApplyOptions(eventChannel chan event.Event, strategy common.DryRunStrategy, factory util.Factory) (applyOptions, error) {
func newApplyOptions(eventChannel chan event.Event, serverSideOptions common.ServerSideOptions,
strategy common.DryRunStrategy, factory util.Factory) (applyOptions, error) {
discovery, err := factory.ToDiscoveryClient()
if err != nil {
return nil, err
@ -171,10 +174,10 @@ func newApplyOptions(eventChannel chan event.Event, strategy common.DryRunStrate
PrintFlags: &genericclioptions.PrintFlags{
OutputFormat: &emptyString,
},
// Setting the ServerSideApply here since it is needed for server-side
// dry-run. We don't yet support SSA.
ServerSideApply: strategy.ServerDryRun(),
FieldManager: "kubectl", // TODO: Make this configurable
// Server-side apply if flag set or server-side dry run.
ServerSideApply: strategy.ServerDryRun() || serverSideOptions.ServerSideApply,
ForceConflicts: serverSideOptions.ForceConflicts,
FieldManager: serverSideOptions.FieldManager,
DryRun: strategy.ClientOrServerDryRun(),
ServerDryRun: strategy.ServerDryRun(),
ToPrinter: (&KubectlPrinterAdapter{

View File

@ -78,7 +78,7 @@ func TestApplyTask_FetchGeneration(t *testing.T) {
objs := toUnstructureds(tc.rss)
oldAO := applyOptionsFactoryFunc
applyOptionsFactoryFunc = func(chan event.Event, common.DryRunStrategy, util.Factory) (applyOptions, error) {
applyOptionsFactoryFunc = func(chan event.Event, common.ServerSideOptions, common.DryRunStrategy, util.Factory) (applyOptions, error) {
return &fakeApplyOptions{}, nil
}
defer func() { applyOptionsFactoryFunc = oldAO }()
@ -242,7 +242,7 @@ func TestApplyTask_DryRun(t *testing.T) {
ao := &fakeApplyOptions{}
oldAO := applyOptionsFactoryFunc
applyOptionsFactoryFunc = func(chan event.Event, common.DryRunStrategy, util.Factory) (applyOptions, error) {
applyOptionsFactoryFunc = func(chan event.Event, common.ServerSideOptions, common.DryRunStrategy, util.Factory) (applyOptions, error) {
return ao, nil
}
defer func() { applyOptionsFactoryFunc = oldAO }()

View File

@ -29,6 +29,9 @@ const (
OnRemoveKeep = "keep"
// Maximum random number, non-inclusive, eight digits.
maxRandInt = 100000000
// DefaultFieldManager is default owner of applied fields in
// server-side apply.
DefaultFieldManager = "kubectl"
)
// RandomStr returns an eight-digit (with leading zeros) string of a
@ -78,3 +81,15 @@ func (drs DryRunStrategy) ServerDryRun() bool {
func (drs DryRunStrategy) ClientOrServerDryRun() bool {
return drs == DryRunClient || drs == DryRunServer
}
// ServerSideOptions encapsulates the fields to implement server-side apply.
type ServerSideOptions struct {
// ServerSideApply means the merge patch is calculated on the API server instead of the client.
ServerSideApply bool
// ForceConflicts overwrites the fields when applying if the field manager differs.
ForceConflicts bool
// FieldManager identifies the client "owner" of the applied fields (e.g. kubectl)
FieldManager string
}
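As a small illustration (not from the diff; the package and helper name are made up), these fields feed the kubectl apply options in newApplyOptions above, where a server-side dry run also forces server-side apply:
```
// Illustrative sketch: mirrors the condition added to newApplyOptions in
// this commit. The package name, function name, and import path are assumptions.
package example

import "sigs.k8s.io/cli-utils/pkg/common"

// effectiveServerSideApply reports whether the merge patch should be
// calculated on the API server: either the caller asked for server-side
// apply or a server-side dry run requires it.
func effectiveServerSideApply(opts common.ServerSideOptions, strategy common.DryRunStrategy) bool {
	return strategy.ServerDryRun() || opts.ServerSideApply
}
```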

View File

@ -1,3 +1,6 @@
// Copyright 2020 The Kubernetes Authors.
// SPDX-License-Identifier: Apache-2.0
// Code generated by "stringer -type=EventType"; DO NOT EDIT.
package event