feat: remove xfn

Signed-off-by: Philippe Scorsolini <p.scorsolini@gmail.com>
This commit is contained in:
Philippe Scorsolini 2023-08-02 14:01:10 +02:00 committed by Nic Cope
parent 45f7cbc94e
commit 30be3e05cf
54 changed files with 16 additions and 7155 deletions

View File

@ -241,7 +241,7 @@ jobs:
strategy:
fail-fast: false
matrix:
test-suite: [base, composition-webhook-schema-validation, composition-functions]
test-suite: [base, composition-webhook-schema-validation]
steps:
- name: Setup QEMU

View File

@ -87,13 +87,6 @@ jobs:
release: ${{ fromJSON(needs.generate-matrix.outputs.supported_releases) }}
image:
- crossplane/crossplane
- crossplane/xfn
exclude:
# excluded because xfn was introduced only in v1.11
- image: crossplane/xfn
release: v1.9
- image: crossplane/xfn
release: v1.10
runs-on: ubuntu-latest
steps:

View File

@ -29,7 +29,7 @@ NPROCS ?= 1
# to half the number of CPU cores.
GO_TEST_PARALLEL := $(shell echo $$(( $(NPROCS) / 2 )))
GO_STATIC_PACKAGES = $(GO_PROJECT)/cmd/crossplane $(GO_PROJECT)/cmd/crank $(GO_PROJECT)/cmd/xfn
GO_STATIC_PACKAGES = $(GO_PROJECT)/cmd/crossplane $(GO_PROJECT)/cmd/crank
GO_TEST_PACKAGES = $(GO_PROJECT)/test/e2e
GO_LDFLAGS += -X $(GO_PROJECT)/internal/version.version=$(VERSION)
GO_SUBDIRS += cmd internal apis
@ -62,7 +62,7 @@ HELM_VALUES_TEMPLATE_SKIPPED = true
# all be in folders at the same level (no additional levels of nesting).
REGISTRY_ORGS = docker.io/crossplane xpkg.upbound.io/crossplane
IMAGES = crossplane xfn
IMAGES = crossplane
-include build/makelib/imagelight.mk
# ====================================================================================
@ -127,9 +127,6 @@ e2e.test.images:
e2e-tag-images: e2e.test.images
@$(INFO) Tagging E2E test images
@docker tag $(BUILD_REGISTRY)/$(PROJECT_NAME)-$(TARGETARCH) crossplane-e2e/$(PROJECT_NAME):latest || $(FAIL)
@docker tag $(BUILD_REGISTRY)/xfn-$(TARGETARCH) crossplane-e2e/xfn:latest || $(FAIL)
@docker tag $(BUILD_REGISTRY)/fn-labelizer-$(TARGETARCH) crossplane-e2e/fn-labelizer:latest || $(FAIL)
@docker tag $(BUILD_REGISTRY)/fn-tmp-writer-$(TARGETARCH) crossplane-e2e/fn-tmp-writer:latest || $(FAIL)
@$(OK) Tagged E2E test images
# NOTE(negz): There's already a go.test.integration target, but it's weird.

2
build

@ -1 +1 @@
Subproject commit 292f958d2d97f26b450723998f82f7fc1767920c
Subproject commit bd5297bd16c113cbc5ed1905b1d96aa1cb3078ec

View File

@ -122,26 +122,6 @@ and their default values.
| `serviceAccount.customAnnotations` | Add custom `annotations` to the Crossplane ServiceAccount. | `{}` |
| `tolerations` | Add `tolerations` to the Crossplane pod deployment. | `[]` |
| `webhooks.enabled` | Enable webhooks for Crossplane and installed Provider packages. | `true` |
| `xfn.args` | Add custom arguments to the Composite functions runner container. | `[]` |
| `xfn.cache.configMap` | The name of a ConfigMap to use as the Composite function runner package cache. Disables the default Composite function runner package cache `emptyDir` Volume. | `""` |
| `xfn.cache.medium` | Set to `Memory` to hold the Composite function runner package cache in a RAM-backed file system. Useful for Crossplane development. | `""` |
| `xfn.cache.pvc` | The name of a PersistentVolumeClaim to use as the Composite function runner package cache. Disables the default Composite function runner package cache `emptyDir` Volume. | `""` |
| `xfn.cache.sizeLimit` | The size limit for the Composite function runner package cache. If medium is `Memory` the `sizeLimit` can't exceed Node memory. | `"1Gi"` |
| `xfn.enabled` | Enable the alpha Composition functions (`xfn`) sidecar container. Also requires Crossplane `args` value `--enable-composition-functions` set. | `false` |
| `xfn.extraEnvVars` | Add custom environmental variables to the Composite function runner container. Replaces any `.` in a variable name with `_`. For example, `SAMPLE.KEY=value1` becomes `SAMPLE_KEY=value1`. | `{}` |
| `xfn.image.pullPolicy` | Composite function runner container image pull policy. | `"IfNotPresent"` |
| `xfn.image.repository` | Composite function runner container image. | `"crossplane/xfn"` |
| `xfn.image.tag` | Composite function runner container image tag. Defaults to the value of `appVersion` in Chart.yaml. | `""` |
| `xfn.resources.limits.cpu` | CPU resource limits for the Composite function runner container. | `"2000m"` |
| `xfn.resources.limits.memory` | Memory resource limits for the Composite function runner container. | `"2Gi"` |
| `xfn.resources.requests.cpu` | CPU resource requests for the Composite function runner container. | `"1000m"` |
| `xfn.resources.requests.memory` | Memory resource requests for the Composite function runner container. | `"1Gi"` |
| `xfn.securityContext.allowPrivilegeEscalation` | Enable `allowPrivilegeEscalation` for the Composite function runner container. | `false` |
| `xfn.securityContext.capabilities.add` | Set Linux capabilities for the Composite function runner container. The default values allow the container to create an unprivileged user namespace for running Composite function containers. | `["SETUID","SETGID"]` |
| `xfn.securityContext.readOnlyRootFilesystem` | Set the Composite function runner container root file system as read-only. | `true` |
| `xfn.securityContext.runAsGroup` | The group ID used by the Composite function runner container. | `65532` |
| `xfn.securityContext.runAsUser` | The user ID used by the Composite function runner container. | `65532` |
| `xfn.securityContext.seccompProfile.type` | Apply a `seccompProfile` to the Composite function runner container. The default value allows the Composite function runner container permissions to use the `unshare` syscall. | `"Unconfined"` |
### Command Line

View File

@ -21,7 +21,7 @@ spec:
type: {{ .Values.deploymentStrategy }}
template:
metadata:
{{- if or .Values.metrics.enabled .Values.xfn.enabled .Values.customAnnotations }}
{{- if or .Values.metrics.enabled .Values.customAnnotations }}
annotations:
{{- end }}
{{- if .Values.metrics.enabled }}
@ -29,9 +29,6 @@ spec:
prometheus.io/port: "8080"
prometheus.io/scrape: "true"
{{- end }}
{{- if .Values.xfn.enabled }}
container.apparmor.security.beta.kubernetes.io/{{ .Chart.Name }}-xfn: unconfined
{{- end }}
{{- with .Values.customAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
@ -187,50 +184,6 @@ spec:
{{- if .Values.extraVolumeMountsCrossplane }}
{{- toYaml .Values.extraVolumeMountsCrossplane | nindent 10 }}
{{- end }}
{{- if .Values.xfn.enabled }}
- image: "{{ .Values.xfn.image.repository }}:{{ .Values.xfn.image.tag | default (printf "v%s" .Chart.AppVersion) }}"
args:
- start
{{- range $arg := .Values.xfn.args }}
- {{ $arg }}
{{- end }}
imagePullPolicy: {{ .Values.xfn.image.pullPolicy }}
name: {{ .Chart.Name }}-xfn
resources:
{{- toYaml .Values.xfn.resources | nindent 12 }}
securityContext:
{{- toYaml .Values.xfn.securityContext | nindent 12 }}
env:
- name: GOMAXPROCS
valueFrom:
resourceFieldRef:
containerName: {{ .Chart.Name }}
resource: limits.cpu
- name: GOMEMLIMIT
valueFrom:
resourceFieldRef:
containerName: {{ .Chart.Name }}
resource: limits.memory
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
{{- if .Values.registryCaBundleConfig.key }}
- name: CA_BUNDLE_PATH
value: "/certs/{{ .Values.registryCaBundleConfig.key }}"
{{- end}}
{{- range $key, $value := .Values.xfn.extraEnvVars }}
- name: {{ $key | replace "." "_" }}
value: {{ $value | quote }}
{{- end }}
volumeMounts:
- mountPath: /xfn
name: xfn-cache
{{- if .Values.registryCaBundleConfig.name }}
- mountPath: /certs
name: ca-certs
{{- end }}
{{- end }}
volumes:
- name: package-cache
{{- if .Values.packageCache.pvc }}
@ -244,17 +197,6 @@ spec:
medium: {{ .Values.packageCache.medium }}
sizeLimit: {{ .Values.packageCache.sizeLimit }}
{{- end }}
{{- if .Values.xfn.enabled }}
- name: xfn-cache
{{- if .Values.xfn.cache.pvc }}
persistentVolumeClaim:
claimName: {{ .Values.xfn.cache.pvc }}
{{- else }}
emptyDir:
medium: {{ .Values.xfn.cache.medium }}
sizeLimit: {{ .Values.xfn.cache.sizeLimit }}
{{- end }}
{{- end }}
{{- if .Values.registryCaBundleConfig.name }}
- name: ca-certs
configMap:

View File

@ -163,60 +163,4 @@ podSecurityContextRBACManager: {}
extraVolumesCrossplane: {}
# -- Add custom `volumeMounts` to the Crossplane pod.
extraVolumeMountsCrossplane: {}
xfn:
# -- Enable the alpha Composition functions (`xfn`) sidecar container. Also requires
# Crossplane `args` value `--enable-composition-functions` set.
enabled: false
image:
# -- Composite function runner container image.
repository: crossplane/xfn
# -- Composite function runner container image tag. Defaults to the value of `appVersion` in Chart.yaml.
tag: ""
# -- Composite function runner container image pull policy.
pullPolicy: IfNotPresent
# -- Add custom arguments to the Composite functions runner container.
args: []
# -- Add custom environmental variables to the Composite function runner container.
# Replaces any `.` in a variable name with `_`. For example, `SAMPLE.KEY=value1` becomes `SAMPLE_KEY=value1`.
extraEnvVars: {}
securityContext:
# -- The user ID used by the Composite function runner container.
runAsUser: 65532
# -- The group ID used by the Composite function runner container.
runAsGroup: 65532
# -- Enable `allowPrivilegeEscalation` for the Composite function runner container.
allowPrivilegeEscalation: false
# -- Set the Composite function runner container root file system as read-only.
readOnlyRootFilesystem: true
capabilities:
# -- Set Linux capabilities for the Composite function runner container.
# The default values allow the container to create an unprivileged
# user namespace for running Composite function containers.
add: ["SETUID", "SETGID"]
seccompProfile:
# -- Apply a `seccompProfile` to the Composite function runner container.
# The default value allows the Composite function runner container
# permissions to use the `unshare` syscall.
type: Unconfined
cache:
# -- Set to `Memory` to hold the Composite function runner package cache in a RAM-backed file system. Useful for Crossplane development.
medium: ""
# -- The size limit for the Composite function runner package cache. If medium is `Memory` the `sizeLimit` can't exceed Node memory.
sizeLimit: 1Gi
# -- The name of a PersistentVolumeClaim to use as the Composite function runner package cache. Disables the default Composite function runner package cache `emptyDir` Volume.
pvc: ""
# -- The name of a ConfigMap to use as the Composite function runner package cache. Disables the default Composite function runner package cache `emptyDir` Volume.
configMap: ""
resources:
limits:
# -- CPU resource limits for the Composite function runner container.
cpu: 2000m
# -- Memory resource limits for the Composite function runner container.
memory: 2Gi
requests:
# -- CPU resource requests for the Composite function runner container.
cpu: 1000m
# -- Memory resource requests for the Composite function runner container.
memory: 1Gi
extraVolumeMountsCrossplane: {}

View File

@ -1,28 +0,0 @@
# This is debian:bookworm-slim (i.e. Debian 12, testing)
FROM debian:bookworm-slim@sha256:9bd077d2f77c754f4f7f5ee9e6ded9ff1dff92c6dce877754da21b917c122c77
ARG TARGETOS
ARG TARGETARCH
# TODO(negz): Find a better way to get an OCI runtime? Ideally we'd grab a
# static build of crun (or runc) that we could drop into a distroless image. We
# slightly prefer crun for its nascent WASM and KVM capabilities, but they only
# offer static builds for amd64 and arm64 and building our own takes a long
# time.
RUN apt-get update && apt-get install -y ca-certificates crun && rm -rf /var/lib/apt/lists/*
COPY bin/${TARGETOS}_${TARGETARCH}/xfn /usr/local/bin/
# We run xfn as root in order to grant it CAP_SETUID and CAP_SETGID, which are
# required in order to create a user namespace with more than one available UID
# and GID. xfn invokes all of the logic that actually fetches, caches, and runs
# a container as an unprivileged user (relative to the root/initial user
# namespace - the user is privileged inside the user namespace xfn creates).
#
# It's possible to run xfn without any root privileges at all - uncomment the
# following line to do so. Note that in this mode xfn will only be able to
# create containers with a single UID and GID (0), so Containerized Functions
# that don't run as root may not work.
# USER 65532
ENTRYPOINT ["xfn"]

View File

@ -1,35 +0,0 @@
# ====================================================================================
# Setup Project
include ../../../build/makelib/common.mk
# ====================================================================================
# Options
include ../../../build/makelib/imagelight.mk
# ====================================================================================
# Targets
img.build:
@$(INFO) docker build $(IMAGE)
@$(MAKE) BUILD_ARGS="--load" img.build.shared
@$(OK) docker build $(IMAGE)
img.publish:
@$(INFO) docker publish $(IMAGE)
@$(MAKE) BUILD_ARGS="--push" img.build.shared
@$(OK) docker publish $(IMAGE)
img.build.shared:
@cp Dockerfile $(IMAGE_TEMP_DIR) || $(FAIL)
@cp -r $(OUTPUT_DIR)/bin/ $(IMAGE_TEMP_DIR)/bin || $(FAIL)
@docker buildx build $(BUILD_ARGS) \
--platform $(IMAGE_PLATFORMS) \
-t $(IMAGE) \
$(IMAGE_TEMP_DIR) || $(FAIL)
img.promote:
@$(INFO) docker promote $(FROM_IMAGE) to $(TO_IMAGE)
@docker buildx imagetools create -t $(TO_IMAGE) $(FROM_IMAGE)
@$(OK) docker promote $(FROM_IMAGE) to $(TO_IMAGE)

View File

@ -13,11 +13,7 @@ eval $(make --no-print-directory -C ${scriptdir}/../.. build.vars)
# ensure the tools we need are installed
make ${KIND} ${KUBECTL} ${HELM3}
# The Composition Functions sidecar container.
XFN_NAME=xfn
BUILD_IMAGE="${BUILD_REGISTRY}/${PROJECT_NAME}-${TARGETARCH}"
XFN_IMAGE="${BUILD_REGISTRY}/${XFN_NAME}-${TARGETARCH}"
DEFAULT_NAMESPACE="crossplane-system"
function copy_image_to_cluster() {
@ -54,7 +50,6 @@ case "${1:-}" in
update)
helm_tag="$(cat _output/version)"
copy_image_to_cluster ${BUILD_IMAGE} "${PROJECT_NAME}/${PROJECT_NAME}:${helm_tag}" "${KIND_NAME}"
copy_image_to_cluster ${XFN_IMAGE} "${PROJECT_NAME}/${XFN_NAME}:${helm_tag}" "${KIND_NAME}"
;;
restart)
if check_context; then
@ -69,21 +64,19 @@ case "${1:-}" in
echo "copying image for helm"
helm_tag="$(cat _output/version)"
copy_image_to_cluster ${BUILD_IMAGE} "${PROJECT_NAME}/${PROJECT_NAME}:${helm_tag}" "${KIND_NAME}"
copy_image_to_cluster ${XFN_IMAGE} "${PROJECT_NAME}/${XFN_NAME}:${helm_tag}" "${KIND_NAME}"
[ "$2" ] && ns=$2 || ns="${DEFAULT_NAMESPACE}"
echo "installing helm package into \"$ns\" namespace"
${HELM3} install ${PROJECT_NAME} --namespace ${ns} --create-namespace ${projectdir}/cluster/charts/${PROJECT_NAME} --set image.pullPolicy=Never,imagePullSecrets='',image.tag="${helm_tag}",xfn.image.tag="${helm_tag}" ${HELM3_FLAGS}
${HELM3} install ${PROJECT_NAME} --namespace ${ns} --create-namespace ${projectdir}/cluster/charts/${PROJECT_NAME} --set image.pullPolicy=Never,imagePullSecrets='',image.tag="${helm_tag}" ${HELM3_FLAGS}
;;
helm-upgrade)
echo "copying image for helm"
helm_tag="$(cat _output/version)"
copy_image_to_cluster ${BUILD_IMAGE} "${PROJECT_NAME}/${PROJECT_NAME}:${helm_tag}" "${KIND_NAME}"
copy_image_to_cluster ${XFN_IMAGE} "${PROJECT_NAME}/${XFN_NAME}:${helm_tag}" "${KIND_NAME}"
[ "$2" ] && ns=$2 || ns="${DEFAULT_NAMESPACE}"
echo "upgrading helm package in \"$ns\" namespace"
${HELM3} upgrade --install --namespace ${ns} --create-namespace ${PROJECT_NAME} ${projectdir}/cluster/charts/${PROJECT_NAME} ${HELM3_FLAGS} --set image.pullPolicy=Never,imagePullSecrets='',image.tag="${helm_tag}",xfn.image.tag="${helm_tag}"
${HELM3} upgrade --install --namespace ${ns} --create-namespace ${PROJECT_NAME} ${projectdir}/cluster/charts/${PROJECT_NAME} ${HELM3_FLAGS} --set image.pullPolicy=Never,imagePullSecrets='',image.tag=${helm_tag}
;;
helm-delete)
[ "$2" ] && ns=$2 || ns="${DEFAULT_NAMESPACE}"

View File

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package oci
package core
import (
"crypto/x509"

View File

@ -47,7 +47,6 @@ import (
pkgcontroller "github.com/crossplane/crossplane/internal/controller/pkg/controller"
"github.com/crossplane/crossplane/internal/features"
"github.com/crossplane/crossplane/internal/initializer"
"github.com/crossplane/crossplane/internal/oci"
"github.com/crossplane/crossplane/internal/transport"
"github.com/crossplane/crossplane/internal/validation/apiextensions/v1/composition"
"github.com/crossplane/crossplane/internal/xpkg"
@ -78,7 +77,7 @@ func (c *Command) Run() error {
type startCommand struct {
Profile string `placeholder:"host:port" help:"Serve runtime profiling data via HTTP at /debug/pprof."`
Namespace string `short:"n" help:"Namespace used to unpack, run packages and for xfn private registry credentials extraction." default:"crossplane-system" env:"POD_NAMESPACE"`
Namespace string `short:"n" help:"Namespace used to unpack and run packages." default:"crossplane-system" env:"POD_NAMESPACE"`
ServiceAccount string `help:"Name of the Crossplane Service Account." default:"crossplane" env:"POD_SERVICE_ACCOUNT"`
CacheDir string `short:"c" help:"Directory used for caching package images." default:"/cache" env:"CACHE_DIR"`
LeaderElection bool `short:"l" help:"Use leader election for the controller manager." default:"false" env:"LEADER_ELECTION"`
@ -234,7 +233,7 @@ func (c *startCommand) Run(s *runtime.Scheme, log logging.Logger) error { //noli
}
if c.CABundlePath != "" {
rootCAs, err := oci.ParseCertificatesFromPath(c.CABundlePath)
rootCAs, err := ParseCertificatesFromPath(c.CABundlePath)
if err != nil {
return errors.Wrap(err, "Cannot parse CA bundle")
}

View File

@ -1,83 +0,0 @@
/*
Copyright 2019 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package main is the reference implementation of Composition Functions.
package main
import (
"fmt"
"github.com/alecthomas/kong"
"github.com/google/go-containerregistry/pkg/name"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
"github.com/crossplane/crossplane-runtime/pkg/logging"
"github.com/crossplane/crossplane/cmd/xfn/run"
"github.com/crossplane/crossplane/cmd/xfn/spark"
"github.com/crossplane/crossplane/cmd/xfn/start"
"github.com/crossplane/crossplane/internal/version"
)
// debugFlag enables verbose logging when set; its BeforeApply hook rebinds
// the CLI's logger to a development-mode logger.
type debugFlag bool

// versionFlag prints the xfn version and exits when set.
type versionFlag bool

// KongVars represent the kong variables associated with the CLI parser
// required for the Registry default variable interpolation.
var KongVars = kong.Vars{
	"default_registry": name.DefaultRegistry,
}

// cli defines the xfn command line: global flags plus the start, run, and
// spark subcommands. start is the default subcommand; spark is hidden
// because xfn invokes it internally.
var cli struct {
	Debug   debugFlag   `short:"d" help:"Print verbose logging statements."`
	Version versionFlag `short:"v" help:"Print version and quit."`

	Registry string `short:"r" help:"Default registry used to fetch containers when not specified in tag." default:"${default_registry}" env:"REGISTRY"`

	Start start.Command `cmd:"" help:"Start listening for Composition Function runs over gRPC." default:"1"`
	Run   run.Command   `cmd:"" help:"Run a Composition Function."`
	Spark spark.Command `cmd:"" help:"xfn executes Spark inside a user namespace to run a Composition Function. You shouldn't run it directly." hidden:""`
}
// BeforeApply switches the CLI to a development-mode logger when the debug
// flag is passed, rebinding the logging.Logger that kong supplies to the
// selected subcommand.
func (d debugFlag) BeforeApply(ctx *kong.Context) error { //nolint:unparam // BeforeApply requires this signature.
	devLogger := zap.New(zap.UseDevMode(true)).WithName("xfn")
	// BindTo uses reflect.TypeOf to get reflection type of used interface
	// A *logging.Logger value here is used to find the reflection type here.
	// Please refer: https://golang.org/pkg/reflect/#TypeOf
	ctx.BindTo(logging.NewLogrLogger(devLogger), (*logging.Logger)(nil))
	return nil
}
// BeforeApply prints the xfn version string to the application's stdout and
// exits with a zero status when the version flag is passed.
func (v versionFlag) BeforeApply(app *kong.Kong) error { //nolint:unparam // BeforeApply requires this signature.
	versionString := version.New().GetVersionString()
	fmt.Fprintln(app.Stdout, versionString)
	app.Exit(0)
	return nil
}
// main parses the xfn command line with kong and dispatches to the selected
// subcommand (start by default), terminating the process on error.
func main() {
	logger := zap.New().WithName("xfn")
	parser := kong.Parse(&cli,
		kong.Name("xfn"),
		kong.Description("Crossplane Composition Functions."),
		kong.BindTo(logging.NewLogrLogger(logger), (*logging.Logger)(nil)),
		kong.UsageOnError(),
		KongVars,
	)
	// Pass global flags the subcommands need via start.Args.
	err := parser.Run(&start.Args{Registry: cli.Registry})
	parser.FatalIfErrorf(err)
}

View File

@ -1,145 +0,0 @@
/*
Copyright 2022 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package run implements a convenience CLI to run and test Composition Functions.
package run
import (
"context"
"os"
"path/filepath"
"time"
"github.com/google/go-containerregistry/pkg/authn"
"github.com/google/go-containerregistry/pkg/name"
"google.golang.org/protobuf/types/known/durationpb"
"github.com/crossplane/crossplane-runtime/pkg/errors"
"github.com/crossplane/crossplane/apis/apiextensions/fn/proto/v1alpha1"
"github.com/crossplane/crossplane/cmd/xfn/start"
"github.com/crossplane/crossplane/internal/xfn"
)
// Error strings wrapped around failures in Command.Run.
const (
	errWriteFIO        = "cannot write FunctionIO YAML to stdout"
	errRunFunction     = "cannot run function"
	errParseImage      = "cannot parse image reference"
	errResolveKeychain = "cannot resolve default registry authentication keychain"
	errAuthCfg         = "cannot get default registry authentication credentials"
)
// Command runs a Composition function.
type Command struct {
CacheDir string `short:"c" help:"Directory used for caching function images and containers." default:"/xfn"`
Timeout time.Duration `help:"Maximum time for which the function may run before being killed." default:"30s"`
ImagePullPolicy string `help:"Whether the image may be pulled from a remote registry." enum:"Always,Never,IfNotPresent" default:"IfNotPresent"`
NetworkPolicy string `help:"Whether the function may access the network." enum:"Runner,Isolated" default:"Isolated"`
MapRootUID int `help:"UID that will map to 0 in the function's user namespace. The following 65336 UIDs must be available. Ignored if xfn does not have CAP_SETUID and CAP_SETGID." default:"100000"`
MapRootGID int `help:"GID that will map to 0 in the function's user namespace. The following 65336 GIDs must be available. Ignored if xfn does not have CAP_SETUID and CAP_SETGID." default:"100000"`
// TODO(negz): filecontent appears to take multiple args when it does not.
// Bump kong once https://github.com/alecthomas/kong/issues/346 is fixed.
Image string `arg:"" help:"OCI image to run."`
FunctionIO []byte `arg:"" help:"YAML encoded FunctionIO to pass to the function." type:"filecontent"`
}
// Run a Composition container function. It resolves registry credentials in
// the caller's environment, then delegates execution to an
// xfn.ContainerRunner and writes the resulting FunctionIO to stdout.
func (c *Command) Run(args *start.Args) error {
	// If we don't have CAP_SETUID or CAP_SETGID, we'll only be able to map our
	// own UID and GID to root inside the user namespace.
	rootUID := os.Getuid()
	rootGID := os.Getgid()
	setuid := xfn.HasCapSetUID() && xfn.HasCapSetGID() // We're using 'setuid' as shorthand for both here.
	if setuid {
		rootUID = c.MapRootUID
		rootGID = c.MapRootGID
	}

	ref, err := name.ParseReference(c.Image, name.WithDefaultRegistry(args.Registry))
	if err != nil {
		return errors.Wrap(err, errParseImage)
	}

	// We want to resolve authentication credentials here, using the caller's
	// environment rather than inside the user namespace that spark will create.
	// DefaultKeychain uses credentials from ~/.docker/config.json to pull
	// private images. Despite being 'the default' it must be explicitly
	// provided, or go-containerregistry will use anonymous authentication.
	auth, err := authn.DefaultKeychain.Resolve(ref.Context())
	if err != nil {
		return errors.Wrap(err, errResolveKeychain)
	}
	a, err := auth.Authorization()
	if err != nil {
		return errors.Wrap(err, errAuthCfg)
	}

	// The resolved credentials are forwarded in the request so the runner can
	// pull the image from inside the user namespace it creates.
	f := xfn.NewContainerRunner(xfn.SetUID(setuid), xfn.MapToRoot(rootUID, rootGID), xfn.WithCacheDir(filepath.Clean(c.CacheDir)), xfn.WithRegistry(args.Registry))
	rsp, err := f.RunFunction(context.Background(), &v1alpha1.RunFunctionRequest{
		Image: c.Image,
		Input: c.FunctionIO,
		ImagePullConfig: &v1alpha1.ImagePullConfig{
			PullPolicy: pullPolicy(c.ImagePullPolicy),
			Auth: &v1alpha1.ImagePullAuth{
				Username:      a.Username,
				Password:      a.Password,
				Auth:          a.Auth,
				IdentityToken: a.IdentityToken,
				RegistryToken: a.RegistryToken,
			},
		},
		RunFunctionConfig: &v1alpha1.RunFunctionConfig{
			Timeout: durationpb.New(c.Timeout),
			Network: &v1alpha1.NetworkConfig{
				Policy: networkPolicy(c.NetworkPolicy),
			},
		},
	})
	if err != nil {
		return errors.Wrap(err, errRunFunction)
	}

	// The function's output is the (possibly mutated) FunctionIO YAML.
	_, err = os.Stdout.Write(rsp.GetOutput())
	return errors.Wrap(err, errWriteFIO)
}
// pullPolicy maps a CLI pull-policy string to its protobuf enum value.
// "IfNotPresent" and any unrecognized value map to IF_NOT_PRESENT.
func pullPolicy(p string) v1alpha1.ImagePullPolicy {
	switch p {
	case "Always":
		return v1alpha1.ImagePullPolicy_IMAGE_PULL_POLICY_ALWAYS
	case "Never":
		return v1alpha1.ImagePullPolicy_IMAGE_PULL_POLICY_NEVER
	}
	// "IfNotPresent" and everything else.
	return v1alpha1.ImagePullPolicy_IMAGE_PULL_POLICY_IF_NOT_PRESENT
}
// networkPolicy maps a CLI network-policy string to its protobuf enum value.
// "Isolated" and any unrecognized value map to NETWORK_POLICY_ISOLATED.
func networkPolicy(p string) v1alpha1.NetworkPolicy {
	if p == "Runner" {
		return v1alpha1.NetworkPolicy_NETWORK_POLICY_RUNNER
	}
	// "Isolated" and everything else.
	return v1alpha1.NetworkPolicy_NETWORK_POLICY_ISOLATED
}

View File

@ -1,275 +0,0 @@
/*
Copyright 2022 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package spark runs a Composition Function. It is designed to be run as root
// inside an unprivileged user namespace.
package spark
import (
"bytes"
"context"
"io"
"os"
"os/exec"
"path/filepath"
"time"
"github.com/google/go-containerregistry/pkg/name"
"github.com/google/uuid"
runtime "github.com/opencontainers/runtime-spec/specs-go"
"google.golang.org/protobuf/proto"
"github.com/crossplane/crossplane-runtime/pkg/errors"
"github.com/crossplane/crossplane/apis/apiextensions/fn/proto/v1alpha1"
"github.com/crossplane/crossplane/cmd/xfn/start"
"github.com/crossplane/crossplane/internal/oci"
"github.com/crossplane/crossplane/internal/oci/spec"
"github.com/crossplane/crossplane/internal/oci/store"
"github.com/crossplane/crossplane/internal/oci/store/overlay"
"github.com/crossplane/crossplane/internal/oci/store/uncompressed"
)
// Error strings.
const (
	errReadRequest      = "cannot read request from stdin"
	errUnmarshalRequest = "cannot unmarshal request data from stdin"
	errNewBundleStore   = "cannot create OCI runtime bundle store"
	errNewDigestStore   = "cannot create OCI image digest store"
	errParseRef         = "cannot parse OCI image reference"
	errPull             = "cannot pull OCI image"
	errBundleFn         = "cannot create OCI runtime bundle"
	errMkRuntimeRootdir = "cannot make OCI runtime cache"
	errRuntime          = "OCI runtime error"
	errCleanupBundle    = "cannot cleanup OCI runtime bundle"
	errMarshalResponse  = "cannot marshal response data to stdout"
	errWriteResponse    = "cannot write response data to stdout"
	// NOTE(review): errCPULimit, errMemoryLimit and errHostNetwork are not
	// used in this block; presumably referenced elsewhere in the package.
	errCPULimit    = "cannot limit container CPU"
	errMemoryLimit = "cannot limit container memory"
	errHostNetwork = "cannot configure container to run in host network namespace"
)

// The path within the cache dir that the OCI runtime should use for its
// '--root' cache.
const ociRuntimeRoot = "runtime"

// The time after which the OCI runtime will be killed if none is specified in
// the RunFunctionRequest.
const defaultTimeout = 25 * time.Second

// Command runs a containerized Composition Function.
type Command struct {
	// CacheDir holds cached image layers and extracted OCI runtime bundles.
	CacheDir string `short:"c" help:"Directory used for caching function images and containers." default:"/xfn"`
	Runtime  string `help:"OCI runtime binary to invoke." default:"crun"`
	// MaxStdioBytes of 0 means unlimited; see limitReaderIfNonZero.
	MaxStdioBytes int64  `help:"Maximum size of stdout and stderr for functions." default:"0"`
	CABundlePath  string `help:"Additional CA bundle to use when fetching function images from registry." env:"CA_BUNDLE_PATH"`
}
// Run a Composition Function inside an unprivileged user namespace. Reads a
// protocol buffer serialized RunFunctionRequest from stdin, and writes a
// protocol buffer serialized RunFunctionResponse to stdout.
func (c *Command) Run(args *start.Args) error { //nolint:gocyclo // TODO(negz): Refactor some of this out into functions, add tests.
	pb, err := io.ReadAll(os.Stdin)
	if err != nil {
		return errors.Wrap(err, errReadRequest)
	}

	req := &v1alpha1.RunFunctionRequest{}
	if err := proto.Unmarshal(pb, req); err != nil {
		return errors.Wrap(err, errUnmarshalRequest)
	}

	// A zero (i.e. unset) timeout in the request falls back to the default.
	t := req.GetRunFunctionConfig().GetTimeout().AsDuration()
	if t == 0 {
		t = defaultTimeout
	}
	ctx, cancel := context.WithTimeout(context.Background(), t)
	defer cancel()

	runID := uuid.NewString()

	// We prefer to use an overlayfs bundler where possible. It roughly doubles
	// the disk space per image because it caches layers as overlay compatible
	// directories in addition to the CachingImagePuller's cache of uncompressed
	// layer tarballs. The advantage is faster start times for containers with
	// cached image, because it creates an overlay rootfs. The uncompressed
	// bundler on the other hand must untar all of a containers layers to create
	// a new rootfs each time it runs a container.
	var s store.Bundler = uncompressed.NewBundler(c.CacheDir)
	if overlay.Supported(c.CacheDir) {
		s, err = overlay.NewCachingBundler(c.CacheDir)
	}
	// NOTE(review): err is nil here unless the overlay branch above ran, so
	// this only reports NewCachingBundler failures.
	if err != nil {
		return errors.Wrap(err, errNewBundleStore)
	}

	// This store maps OCI references to their last known digests. We use it to
	// resolve references when the imagePullPolicy is Never or IfNotPresent.
	h, err := store.NewDigest(c.CacheDir)
	if err != nil {
		return errors.Wrap(err, errNewDigestStore)
	}

	r, err := name.ParseReference(req.GetImage(), name.WithDefaultRegistry(args.Registry))
	if err != nil {
		return errors.Wrap(err, errParseRef)
	}

	opts := []oci.ImageClientOption{FromImagePullConfig(req.GetImagePullConfig())}
	if c.CABundlePath != "" {
		rootCA, err := oci.ParseCertificatesFromPath(c.CABundlePath)
		if err != nil {
			return errors.Wrap(err, "Cannot parse CA bundle")
		}
		opts = append(opts, oci.WithCustomCA(rootCA))
	}

	// We cache every image we pull to the filesystem. Layers are cached as
	// uncompressed tarballs. This allows them to be extracted quickly when
	// using the uncompressed.Bundler, which extracts a new root filesystem for
	// every container run.
	p := oci.NewCachingPuller(h, store.NewImage(c.CacheDir), &oci.RemoteClient{})
	img, err := p.Image(ctx, r, opts...)
	if err != nil {
		return errors.Wrap(err, errPull)
	}

	// Create an OCI runtime bundle for this container run.
	b, err := s.Bundle(ctx, img, runID, FromRunFunctionConfig(req.GetRunFunctionConfig()))
	if err != nil {
		return errors.Wrap(err, errBundleFn)
	}

	root := filepath.Join(c.CacheDir, ociRuntimeRoot)
	if err := os.MkdirAll(root, 0700); err != nil {
		_ = b.Cleanup()
		return errors.Wrap(err, errMkRuntimeRootdir)
	}

	// TODO(negz): Consider using the OCI runtime's lifecycle management commands
	// (i.e create, start, and delete) rather than run. This would allow spark
	// to return without sitting in-between xfn and crun. It's also generally
	// recommended; 'run' is more for testing. In practice though run seems to
	// work just fine for our use case.
	//nolint:gosec // Executing with user-supplied input is intentional.
	cmd := exec.CommandContext(ctx, c.Runtime, "--root="+root, "run", "--bundle="+b.Path(), runID)
	cmd.Stdin = bytes.NewReader(req.GetInput())

	// Pipes must be set up before Start, and b must be cleaned up on every
	// error path below so the bundle directory is not leaked.
	stdoutPipe, err := cmd.StdoutPipe()
	if err != nil {
		_ = b.Cleanup()
		return errors.Wrap(err, errRuntime)
	}
	stderrPipe, err := cmd.StderrPipe()
	if err != nil {
		_ = b.Cleanup()
		return errors.Wrap(err, errRuntime)
	}

	if err := cmd.Start(); err != nil {
		_ = b.Cleanup()
		return errors.Wrap(err, errRuntime)
	}

	// Both pipes are drained before Wait, as required by os/exec. Reads are
	// capped at MaxStdioBytes when it is non-zero.
	stdout, err := io.ReadAll(limitReaderIfNonZero(stdoutPipe, c.MaxStdioBytes))
	if err != nil {
		_ = b.Cleanup()
		return errors.Wrap(err, errRuntime)
	}
	stderr, err := io.ReadAll(limitReaderIfNonZero(stderrPipe, c.MaxStdioBytes))
	if err != nil {
		_ = b.Cleanup()
		return errors.Wrap(err, errRuntime)
	}

	if err := cmd.Wait(); err != nil {
		var exitErr *exec.ExitError
		if errors.As(err, &exitErr) {
			// Attach captured stderr so callers can surface the runtime's output.
			exitErr.Stderr = stderr
		}
		_ = b.Cleanup()
		return errors.Wrap(err, errRuntime)
	}

	if err := b.Cleanup(); err != nil {
		return errors.Wrap(err, errCleanupBundle)
	}

	rsp := &v1alpha1.RunFunctionResponse{Output: stdout}
	pb, err = proto.Marshal(rsp)
	if err != nil {
		return errors.Wrap(err, errMarshalResponse)
	}
	_, err = os.Stdout.Write(pb)
	return errors.Wrap(err, errWriteResponse)
}
// limitReaderIfNonZero caps reads from r at limit bytes via io.LimitReader.
// A limit of zero means 'unlimited'; in that case r is returned unchanged.
func limitReaderIfNonZero(r io.Reader, limit int64) io.Reader {
	if limit != 0 {
		return io.LimitReader(r, limit)
	}
	return r
}
// FromImagePullConfig configures an image client with options derived from the
// supplied ImagePullConfig.
func FromImagePullConfig(cfg *v1alpha1.ImagePullConfig) oci.ImageClientOption {
	return func(o *oci.ImageClientOptions) {
		// Translate the gRPC pull policy into its oci package equivalent. An
		// unspecified policy is treated as if-not-present. Any other (unknown)
		// enum value intentionally sets no policy at all.
		switch cfg.GetPullPolicy() {
		case v1alpha1.ImagePullPolicy_IMAGE_PULL_POLICY_ALWAYS:
			oci.WithPullPolicy(oci.ImagePullPolicyAlways)(o)
		case v1alpha1.ImagePullPolicy_IMAGE_PULL_POLICY_NEVER:
			oci.WithPullPolicy(oci.ImagePullPolicyNever)(o)
		case v1alpha1.ImagePullPolicy_IMAGE_PULL_POLICY_IF_NOT_PRESENT,
			v1alpha1.ImagePullPolicy_IMAGE_PULL_POLICY_UNSPECIFIED:
			oci.WithPullPolicy(oci.ImagePullPolicyIfNotPresent)(o)
		}

		// Pull auth is optional; apply it only when the config carries it.
		a := cfg.GetAuth()
		if a == nil {
			return
		}
		oci.WithPullAuth(&oci.ImagePullAuth{
			Username:      a.GetUsername(),
			Password:      a.GetPassword(),
			Auth:          a.GetAuth(),
			IdentityToken: a.GetIdentityToken(),
			RegistryToken: a.GetRegistryToken(),
		})(o)
	}
}
// FromRunFunctionConfig extends a runtime spec with configuration derived from
// the supplied RunFunctionConfig.
func FromRunFunctionConfig(cfg *v1alpha1.RunFunctionConfig) spec.Option {
	return func(s *runtime.Spec) error {
		// Protobuf getters are nil-safe, so chaining them is fine even when
		// no resource limits were configured; the getters then return "".
		limits := cfg.GetResources().GetLimits()
		if cpu := limits.GetCpu(); cpu != "" {
			if err := spec.WithCPULimit(cpu)(s); err != nil {
				return errors.Wrap(err, errCPULimit)
			}
		}
		if mem := limits.GetMemory(); mem != "" {
			if err := spec.WithMemoryLimit(mem)(s); err != nil {
				return errors.Wrap(err, errMemoryLimit)
			}
		}
		// Only the 'runner' network policy maps to host networking; any other
		// policy leaves the spec's default network configuration untouched.
		if cfg.GetNetwork().GetPolicy() != v1alpha1.NetworkPolicy_NETWORK_POLICY_RUNNER {
			return nil
		}
		return errors.Wrap(spec.WithHostNetwork()(s), errHostNetwork)
	}
}

View File

@ -1,70 +0,0 @@
/*
Copyright 2022 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package start implements the reference Composition Function runner.
// It exposes a gRPC API that may be used to run Composition Functions.
package start
import (
"os"
"path/filepath"
"github.com/crossplane/crossplane-runtime/pkg/errors"
"github.com/crossplane/crossplane-runtime/pkg/logging"
"github.com/crossplane/crossplane/internal/xfn"
)
// Error strings
const (
	// Returned by Command.Run when the gRPC server cannot be started.
	errListenAndServe = "cannot listen for and serve gRPC API"
)

// Args contains the default registry used to pull XFN containers.
type Args struct {
	// Registry is the default OCI registry used to resolve function image
	// references that do not specify one.
	Registry string
}
// Command starts a gRPC API to run Composition Functions.
//
// Fields are wired to CLI flags via kong struct tags; the defaults here are
// what `xfn start` uses when no flags are supplied.
type Command struct {
	CacheDir string `short:"c" help:"Directory used for caching function images and containers." default:"/xfn"`

	// A user namespace maps a contiguous range of 65536 IDs (0-65535) to the
	// host; the original help text said "65336", which was a typo.
	MapRootUID int `help:"UID that will map to 0 in the function's user namespace. The following 65536 UIDs must be available. Ignored if xfn does not have CAP_SETUID and CAP_SETGID." default:"100000"`
	MapRootGID int `help:"GID that will map to 0 in the function's user namespace. The following 65536 GIDs must be available. Ignored if xfn does not have CAP_SETUID and CAP_SETGID." default:"100000"`

	Network string `help:"Network on which to listen for gRPC connections." default:"unix"`
	Address string `help:"Address at which to listen for gRPC connections." default:"@crossplane/fn/default.sock"`
}
// Run a Composition Function gRPC API.
func (c *Command) Run(args *Args, log logging.Logger) error {
	// We're using 'setuid' as shorthand for both CAP_SETUID and CAP_SETGID.
	// Without them we can only map our own UID and GID to root inside the
	// function's user namespace.
	setuid := xfn.HasCapSetUID() && xfn.HasCapSetGID()
	rootUID, rootGID := os.Getuid(), os.Getgid()
	if setuid {
		rootUID, rootGID = c.MapRootUID, c.MapRootGID
	}

	// TODO(negz): Expose a healthz endpoint and otel metrics.
	r := xfn.NewContainerRunner(
		xfn.SetUID(setuid),
		xfn.MapToRoot(rootUID, rootGID),
		xfn.WithCacheDir(filepath.Clean(c.CacheDir)),
		xfn.WithLogger(log),
		xfn.WithRegistry(args.Registry))
	return errors.Wrap(r.ListenAndServe(c.Network, c.Address), errListenAndServe)
}

12
go.mod
View File

@ -9,18 +9,14 @@ require (
github.com/alecthomas/kong v0.8.0
github.com/bufbuild/buf v1.26.1
github.com/crossplane/crossplane-runtime v0.20.1
github.com/cyphar/filepath-securejoin v0.2.3
github.com/google/go-cmp v0.5.9
github.com/google/go-containerregistry v0.16.1
github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20230617045147-2472cbbbf289
github.com/google/uuid v1.3.0
github.com/jmattheis/goverter v0.17.4
github.com/opencontainers/runtime-spec v1.1.0-rc.3.0.20230610073135-48415de180cf
github.com/pkg/errors v0.9.1
github.com/sirupsen/logrus v1.9.3
github.com/spf13/afero v1.9.5
golang.org/x/sync v0.3.0
golang.org/x/sys v0.11.0
google.golang.org/grpc v1.57.0
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0
google.golang.org/protobuf v1.31.0
@ -30,7 +26,6 @@ require (
k8s.io/client-go v0.27.3
k8s.io/code-generator v0.27.3
k8s.io/utils v0.0.0-20230505201702-9f6742963106
kernel.org/pub/linux/libs/security/libcap/cap v1.2.69
sigs.k8s.io/controller-runtime v0.15.0
sigs.k8s.io/controller-tools v0.12.1
sigs.k8s.io/e2e-framework v0.2.1-0.20230716064705-49e8554b536f
@ -38,8 +33,6 @@ require (
sigs.k8s.io/yaml v1.3.0
)
require google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect
require (
cloud.google.com/go/compute v1.19.3 // indirect
cloud.google.com/go/compute/metadata v0.2.3 // indirect
@ -78,6 +71,7 @@ require (
github.com/chrismellard/docker-credential-acr-env v0.0.0-20230304212654-82a0ddb27589 // indirect
github.com/containerd/stargz-snapshotter/estargz v0.14.3 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/cyphar/filepath-securejoin v0.2.3 // indirect
github.com/dave/jennifer v1.6.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/dimchansky/utfbom v1.1.1 // indirect
@ -111,6 +105,7 @@ require (
github.com/google/go-containerregistry/pkg/authn/kubernetes v0.0.0-20230516205744-dbecb1de8cfa // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/pprof v0.0.0-20230705174524-200ffdc848b8 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
github.com/hashicorp/go-hclog v1.0.0 // indirect
@ -169,12 +164,14 @@ require (
golang.org/x/mod v0.12.0 // indirect
golang.org/x/net v0.13.0 // indirect; indirect // indirect
golang.org/x/oauth2 v0.8.0 // indirect
golang.org/x/sys v0.11.0 // indirect
golang.org/x/term v0.10.0 // indirect
golang.org/x/text v0.11.0 // indirect
golang.org/x/time v0.3.0 // indirect
golang.org/x/tools v0.11.0 // indirect
gomodules.xyz/jsonpatch/v2 v2.3.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
@ -182,7 +179,6 @@ require (
k8s.io/gengo v0.0.0-20220902162205-c0856e24416d // indirect
k8s.io/klog/v2 v2.100.1
k8s.io/kube-openapi v0.0.0-20230525220651-2546d827e515 // indirect
kernel.org/pub/linux/libs/security/libcap/psx v1.2.69 // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
)

6
go.sum
View File

@ -442,8 +442,6 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.0-rc4 h1:oOxKUJWnFC4YGHCCMNql1x4YaDfYBTS5Y4x/Cgeo1E0=
github.com/opencontainers/image-spec v1.1.0-rc4/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8=
github.com/opencontainers/runtime-spec v1.1.0-rc.3.0.20230610073135-48415de180cf h1:AGnwZS8lmjGxN2/XlzORiYESAk7HOlE3XI37uhIP9Vw=
github.com/opencontainers/runtime-spec v1.1.0-rc.3.0.20230610073135-48415de180cf/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU=
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI=
@ -958,10 +956,6 @@ k8s.io/kube-openapi v0.0.0-20230525220651-2546d827e515 h1:OmK1d0WrkD3IPfkskvroRy
k8s.io/kube-openapi v0.0.0-20230525220651-2546d827e515/go.mod h1:kzo02I3kQ4BTtEfVLaPbjvCkX97YqGve33wzlb3fofQ=
k8s.io/utils v0.0.0-20230505201702-9f6742963106 h1:EObNQ3TW2D+WptiYXlApGNLVy0zm/JIBVY9i+M4wpAU=
k8s.io/utils v0.0.0-20230505201702-9f6742963106/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
kernel.org/pub/linux/libs/security/libcap/cap v1.2.69 h1:N0m3tKYbkRMmDobh/47ngz+AWeV7PcfXMDi8xu3Vrag=
kernel.org/pub/linux/libs/security/libcap/cap v1.2.69/go.mod h1:Tk5Ip2TuxaWGpccL7//rAsLRH6RQ/jfqTGxuN/+i/FQ=
kernel.org/pub/linux/libs/security/libcap/psx v1.2.69 h1:IdrOs1ZgwGw5CI+BH6GgVVlOt+LAXoPyh7enr8lfaXs=
kernel.org/pub/linux/libs/security/libcap/psx v1.2.69/go.mod h1:+l6Ee2F59XiJ2I6WR5ObpC1utCQJZ/VLsEbQCD8RG24=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=

View File

@ -53,7 +53,6 @@ import (
const (
errFetchXRConnectionDetails = "cannot fetch composite resource connection details"
errGetExistingCDs = "cannot get existing composed resources"
errImgPullCfg = "cannot get xfn image pull config"
errBuildFunctionIOObserved = "cannot build FunctionIO observed state"
errBuildFunctionIODesired = "cannot build initial FunctionIO desired state"
errMarshalXR = "cannot marshal composite resource"

View File

@ -1,19 +0,0 @@
/*
Copyright 2022 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package oci contains functionality for working with Open Container Initiative
// (OCI) images and containers.
package oci

View File

@ -1,342 +0,0 @@
/*
Copyright 2022 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package layer extracts OCI image layer tarballs.
package layer
import (
"archive/tar"
"context"
"io"
"io/fs"
"os"
"path/filepath"
"strings"
securejoin "github.com/cyphar/filepath-securejoin"
"github.com/crossplane/crossplane-runtime/pkg/errors"
)
// Error strings.
const (
	errAdvanceTarball     = "cannot advance to next entry in tarball"
	errExtractTarHeader   = "cannot extract tar header"
	errEvalSymlinks       = "cannot evaluate symlinks"
	errMkdir              = "cannot make directory"
	errLstat              = "cannot lstat directory"
	errChmod              = "cannot chmod path"
	errSymlink            = "cannot create symlink"
	errOpenFile           = "cannot open file"
	errCopyFile           = "cannot copy file"
	errCloseFile          = "cannot close file"

	// errFmt* constants are format strings; use them with errors.Wrapf or
	// errors.Errorf, never with plain errors.Wrap.
	errFmtHandleTarHeader = "cannot handle tar header for %q"
	errFmtWhiteoutFile    = "cannot whiteout file %q"
	errFmtWhiteoutDir     = "cannot whiteout opaque directory %q"
	errFmtUnsupportedType = "tarball contained header %q with unknown type %q"
	errFmtNotDir          = "path %q exists but is not a directory"
	errFmtSize            = "wrote %d bytes to %q; expected %d"
)

// OCI whiteouts.
// See https://github.com/opencontainers/image-spec/blob/v1.0/layer.md#whiteouts
const (
	// A file named .wh.<name> deletes <name> from lower layers.
	ociWhiteoutPrefix     = ".wh."
	// Files with the meta prefix (.wh..wh.) are reserved by the spec.
	ociWhiteoutMetaPrefix = ociWhiteoutPrefix + ociWhiteoutPrefix
	// A .wh..wh..opq entry marks its directory as opaque: all lower-layer
	// siblings are hidden.
	ociWhiteoutOpaqueDir  = ociWhiteoutMetaPrefix + ".opq"
)
// A HeaderHandler handles a single file (header) within a tarball.
type HeaderHandler interface {
	// Handle the supplied tarball header by applying it to the supplied path,
	// e.g. creating a file, directory, etc. The supplied io.Reader is expected
	// to be a tarball advanced to the supplied header, i.e. via tr.Next().
	Handle(h *tar.Header, tr io.Reader, path string) error
}

// A HeaderHandlerFn is a function that acts as a HeaderHandler.
type HeaderHandlerFn func(h *tar.Header, tr io.Reader, path string) error

// Handle the supplied tarball header by calling the function itself.
func (fn HeaderHandlerFn) Handle(h *tar.Header, tr io.Reader, path string) error {
	return fn(h, tr, path)
}
// A StackingExtractor is a Extractor that extracts an OCI layer by
// 'stacking' it atop the supplied root directory.
type StackingExtractor struct {
	h HeaderHandler
}

// NewStackingExtractor extracts an OCI layer by 'stacking' it atop the
// supplied root directory.
func NewStackingExtractor(h HeaderHandler) *StackingExtractor {
	return &StackingExtractor{h: h}
}

// Apply calls the StackingExtractor's HeaderHandler for each file in the
// supplied layer tarball, adjusting their path to be rooted under the supplied
// root directory. That is, /foo would be extracted to /bar as /bar/foo.
func (e *StackingExtractor) Apply(ctx context.Context, tb io.Reader, root string) error {
	tr := tar.NewReader(tb)
	for {
		// Bail out promptly if the context was cancelled mid-extraction.
		if cerr := ctx.Err(); cerr != nil {
			return cerr
		}

		hdr, err := tr.Next()
		switch {
		case errors.Is(err, io.EOF):
			// End of tarball; extraction is complete.
			// TODO(negz): Handle MAC times for directories. This needs to be
			// done last, since mutating a directory's contents will update its
			// MAC times.
			return nil
		case err != nil:
			return errors.Wrap(err, errAdvanceTarball)
		}

		// SecureJoin joins hdr.Name to root, ensuring the resulting path does
		// not escape root either syntactically (via "..") or via symlinks in
		// the path. For example:
		//
		// * Joining "/a" and "../etc/passwd" results in "/a/etc/passwd".
		// * Joining "/a" and "evil/passwd" where "/a/evil" exists and is a
		//   symlink to "/etc" results in "/a/etc/passwd".
		//
		// https://codeql.github.com/codeql-query-help/go/go-unsafe-unzip-symlink/
		path, jerr := securejoin.SecureJoin(root, hdr.Name)
		if jerr != nil {
			return errors.Wrap(jerr, errEvalSymlinks)
		}

		if herr := e.h.Handle(hdr, tr, path); herr != nil {
			return errors.Wrapf(herr, errFmtHandleTarHeader, hdr.Name)
		}
	}
}
// A WhiteoutHandler handles OCI whiteouts by deleting the corresponding files.
// It passes anything that is not a whiteout to an underlying HeaderHandler. It
// avoids deleting any file created by the underlying HeaderHandler.
type WhiteoutHandler struct {
	wrapped HeaderHandler
	// handled records every path the wrapped handler has extracted from this
	// layer, so whiteouts never delete files created by the same layer.
	handled map[string]bool
}

// NewWhiteoutHandler returns a HeaderHandler that handles OCI whiteouts by
// deleting the corresponding files.
func NewWhiteoutHandler(hh HeaderHandler) *WhiteoutHandler {
	return &WhiteoutHandler{wrapped: hh, handled: make(map[string]bool)}
}

// Handle the supplied tar header. Whiteout entries (base name prefixed with
// ".wh.") delete files from lower layers; everything else is delegated to the
// wrapped handler.
func (w *WhiteoutHandler) Handle(h *tar.Header, tr io.Reader, path string) error {
	// If this isn't a whiteout file, extract it.
	if !strings.HasPrefix(filepath.Base(path), ociWhiteoutPrefix) {
		w.handled[path] = true
		return w.wrapped.Handle(h, tr, path)
	}

	// We must only whiteout files from previous layers; i.e. not files that
	// we've extracted from this layer. We're operating on a merged overlayfs,
	// so we can't rely on the filesystem to distinguish what files are from a
	// previous layer. Instead we track which files we've extracted from this
	// layer and avoid whiting-out any file we've extracted. It's possible we'll
	// see a whiteout out-of-order; i.e. we'll whiteout /foo, then later extract
	// /foo from the same layer. This should be fine; we'll delete it, then
	// recreate it, resulting in the desired file in our overlayfs upper dir.
	// https://github.com/opencontainers/image-spec/blob/v1.0/layer.md#whiteouts
	base := filepath.Base(path)
	dir := filepath.Dir(path)

	// Handle explicit whiteout files. These files resolve to an explicit path
	// that should be deleted from the current layer.
	if base != ociWhiteoutOpaqueDir {
		// Strip the ".wh." prefix to recover the path being whited-out.
		whiteout := filepath.Join(dir, base[len(ociWhiteoutPrefix):])
		if w.handled[whiteout] {
			return nil
		}
		// os.RemoveAll returns nil if the path doesn't exist.
		return errors.Wrapf(os.RemoveAll(whiteout), errFmtWhiteoutFile, whiteout)
	}

	// Handle an opaque directory. These files indicate that all siblings in
	// their directory should be deleted from the current layer.
	err := filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error {
		if errors.Is(err, os.ErrNotExist) {
			// Either this path is under a directory we already deleted or we've
			// been asked to whiteout a directory that doesn't exist.
			return nil
		}
		if err != nil {
			return err
		}
		// Don't delete the directory we're whiting out, or a file we've
		// extracted from this layer.
		if path == dir || w.handled[path] {
			return nil
		}
		return os.RemoveAll(path)
	})
	return errors.Wrapf(err, errFmtWhiteoutDir, dir)
}
// An ExtractHandler extracts from a tarball per the supplied tar header by
// calling a handler that knows how to extract the type of file.
type ExtractHandler struct {
	// handler maps tar type flags (e.g. tar.TypeReg) to the HeaderHandler
	// that knows how to extract that kind of entry.
	handler map[byte]HeaderHandler
}

// NewExtractHandler returns a HeaderHandler that extracts from a tarball per
// the supplied tar header by calling a handler that knows how to extract the
// type of file.
func NewExtractHandler() *ExtractHandler {
	return &ExtractHandler{handler: map[byte]HeaderHandler{
		tar.TypeDir:     HeaderHandlerFn(ExtractDir),
		tar.TypeSymlink: HeaderHandlerFn(ExtractSymlink),
		tar.TypeReg:     HeaderHandlerFn(ExtractFile),
		tar.TypeFifo:    HeaderHandlerFn(ExtractFIFO),
		// TODO(negz): Don't extract hard links as symlinks. Creating an actual
		// hard link would require us to securely join the path of the 'root'
		// directory we're untarring into with h.Linkname, but we don't
		// currently plumb the root directory down to this level.
		tar.TypeLink: HeaderHandlerFn(ExtractSymlink),
	}}
}

// Handle creates a file at the supplied path per the supplied tar header.
// Unsupported entry types (e.g. block and character devices) are an error.
func (e *ExtractHandler) Handle(h *tar.Header, tr io.Reader, path string) error {
	// Ensure the parent directory exists; we may see a file before its
	// directory's own tar entry. ExtractDir should correct these permissions.
	if err := os.MkdirAll(filepath.Dir(path), 0750); err != nil {
		return errors.Wrap(err, errMkdir)
	}

	hd, ok := e.handler[h.Typeflag]
	if !ok {
		// Better to return an error than to write a partial layer. Note that
		// tar.TypeBlock and tar.TypeChar in particular are unsupported because
		// they can't be created without CAP_MKNOD in the 'root' user namespace
		// per https://man7.org/linux/man-pages/man7/user_namespaces.7.html
		return errors.Errorf(errFmtUnsupportedType, h.Name, h.Typeflag)
	}

	if err := hd.Handle(h, tr, path); err != nil {
		return errors.Wrap(err, errExtractTarHeader)
	}

	// We expect to have CAP_CHOWN (inside a user namespace) when running
	// this code, but if that namespace was created by a user without
	// CAP_SETUID and CAP_SETGID only one UID and GID (root) will exist and
	// we'll get syscall.EINVAL if we try to chown to any other. We ignore
	// this error and attempt to run the function regardless; functions that
	// run 'as root' (in their namespace) should work fine.
	// TODO(negz): Return this error if it isn't syscall.EINVAL? Currently
	// doing so would require taking a dependency on the syscall package per
	// https://groups.google.com/g/golang-nuts/c/BpWN9N-hw3s.
	_ = os.Lchown(path, h.Uid, h.Gid)

	// TODO(negz): Handle MAC times.

	return nil
}
// ExtractDir is a HeaderHandler that creates a directory at the supplied path
// per the supplied tar header. If the path already exists as a directory its
// permissions are corrected to match the header.
func ExtractDir(h *tar.Header, _ io.Reader, path string) error {
	perm := h.FileInfo().Mode().Perm()

	fi, err := os.Lstat(path)
	switch {
	case errors.Is(err, os.ErrNotExist):
		// Nothing there yet; create the directory (and any parents).
		return errors.Wrap(os.MkdirAll(path, perm), errMkdir)
	case err != nil:
		return errors.Wrap(err, errLstat)
	case !fi.IsDir():
		return errors.Errorf(errFmtNotDir, path)
	}

	// We've been asked to extract a directory that exists; just try to ensure
	// it has the correct permissions. It could be that we saw a file in this
	// directory before we saw the directory itself, and created it with the
	// file's permissions in a MkdirAll call.
	return errors.Wrap(os.Chmod(path, perm), errChmod)
}
// ExtractSymlink is a HeaderHandler that creates a symlink at the supplied path
// per the supplied tar header. The link target is taken verbatim from the
// header's Linkname.
func ExtractSymlink(h *tar.Header, _ io.Reader, path string) error {
	// We don't sanitize h.LinkName (the symlink's target). It will be sanitized
	// by SecureJoin above to prevent malicious writes during the untar process,
	// and will be evaluated relative to root during function execution.
	return errors.Wrap(os.Symlink(h.Linkname, path), errSymlink)
}
// ExtractFile is a HeaderHandler that creates a regular file at the supplied
// path per the supplied tar header, truncating any existing file. It returns
// an error if the number of bytes written differs from the header's size.
func ExtractFile(h *tar.Header, tr io.Reader, path string) error {
	//nolint:gosec // The root of this path is user supplied input.
	dst, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, h.FileInfo().Mode())
	if err != nil {
		return errors.Wrap(err, errOpenFile)
	}

	written, cerr := copyChunks(dst, tr, 1024*1024) // Copy in 1MB chunks.
	if cerr != nil {
		// Best effort close; the copy error is the one worth reporting.
		_ = dst.Close()
		return errors.Wrap(cerr, errCopyFile)
	}
	if err := dst.Close(); err != nil {
		return errors.Wrap(err, errCloseFile)
	}

	if written != h.Size {
		return errors.Errorf(errFmtSize, written, path, h.Size)
	}
	return nil
}
// copyChunks pleases gosec per https://github.com/securego/gosec/pull/433.
// Like Copy it reads from src until EOF, it does not treat an EOF from Read as
// an error to be reported. It returns the total number of bytes written.
//
// NOTE(negz): This rule confused me at first because io.Copy appears to use a
// buffer, but in fact it bypasses it if src/dst is an io.WriterTo/ReaderFrom.
func copyChunks(dst io.Writer, src io.Reader, chunkSize int64) (int64, error) {
	var total int64
	for {
		n, err := io.CopyN(dst, src, chunkSize)
		total += n
		switch {
		case errors.Is(err, io.EOF):
			// Source exhausted; a short final chunk is expected, not an error.
			return total, nil
		case err != nil:
			return total, err
		}
	}
}

View File

@ -1,31 +0,0 @@
//go:build !unix
/*
Copyright 2022 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package layer
import (
"archive/tar"
"io"
"github.com/crossplane/crossplane-runtime/pkg/errors"
)
// ExtractFIFO returns an error on non-Unix systems, where FIFO special files
// cannot be created. This stub satisfies the build for the !unix constraint;
// the Unix implementation lives in a sibling file.
func ExtractFIFO(_ *tar.Header, _ io.Reader, _ string) error {
	return errors.New("FIFOs are only supported on Unix")
}

View File

@ -1,466 +0,0 @@
/*
Copyright 2022 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package layer
import (
"archive/tar"
"bytes"
"context"
"io"
"os"
"path/filepath"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/crossplane/crossplane-runtime/pkg/errors"
"github.com/crossplane/crossplane-runtime/pkg/test"
)
// MockHandler is a HeaderHandler test double that ignores its arguments and
// always returns its configured error (nil by default).
type MockHandler struct{ err error }

// Handle returns the mock's configured error.
func (h *MockHandler) Handle(_ *tar.Header, _ io.Reader, _ string) error {
	return h.err
}
// TestStackingExtractor covers Apply's context cancellation, tarball parse
// failures, header-handler error propagation, and the happy path.
func TestStackingExtractor(t *testing.T) {
	errBoom := errors.New("boom")
	coolFile := "/cool/file"

	// A context that is already done exercises early cancellation.
	cancelled, cancel := context.WithCancel(context.Background())
	cancel()

	type args struct {
		ctx  context.Context
		tb   io.Reader
		root string
	}
	cases := map[string]struct {
		reason string
		e      *StackingExtractor
		args   args
		want   error
	}{
		"ContextDone": {
			reason: "If the supplied context is done we should return its error.",
			e:      NewStackingExtractor(&MockHandler{}),
			args: args{
				ctx: cancelled,
			},
			want: cancelled.Err(),
		},
		"NotATarball": {
			reason: "If the supplied io.Reader is not a tarball we should return an error.",
			e:      NewStackingExtractor(&MockHandler{}),
			args: args{
				ctx: context.Background(),
				tb: func() io.Reader {
					b := &bytes.Buffer{}
					_, _ = b.WriteString("hi!")
					return b
				}(),
			},
			// archive/tar reports truncated input as "unexpected EOF".
			want: errors.Wrap(errors.New("unexpected EOF"), errAdvanceTarball),
		},
		"ErrorHandlingHeader": {
			reason: "If our HeaderHandler returns an error we should surface it.",
			e:      NewStackingExtractor(&MockHandler{err: errBoom}),
			args: args{
				ctx: context.Background(),
				tb: func() io.Reader {
					b := &bytes.Buffer{}
					tb := tar.NewWriter(b)
					tb.WriteHeader(&tar.Header{
						Typeflag: tar.TypeReg,
						Name:     coolFile,
					})
					_, _ = io.WriteString(tb, "hi!")
					tb.Close()
					return b
				}(),
			},
			want: errors.Wrapf(errBoom, errFmtHandleTarHeader, coolFile),
		},
		"Success": {
			reason: "If we successfully extract our tarball we should return a nil error.",
			e:      NewStackingExtractor(&MockHandler{}),
			args: args{
				ctx: context.Background(),
				tb: func() io.Reader {
					b := &bytes.Buffer{}
					tb := tar.NewWriter(b)
					tb.WriteHeader(&tar.Header{
						Typeflag: tar.TypeReg,
						Name:     coolFile,
					})
					_, _ = io.WriteString(tb, "hi!")
					tb.Close()
					return b
				}(),
			},
			want: nil,
		},
	}

	for name, tc := range cases {
		t.Run(name, func(t *testing.T) {
			err := tc.e.Apply(tc.args.ctx, tc.args.tb, tc.args.root)
			if diff := cmp.Diff(tc.want, err, test.EquateErrors()); diff != "" {
				t.Errorf("\n%s\ne.Apply(...): -want error, +got error:\n%s", tc.reason, diff)
			}
		})
	}
}
// TestWhiteoutHandler covers delegation of non-whiteout entries, the
// already-handled guard, explicit file whiteouts, and opaque directories.
func TestWhiteoutHandler(t *testing.T) {
	errBoom := errors.New("boom")

	// Real temp-dir fixtures; whiteout handling touches the filesystem.
	tmp, _ := os.MkdirTemp(os.TempDir(), t.Name())
	defer os.RemoveAll(tmp)

	coolDir := filepath.Join(tmp, "cool")
	coolFile := filepath.Join(coolDir, "file")
	coolWhiteout := filepath.Join(coolDir, ociWhiteoutPrefix+"file")
	_ = os.MkdirAll(coolDir, 0700)

	opaqueDir := filepath.Join(tmp, "opaque")
	opaqueDirWhiteout := filepath.Join(opaqueDir, ociWhiteoutOpaqueDir)
	_ = os.MkdirAll(opaqueDir, 0700)
	f, _ := os.Create(filepath.Join(opaqueDir, "some-file"))
	f.Close()

	nonExistentDirWhiteout := filepath.Join(tmp, "non-exist", ociWhiteoutOpaqueDir)

	type args struct {
		h    *tar.Header
		tr   io.Reader
		path string
	}
	cases := map[string]struct {
		reason string
		h      HeaderHandler
		args   args
		want   error
	}{
		"NotAWhiteout": {
			reason: "Files that aren't whiteouts should be passed to the underlying handler.",
			h:      NewWhiteoutHandler(&MockHandler{err: errBoom}),
			args: args{
				path: coolFile,
			},
			want: errBoom,
		},
		"HeaderAlreadyHandled": {
			reason: "We shouldn't whiteout a file that was already handled.",
			h: func() HeaderHandler {
				w := NewWhiteoutHandler(&MockHandler{})
				_ = w.Handle(nil, nil, coolFile) // Handle the file we'll try to whiteout.
				return w
			}(),
			args: args{
				path: coolWhiteout,
			},
			want: nil,
		},
		"WhiteoutFile": {
			reason: "We should delete a whited-out file.",
			h:      NewWhiteoutHandler(&MockHandler{}),
			args: args{
				// NOTE(review): coolWhiteout is already rooted under tmp, so
				// this join yields a path that doesn't exist; os.RemoveAll
				// tolerates that — confirm this is the intended fixture.
				path: filepath.Join(tmp, coolWhiteout),
			},
			// os.RemoveAll won't return an error even if this doesn't exist.
			want: nil,
		},
		"OpaqueDirDoesNotExist": {
			reason: "We should return early if asked to whiteout a directory that doesn't exist.",
			h:      NewWhiteoutHandler(&MockHandler{}),
			args: args{
				path: nonExistentDirWhiteout,
			},
			want: nil,
		},
		"WhiteoutOpaqueDir": {
			reason: "We should whiteout all files in an opaque directory.",
			h:      NewWhiteoutHandler(&MockHandler{}),
			args: args{
				path: opaqueDirWhiteout,
			},
			want: nil,
		},
	}

	for name, tc := range cases {
		t.Run(name, func(t *testing.T) {
			err := tc.h.Handle(tc.args.h, tc.args.tr, tc.args.path)
			if diff := cmp.Diff(tc.want, err, test.EquateErrors()); diff != "" {
				t.Errorf("\n%s\nh.Handle(...): -want error, +got error:\n%s", tc.reason, diff)
			}
		})
	}
}
// TestExtractHandler covers dispatch on tar type flags: unsupported types,
// handler error propagation, and successful delegation (including Lchown to
// our own UID/GID).
func TestExtractHandler(t *testing.T) {
	errBoom := errors.New("boom")

	tmp, _ := os.MkdirTemp(os.TempDir(), t.Name())
	defer os.RemoveAll(tmp)

	coolDir := filepath.Join(tmp, "cool")
	coolFile := filepath.Join(coolDir, "file")

	type args struct {
		h    *tar.Header
		tr   io.Reader
		path string
	}
	cases := map[string]struct {
		reason string
		h      HeaderHandler
		args   args
		want   error
	}{
		"UnsupportedMode": {
			reason: "Handling an unsupported file type should return an error.",
			// An empty dispatch map means every type flag is unsupported.
			h:      &ExtractHandler{handler: map[byte]HeaderHandler{}},
			args: args{
				h: &tar.Header{
					Typeflag: tar.TypeReg,
					Name:     coolFile,
				},
			},
			want: errors.Errorf(errFmtUnsupportedType, coolFile, tar.TypeReg),
		},
		"HandlerError": {
			reason: "Errors from an underlying handler should be returned.",
			h: &ExtractHandler{handler: map[byte]HeaderHandler{
				tar.TypeReg: &MockHandler{err: errBoom},
			}},
			args: args{
				h: &tar.Header{
					Typeflag: tar.TypeReg,
					Name:     coolFile,
				},
			},
			want: errors.Wrap(errBoom, errExtractTarHeader),
		},
		"Success": {
			reason: "If the underlying handler works we should return a nil error.",
			h: &ExtractHandler{handler: map[byte]HeaderHandler{
				tar.TypeReg: &MockHandler{},
			}},
			args: args{
				h: &tar.Header{
					Typeflag: tar.TypeReg,
					// We don't currently check the return value of Lchown, but
					// this will increase the chances it works by ensuring we
					// try to chown to our own UID/GID.
					Uid: os.Getuid(),
					Gid: os.Getgid(),
				},
				path: coolFile,
			},
			want: nil,
		},
	}

	for name, tc := range cases {
		t.Run(name, func(t *testing.T) {
			err := tc.h.Handle(tc.args.h, tc.args.tr, tc.args.path)
			if diff := cmp.Diff(tc.want, err, test.EquateErrors()); diff != "" {
				t.Errorf("\n%s\nh.Handle(...): -want error, +got error:\n%s", tc.reason, diff)
			}
		})
	}
}
// TestExtractDir covers directory extraction: a path that exists but isn't a
// directory, creating a new directory, and chmod-ing an existing one.
func TestExtractDir(t *testing.T) {
	tmp, _ := os.MkdirTemp(os.TempDir(), t.Name())
	defer os.RemoveAll(tmp)

	newDir := filepath.Join(tmp, "new")
	existingDir := filepath.Join(tmp, "existing-dir")
	existingFile := filepath.Join(tmp, "existing-file")
	_ = os.MkdirAll(existingDir, 0700)
	f, _ := os.Create(existingFile)
	f.Close()

	type args struct {
		h    *tar.Header
		tr   io.Reader
		path string
	}
	cases := map[string]struct {
		reason string
		h      HeaderHandler
		args   args
		want   error
	}{
		"ExistingPathIsNotADir": {
			reason: "We should return an error if trying to extract a dir to a path that exists but is not a dir.",
			h:      HeaderHandlerFn(ExtractDir),
			args: args{
				h:    &tar.Header{Mode: 0700},
				path: existingFile,
			},
			want: errors.Errorf(errFmtNotDir, existingFile),
		},
		"SuccessfulCreate": {
			reason: "We should not return an error if we can create the dir.",
			h:      HeaderHandlerFn(ExtractDir),
			args: args{
				h:    &tar.Header{Mode: 0700},
				path: newDir,
			},
			want: nil,
		},
		"SuccessfulChmod": {
			reason: "We should not return an error if we can chmod the existing dir",
			h:      HeaderHandlerFn(ExtractDir),
			args: args{
				h:    &tar.Header{Mode: 0700},
				path: existingDir,
			},
			want: nil,
		},
	}

	for name, tc := range cases {
		t.Run(name, func(t *testing.T) {
			err := tc.h.Handle(tc.args.h, tc.args.tr, tc.args.path)
			if diff := cmp.Diff(tc.want, err, test.EquateErrors()); diff != "" {
				t.Errorf("\n%s\nh.Handle(...): -want error, +got error:\n%s", tc.reason, diff)
			}
		})
	}
}
// TestExtractSymlink exercises ExtractSymlink, which creates a symlink at the
// supplied path pointing at the header's Linkname.
func TestExtractSymlink(t *testing.T) {
	tmp, _ := os.MkdirTemp(os.TempDir(), t.Name())
	defer os.RemoveAll(tmp)
	linkSrc := filepath.Join(tmp, "src")
	linkDst := filepath.Join(tmp, "dst")
	// Creating a symlink inside a directory that doesn't exist must fail.
	inNonExistentDir := filepath.Join(tmp, "non-exist", "src")
	type args struct {
		h    *tar.Header
		tr   io.Reader
		path string
	}
	cases := map[string]struct {
		reason string
		h      HeaderHandler
		args   args
		want   error
	}{
		"SymlinkError": {
			reason: "We should return an error if we can't create a symlink",
			h:      HeaderHandlerFn(ExtractSymlink),
			args: args{
				h:    &tar.Header{Linkname: linkDst},
				path: inNonExistentDir,
			},
			// NOTE(review): assumes the OS error text of a Unix-y platform -
			// this expectation would not hold on Windows.
			want: errors.Wrap(errors.Errorf("symlink %s %s: no such file or directory", linkDst, inNonExistentDir), errSymlink),
		},
		"Successful": {
			reason: "We should not return an error if we can create a symlink",
			h:      HeaderHandlerFn(ExtractSymlink),
			args: args{
				h:    &tar.Header{Linkname: linkDst},
				path: linkSrc,
			},
			want: nil,
		},
	}
	for name, tc := range cases {
		t.Run(name, func(t *testing.T) {
			err := tc.h.Handle(tc.args.h, tc.args.tr, tc.args.path)
			if diff := cmp.Diff(tc.want, err, test.EquateErrors()); diff != "" {
				t.Errorf("\n%s\nh.Handle(...): -want error, +got error:\n%s", tc.reason, diff)
			}
		})
	}
}
// TestExtractFile exercises ExtractFile, which writes a regular file at the
// supplied path using the mode recorded in the tar header and the content
// read from the supplied reader.
func TestExtractFile(t *testing.T) {
	tmp, _ := os.MkdirTemp(os.TempDir(), t.Name())
	defer os.RemoveAll(tmp)
	inNonExistentDir := filepath.Join(tmp, "non-exist", "file")
	newFile := filepath.Join(tmp, "coolFile")
	type args struct {
		h    *tar.Header
		tr   io.Reader
		path string
	}
	cases := map[string]struct {
		reason string
		h      HeaderHandler
		args   args
		want   error
	}{
		"OpenFileError": {
			reason: "We should return an error if we can't create a file",
			h:      HeaderHandlerFn(ExtractFile),
			args: args{
				h:    &tar.Header{},
				path: inNonExistentDir,
			},
			want: errors.Wrap(errors.Errorf("open %s: no such file or directory", inNonExistentDir), errOpenFile),
		},
		// NOTE(review): fixed test case name typo ("SuccessfulWRite").
		"SuccessfulWrite": {
			reason: "We should return a nil error if we successfully wrote the file.",
			h:      HeaderHandlerFn(ExtractFile),
			args: func() args {
				// Build an in-memory tar stream containing one regular file
				// so ExtractFile has real content to copy.
				b := &bytes.Buffer{}
				tw := tar.NewWriter(b)
				content := []byte("hi!")
				h := &tar.Header{
					Typeflag: tar.TypeReg,
					Mode:     0600,
					Size:     int64(len(content)),
				}
				_ = tw.WriteHeader(h)
				_, _ = tw.Write(content)
				_ = tw.Close()
				tr := tar.NewReader(b)
				// Advance the reader to the file we just wrote.
				tr.Next()
				return args{
					h:    h,
					tr:   tr,
					path: newFile,
				}
			}(),
			want: nil,
		},
	}
	for name, tc := range cases {
		t.Run(name, func(t *testing.T) {
			err := tc.h.Handle(tc.args.h, tc.args.tr, tc.args.path)
			if diff := cmp.Diff(tc.want, err, test.EquateErrors()); diff != "" {
				t.Errorf("\n%s\nh.Handle(...): -want error, +got error:\n%s", tc.reason, diff)
			}
		})
	}
}

View File

@ -1,45 +0,0 @@
//go:build unix
/*
Copyright 2022 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package layer
import (
"archive/tar"
"io"
"golang.org/x/sys/unix"
"github.com/crossplane/crossplane-runtime/pkg/errors"
)
// Error strings.
const (
	// errCreateFIFO wraps failures of the mknod syscall in ExtractFIFO.
	errCreateFIFO = "cannot create FIFO"
)
// ExtractFIFO is a HeaderHandler that creates a FIFO at the supplied path per
// the supplied tar header.
func ExtractFIFO(h *tar.Header, _ io.Reader, path string) error {
	// We won't have CAP_MKNOD in a user namespace created by a user who
	// doesn't have CAP_MKNOD in the initial/root user namespace, but mknod
	// doesn't need it to create a FIFO.
	// https://man7.org/linux/man-pages/man2/mknod.2.html
	perm := uint32(h.Mode & 0777)
	dev := unix.Mkdev(uint32(h.Devmajor), uint32(h.Devminor))
	err := unix.Mknod(path, perm|unix.S_IFIFO, int(dev))
	return errors.Wrap(err, errCreateFIFO)
}

View File

@ -1,78 +0,0 @@
/*
Copyright 2022 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package layer
import (
"archive/tar"
"io"
"os"
"path/filepath"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/crossplane/crossplane-runtime/pkg/errors"
"github.com/crossplane/crossplane-runtime/pkg/test"
)
// TestExtractFIFO exercises ExtractFIFO, which creates a FIFO (named pipe) at
// the supplied path via mknod.
func TestExtractFIFO(t *testing.T) {
	tmp, _ := os.MkdirTemp(os.TempDir(), t.Name())
	defer os.RemoveAll(tmp)
	inNonExistentDir := filepath.Join(tmp, "non-exist", "src")
	newFIFO := filepath.Join(tmp, "fifo")
	type args struct {
		h    *tar.Header
		tr   io.Reader
		path string
	}
	cases := map[string]struct {
		reason string
		h      HeaderHandler
		args   args
		want   error
	}{
		"FIFOError": {
			reason: "We should return an error if we can't create a FIFO",
			h:      HeaderHandlerFn(ExtractFIFO),
			args: args{
				h:    &tar.Header{Mode: 0700},
				path: inNonExistentDir,
			},
			want: errors.Wrap(errors.New("no such file or directory"), errCreateFIFO),
		},
		"Successful": {
			// NOTE(review): this reason previously said "symlink" - a
			// copy-paste from TestExtractSymlink. This test creates a FIFO.
			reason: "We should not return an error if we can create a FIFO",
			h:      HeaderHandlerFn(ExtractFIFO),
			args: args{
				h:    &tar.Header{Mode: 0700},
				path: newFIFO,
			},
			want: nil,
		},
	}
	for name, tc := range cases {
		t.Run(name, func(t *testing.T) {
			err := tc.h.Handle(tc.args.h, tc.args.tr, tc.args.path)
			if diff := cmp.Diff(tc.want, err, test.EquateErrors()); diff != "" {
				t.Errorf("\n%s\nh.Handle(...): -want error, +got error:\n%s", tc.reason, diff)
			}
		})
	}
}

View File

@ -1,250 +0,0 @@
/*
Copyright 2022 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package oci
import (
"context"
"crypto/tls"
"crypto/x509"
"net/http"
"github.com/google/go-containerregistry/pkg/authn"
"github.com/google/go-containerregistry/pkg/name"
ociv1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/remote"
"github.com/crossplane/crossplane-runtime/pkg/errors"
)
// Error strings.
const (
	errPullNever      = "refusing to pull from remote with image pull policy " + string(ImagePullPolicyNever)
	errNewDigestStore = "cannot create new image digest store"
	errPullImage      = "cannot pull image from remote"
	errStoreImage     = "cannot cache image"
	errImageDigest    = "cannot get image digest"
	errStoreDigest    = "cannot cache image digest"
	errLoadImage      = "cannot load image from cache"
	errLoadHash       = "cannot load image digest"
)

// An ImagePullPolicy dictates when an image may be pulled from a remote.
type ImagePullPolicy string

// Image pull policies
const (
	// ImagePullPolicyIfNotPresent only pulls from a remote if the image is not
	// in the local cache. It is equivalent to ImagePullPolicyNever with a
	// fall-back to ImagePullPolicyAlways.
	ImagePullPolicyIfNotPresent ImagePullPolicy = "IfNotPresent"
	// ImagePullPolicyAlways always pulls at least the image manifest from the
	// remote. Layers are pulled if they are not in cache.
	ImagePullPolicyAlways ImagePullPolicy = "Always"
	// ImagePullPolicyNever never pulls anything from the remote. It resolves
	// OCI references to digests (i.e. SHAs) using a local cache of known
	// mappings.
	ImagePullPolicyNever ImagePullPolicy = "Never"
)

// ImagePullAuth configures authentication to a remote registry.
type ImagePullAuth struct {
	// Username and Password for basic authentication.
	Username string
	Password string
	// Auth is passed through verbatim to authn.AuthConfig.Auth.
	Auth string
	// IdentityToken is used to authenticate the user and get
	// an access token for the registry.
	IdentityToken string
	// RegistryToken is a bearer token to be sent to a registry.
	RegistryToken string
}
// Authorization builds a go-containerregistry compatible AuthConfig.
func (a ImagePullAuth) Authorization() (*authn.AuthConfig, error) {
	// This is a straight field-for-field translation; the error return only
	// exists to satisfy the authn.Authenticator interface.
	cfg := authn.AuthConfig{
		Username:      a.Username,
		Password:      a.Password,
		Auth:          a.Auth,
		IdentityToken: a.IdentityToken,
		RegistryToken: a.RegistryToken,
	}
	return &cfg, nil
}
// ImageClientOptions configure an ImageClient.
type ImageClientOptions struct {
	pull      ImagePullPolicy
	auth      *ImagePullAuth
	transport *http.Transport
}

// parse folds the supplied options into a set of defaults.
func parse(o ...ImageClientOption) ImageClientOptions {
	// ImagePullPolicyIfNotPresent is the default pull policy.
	opts := ImageClientOptions{pull: ImagePullPolicyIfNotPresent}
	for _, apply := range o {
		apply(&opts)
	}
	return opts
}
// An ImageClientOption configures an ImageClient.
type ImageClientOption func(c *ImageClientOptions)

// WithPullPolicy specifies whether a client may pull from a remote.
func WithPullPolicy(p ImagePullPolicy) ImageClientOption {
	return func(opts *ImageClientOptions) {
		opts.pull = p
	}
}

// WithPullAuth specifies how a client should authenticate to a remote.
func WithPullAuth(a *ImagePullAuth) ImageClientOption {
	return func(opts *ImageClientOptions) {
		opts.auth = a
	}
}

// WithCustomCA adds given root certificates to tls client configuration
func WithCustomCA(rootCAs *x509.CertPool) ImageClientOption {
	return func(opts *ImageClientOptions) {
		// Clone the default transport so we inherit its defaults and only
		// override the TLS configuration.
		t := remote.DefaultTransport.(*http.Transport).Clone()
		t.TLSClientConfig = &tls.Config{RootCAs: rootCAs, MinVersion: tls.VersionTLS12}
		opts.transport = t
	}
}
// An ImageClient is an OCI registry client.
type ImageClient interface {
	// Image pulls an OCI image.
	Image(ctx context.Context, ref name.Reference, o ...ImageClientOption) (ociv1.Image, error)
}

// An ImageCache caches OCI images.
type ImageCache interface {
	// Image returns the cached image with the supplied digest, if any.
	Image(h ociv1.Hash) (ociv1.Image, error)
	// WriteImage writes the supplied image to the cache.
	WriteImage(img ociv1.Image) error
}

// A HashCache maps OCI references to hashes.
type HashCache interface {
	// Hash returns the digest cached for the supplied reference, if any.
	Hash(r name.Reference) (ociv1.Hash, error)
	// WriteHash records the digest for the supplied reference.
	WriteHash(r name.Reference, h ociv1.Hash) error
}
// A RemoteClient fetches OCI image manifests.
type RemoteClient struct{}

// Image fetches an image manifest. The returned image lazily pulls its layers.
func (i *RemoteClient) Image(ctx context.Context, ref name.Reference, o ...ImageClientOption) (ociv1.Image, error) {
	opts := parse(o...)
	// A remote client can never satisfy ImagePullPolicyNever.
	if opts.pull == ImagePullPolicyNever {
		return nil, errors.New(errPullNever)
	}
	ropts := []remote.Option{remote.WithContext(ctx)}
	if opts.auth != nil {
		ropts = append(ropts, remote.WithAuth(opts.auth))
	}
	if opts.transport != nil {
		ropts = append(ropts, remote.WithTransport(opts.transport))
	}
	return remote.Image(ref, ropts...)
}
// A CachingPuller pulls OCI images. Images are pulled either from a local cache
// or a remote depending on whether they are available locally and a supplied
// ImagePullPolicy.
type CachingPuller struct {
	remote  ImageClient
	local   ImageCache
	mapping HashCache
}

// NewCachingPuller returns an OCI image puller with a local cache.
func NewCachingPuller(h HashCache, i ImageCache, r ImageClient) *CachingPuller {
	return &CachingPuller{
		remote:  r,
		local:   i,
		mapping: h,
	}
}
// Image pulls the supplied image and all of its layers. The supplied config
// determines where the image may be pulled from - i.e. the local store or a
// remote. Images that are pulled from a remote are cached in the local store.
func (f *CachingPuller) Image(ctx context.Context, r name.Reference, o ...ImageClientOption) (ociv1.Image, error) {
	switch parse(o...).pull {
	case ImagePullPolicyNever:
		return f.never(r)
	case ImagePullPolicyAlways:
		return f.always(ctx, r, o...)
	}
	// ImagePullPolicyIfNotPresent (the default, and any unknown policy):
	// prefer the local cache, falling back to the remote on a miss.
	if img, err := f.never(r); err == nil {
		return img, nil
	}
	return f.always(ctx, r, o...)
}
// never resolves the supplied reference to a digest and serves the image from
// the local cache, without ever touching the remote.
func (f *CachingPuller) never(r name.Reference) (ociv1.Image, error) {
	var h ociv1.Hash
	var err error
	if d, ok := r.(name.Digest); ok {
		// The digest was specified explicitly; skip the cache lookup.
		h, err = ociv1.NewHash(d.DigestStr())
	} else {
		h, err = f.mapping.Hash(r)
	}
	if err != nil {
		return nil, errors.Wrap(err, errLoadHash)
	}
	img, err := f.local.Image(h)
	return img, errors.Wrap(err, errLoadImage)
}
// always pulls the image manifest from the remote, caches the image and its
// missing layers locally, records the reference-to-digest mapping, and then
// serves the image back from the local cache so subsequent reads hit disk.
func (f *CachingPuller) always(ctx context.Context, r name.Reference, o ...ImageClientOption) (ociv1.Image, error) {
	// This will only pull the image's manifest and config, not layers.
	img, err := f.remote.Image(ctx, r, o...)
	if err != nil {
		return nil, errors.Wrap(err, errPullImage)
	}
	// This will fetch any layers that aren't already in the store.
	if err := f.local.WriteImage(img); err != nil {
		return nil, errors.Wrap(err, errStoreImage)
	}
	d, err := img.Digest()
	if err != nil {
		return nil, errors.Wrap(err, errImageDigest)
	}
	// Store a mapping from this reference to its digest.
	if err := f.mapping.WriteHash(r, d); err != nil {
		return nil, errors.Wrap(err, errStoreDigest)
	}
	// Return the stored image to ensure future reads are from disk, not
	// from remote.
	img, err = f.local.Image(d)
	return img, errors.Wrap(err, errLoadImage)
}

View File

@ -1,402 +0,0 @@
/*
Copyright 2022 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package oci
import (
"context"
"crypto/x509"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/google/go-containerregistry/pkg/name"
ociv1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/crossplane/crossplane-runtime/pkg/errors"
"github.com/crossplane/crossplane-runtime/pkg/test"
)
// MockImage is an ociv1.Image whose Digest method can be overridden; all other
// methods are inherited from the embedded (nil) ociv1.Image and will panic if
// called.
type MockImage struct {
	ociv1.Image
	MockDigest func() (ociv1.Hash, error)
}

// Digest calls the mock.
func (i *MockImage) Digest() (ociv1.Hash, error) { return i.MockDigest() }

// MockImageClient is a mock ImageClient.
type MockImageClient struct {
	MockImage func(ctx context.Context, ref name.Reference, o ...ImageClientOption) (ociv1.Image, error)
}

// Image calls the mock.
func (c *MockImageClient) Image(ctx context.Context, ref name.Reference, o ...ImageClientOption) (ociv1.Image, error) {
	return c.MockImage(ctx, ref, o...)
}

// MockImageCache is a mock ImageCache.
type MockImageCache struct {
	MockImage      func(h ociv1.Hash) (ociv1.Image, error)
	MockWriteImage func(img ociv1.Image) error
}

// Image calls the mock.
func (c *MockImageCache) Image(h ociv1.Hash) (ociv1.Image, error) {
	return c.MockImage(h)
}

// WriteImage calls the mock.
func (c *MockImageCache) WriteImage(img ociv1.Image) error {
	return c.MockWriteImage(img)
}

// MockHashCache is a mock HashCache.
type MockHashCache struct {
	MockHash      func(r name.Reference) (ociv1.Hash, error)
	MockWriteHash func(r name.Reference, h ociv1.Hash) error
}

// Hash calls the mock.
func (c *MockHashCache) Hash(r name.Reference) (ociv1.Hash, error) {
	return c.MockHash(r)
}

// WriteHash calls the mock.
func (c *MockHashCache) WriteHash(r name.Reference, h ociv1.Hash) error {
	return c.MockWriteHash(r, h)
}
// TestImage exercises CachingPuller.Image across all three pull policies,
// using mocks for the remote client and both caches.
func TestImage(t *testing.T) {
	errBoom := errors.New("boom")
	coolImage := &MockImage{}
	type args struct {
		ctx context.Context
		r   name.Reference
		o   []ImageClientOption
	}
	type want struct {
		i   ociv1.Image
		err error
	}
	cases := map[string]struct {
		reason string
		p      *CachingPuller
		args   args
		want   want
	}{
		"NeverPullHashError": {
			reason: "We should return an error if we must but can't read a hash from our HashStore.",
			p: NewCachingPuller(
				&MockHashCache{
					MockHash: func(r name.Reference) (ociv1.Hash, error) { return ociv1.Hash{}, errBoom },
				},
				&MockImageCache{},
				&MockImageClient{},
			),
			args: args{
				o: []ImageClientOption{WithPullPolicy(ImagePullPolicyNever)},
			},
			want: want{
				err: errors.Wrap(errBoom, errLoadHash),
			},
		},
		"NeverPullImageError": {
			reason: "We should return an error if we must but can't read our image from cache.",
			p: NewCachingPuller(
				&MockHashCache{
					MockHash: func(r name.Reference) (ociv1.Hash, error) { return ociv1.Hash{}, nil },
				},
				&MockImageCache{
					MockImage: func(h ociv1.Hash) (ociv1.Image, error) { return nil, errBoom },
				},
				&MockImageClient{},
			),
			args: args{
				o: []ImageClientOption{WithPullPolicy(ImagePullPolicyNever)},
			},
			want: want{
				err: errors.Wrap(errBoom, errLoadImage),
			},
		},
		"NeverPullSuccess": {
			reason: "We should return our image from cache.",
			p: NewCachingPuller(
				&MockHashCache{
					MockHash: func(r name.Reference) (ociv1.Hash, error) { return ociv1.Hash{}, nil },
				},
				&MockImageCache{
					MockImage: func(h ociv1.Hash) (ociv1.Image, error) { return coolImage, nil },
				},
				&MockImageClient{},
			),
			args: args{
				o: []ImageClientOption{WithPullPolicy(ImagePullPolicyNever)},
			},
			want: want{
				i: coolImage,
			},
		},
		"NeverPullSuccessExplicit": {
			reason: "We should return our image from cache without looking up its digest if the digest was specified explicitly.",
			p: NewCachingPuller(
				// MockHash deliberately unset: resolving via the HashCache
				// would panic, proving the explicit digest path is taken.
				&MockHashCache{},
				&MockImageCache{
					MockImage: func(h ociv1.Hash) (ociv1.Image, error) {
						if h.Hex != "c34045c1a1db8d1b3fca8a692198466952daae07eaf6104b4c87ed3b55b6af1b" {
							return nil, errors.New("unexpected hash")
						}
						return coolImage, nil
					},
				},
				&MockImageClient{},
			),
			args: args{
				r: name.MustParseReference("example.org/coolimage@sha256:c34045c1a1db8d1b3fca8a692198466952daae07eaf6104b4c87ed3b55b6af1b"),
				o: []ImageClientOption{WithPullPolicy(ImagePullPolicyNever)},
			},
			want: want{
				i: coolImage,
			},
		},
		"AlwaysPullRemoteError": {
			reason: "We should return an error if we must but can't pull our image manifest from the remote.",
			p: NewCachingPuller(
				&MockHashCache{},
				&MockImageCache{},
				&MockImageClient{
					MockImage: func(ctx context.Context, ref name.Reference, o ...ImageClientOption) (ociv1.Image, error) {
						return nil, errBoom
					},
				},
			),
			args: args{
				o: []ImageClientOption{WithPullPolicy(ImagePullPolicyAlways)},
			},
			want: want{
				err: errors.Wrap(errBoom, errPullImage),
			},
		},
		"AlwaysPullWriteImageError": {
			reason: "We should return an error if we must but can't write our image to the local cache.",
			p: NewCachingPuller(
				&MockHashCache{},
				&MockImageCache{
					MockWriteImage: func(img ociv1.Image) error { return errBoom },
				},
				&MockImageClient{
					MockImage: func(ctx context.Context, ref name.Reference, o ...ImageClientOption) (ociv1.Image, error) {
						return nil, nil
					},
				},
			),
			args: args{
				o: []ImageClientOption{WithPullPolicy(ImagePullPolicyAlways)},
			},
			want: want{
				err: errors.Wrap(errBoom, errStoreImage),
			},
		},
		"AlwaysPullImageDigestError": {
			reason: "We should return an error if we can't get our image's digest.",
			p: NewCachingPuller(
				&MockHashCache{},
				&MockImageCache{
					MockWriteImage: func(img ociv1.Image) error { return nil },
				},
				&MockImageClient{
					MockImage: func(ctx context.Context, ref name.Reference, o ...ImageClientOption) (ociv1.Image, error) {
						return &MockImage{
							MockDigest: func() (ociv1.Hash, error) { return ociv1.Hash{}, errBoom },
						}, nil
					},
				},
			),
			args: args{
				o: []ImageClientOption{WithPullPolicy(ImagePullPolicyAlways)},
			},
			want: want{
				err: errors.Wrap(errBoom, errImageDigest),
			},
		},
		"AlwaysPullWriteDigestError": {
			reason: "We should return an error if we can't write our digest mapping to the cache.",
			p: NewCachingPuller(
				&MockHashCache{
					MockWriteHash: func(r name.Reference, h ociv1.Hash) error { return errBoom },
				},
				&MockImageCache{
					MockWriteImage: func(img ociv1.Image) error { return nil },
				},
				&MockImageClient{
					MockImage: func(ctx context.Context, ref name.Reference, o ...ImageClientOption) (ociv1.Image, error) {
						return &MockImage{
							MockDigest: func() (ociv1.Hash, error) { return ociv1.Hash{}, nil },
						}, nil
					},
				},
			),
			args: args{
				o: []ImageClientOption{WithPullPolicy(ImagePullPolicyAlways)},
			},
			want: want{
				err: errors.Wrap(errBoom, errStoreDigest),
			},
		},
		"AlwaysPullImageError": {
			reason: "We should return an error if we must but can't read our image back from cache.",
			p: NewCachingPuller(
				&MockHashCache{
					MockWriteHash: func(r name.Reference, h ociv1.Hash) error { return nil },
				},
				&MockImageCache{
					MockWriteImage: func(img ociv1.Image) error { return nil },
					MockImage:      func(h ociv1.Hash) (ociv1.Image, error) { return nil, errBoom },
				},
				&MockImageClient{
					MockImage: func(ctx context.Context, ref name.Reference, o ...ImageClientOption) (ociv1.Image, error) {
						return &MockImage{
							MockDigest: func() (ociv1.Hash, error) { return ociv1.Hash{}, nil },
						}, nil
					},
				},
			),
			args: args{
				o: []ImageClientOption{WithPullPolicy(ImagePullPolicyAlways)},
			},
			want: want{
				err: errors.Wrap(errBoom, errLoadImage),
			},
		},
		"AlwaysPullSuccess": {
			reason: "We should return a pulled and cached image.",
			p: NewCachingPuller(
				&MockHashCache{
					MockWriteHash: func(r name.Reference, h ociv1.Hash) error { return nil },
				},
				&MockImageCache{
					MockWriteImage: func(img ociv1.Image) error { return nil },
					MockImage:      func(h ociv1.Hash) (ociv1.Image, error) { return &MockImage{}, nil },
				},
				&MockImageClient{
					MockImage: func(ctx context.Context, ref name.Reference, o ...ImageClientOption) (ociv1.Image, error) {
						return &MockImage{
							MockDigest: func() (ociv1.Hash, error) { return ociv1.Hash{}, nil },
						}, nil
					},
				},
			),
			args: args{
				o: []ImageClientOption{WithPullPolicy(ImagePullPolicyAlways)},
			},
			want: want{
				i: &MockImage{},
			},
		},
		"PullWithCustomCA": {
			reason: "We should return a pulled and cached image.",
			p: NewCachingPuller(
				&MockHashCache{
					MockHash: func(r name.Reference) (ociv1.Hash, error) {
						return ociv1.Hash{}, errors.New("this error should not be returned")
					},
					MockWriteHash: func(r name.Reference, h ociv1.Hash) error {
						return nil
					},
				},
				&MockImageCache{
					MockWriteImage: func(img ociv1.Image) error { return nil },
					MockImage:      func(h ociv1.Hash) (ociv1.Image, error) { return &MockImage{}, nil },
				},
				&MockImageClient{
					// Verify the WithCustomCA option reaches the remote
					// client and sets a transport.
					MockImage: func(ctx context.Context, ref name.Reference, o ...ImageClientOption) (ociv1.Image, error) {
						if len(o) != 1 {
							return nil, errors.New("the number of options should be one")
						}
						c := &ImageClientOptions{}
						o[0](c)
						if c.transport == nil {
							return nil, errors.New("Transport should be set")
						}
						return &MockImage{
							MockDigest: func() (ociv1.Hash, error) { return ociv1.Hash{}, nil },
						}, nil
					},
				},
			),
			args: args{
				o: []ImageClientOption{WithCustomCA(&x509.CertPool{})},
			},
			want: want{
				i: &MockImage{},
			},
		},
		"IfNotPresentTriesCacheFirst": {
			reason: "The IfNotPresent policy should try to read from cache first.",
			p: NewCachingPuller(
				&MockHashCache{
					MockHash: func(r name.Reference) (ociv1.Hash, error) { return ociv1.Hash{}, nil },
				},
				&MockImageCache{
					MockImage: func(h ociv1.Hash) (ociv1.Image, error) { return &MockImage{}, nil },
				},
				&MockImageClient{
					// If we get here it indicates we called always.
					MockImage: func(ctx context.Context, ref name.Reference, o ...ImageClientOption) (ociv1.Image, error) {
						return nil, errors.New("this error should not be returned")
					},
				},
			),
			args: args{
				o: []ImageClientOption{WithPullPolicy(ImagePullPolicyIfNotPresent)},
			},
			want: want{
				i: &MockImage{},
			},
		},
		"IfNotPresentFallsBackToRemote": {
			reason: "The IfNotPresent policy should fall back to pulling from the remote if it can't read the image from cache.",
			p: NewCachingPuller(
				&MockHashCache{
					MockHash: func(r name.Reference) (ociv1.Hash, error) {
						// Trigger a fall-back from never to always.
						return ociv1.Hash{}, errors.New("this error should not be returned")
					},
				},
				&MockImageCache{},
				&MockImageClient{
					MockImage: func(ctx context.Context, ref name.Reference, o ...ImageClientOption) (ociv1.Image, error) {
						return nil, errBoom
					},
				},
			),
			args: args{
				o: []ImageClientOption{WithPullPolicy(ImagePullPolicyIfNotPresent)},
			},
			want: want{
				// This indicates we fell back to always.
				err: errors.Wrap(errBoom, errPullImage),
			},
		},
	}
	for name, tc := range cases {
		t.Run(name, func(t *testing.T) {
			i, err := tc.p.Image(tc.args.ctx, tc.args.r, tc.args.o...)
			if diff := cmp.Diff(tc.want.i, i); diff != "" {
				t.Errorf("\n%s\nImage(...): -want, +got:\n%s", tc.reason, diff)
			}
			if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" {
				t.Errorf("\n%s\nImage(...): -want error, +got error:\n%s", tc.reason, diff)
			}
		})
	}
}

View File

@ -1,81 +0,0 @@
/*
Copyright 2022 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package spec
// The below is all copied from k/k to avoid taking a dependency.
// https://github.com/kubernetes/kubernetes/blob/685d639/pkg/kubelet/cm/helpers_linux.go
const (
	// These limits are defined in the kernel:
	// https://github.com/torvalds/linux/blob/0bddd227f3dc55975e2b8dfa7fc6f959b062a2c7/kernel/sched/sched.h#L427-L428
	minShares = 2
	maxShares = 262144

	sharesPerCPU  = 1024
	milliCPUToCPU = 1000

	// 100000 microseconds is equivalent to 100ms
	quotaPeriod = 100000
	// 1000 microseconds is equivalent to 1ms
	// defined here:
	// https://github.com/torvalds/linux/blob/cac03ac368fabff0122853de2422d4e17a32de08/kernel/sched/core.c#L10546
	minQuotaPeriod = 1000
)

// milliCPUToShares converts the milliCPU to CFS shares.
func milliCPUToShares(milliCPU int64) uint64 {
	if milliCPU == 0 {
		// Docker converts zero milliCPU to unset, which maps to kernel default
		// for unset: 1024. Return 2 here to really match kernel default for
		// zero milliCPU.
		return minShares
	}
	// Conceptually (milliCPU / milliCPUToCPU) * sharesPerCPU, but factored to
	// improve rounding, then clamped to the kernel's [min, max] range.
	switch shares := (milliCPU * sharesPerCPU) / milliCPUToCPU; {
	case shares < minShares:
		return minShares
	case shares > maxShares:
		return maxShares
	default:
		return uint64(shares)
	}
}

// milliCPUToQuota converts milliCPU to CFS quota and period values.
// Input parameters and resulting value is number of microseconds.
func milliCPUToQuota(milliCPU int64, period int64) int64 {
	// CFS quota is measured in two values:
	//  - cfs_period_us=100ms (the amount of time to measure usage across given by period)
	//  - cfs_quota=20ms (the amount of cpu time allowed to be used across a period)
	// so in the above example, you are limited to 20% of a single CPU
	// for multi-cpu environments, you just scale equivalent amounts
	// see https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt for details
	if milliCPU == 0 {
		return 0
	}
	// Normalize the milliCPU value over the supplied period, enforcing the
	// kernel's 1ms minimum quota.
	quota := (milliCPU * period) / milliCPUToCPU
	if quota < minQuotaPeriod {
		return minQuotaPeriod
	}
	return quota
}

View File

@ -1,601 +0,0 @@
/*
Copyright 2022 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package spec implements OCI runtime spec support.
package spec
import (
"encoding/csv"
"encoding/json"
"io"
"os"
"strconv"
"strings"
ociv1 "github.com/google/go-containerregistry/pkg/v1"
runtime "github.com/opencontainers/runtime-spec/specs-go"
"k8s.io/apimachinery/pkg/api/resource"
"github.com/crossplane/crossplane-runtime/pkg/errors"
)
// Error strings.
const (
	errApplySpecOption     = "cannot apply spec option"
	errNew                 = "cannot create new spec"
	errMarshal             = "cannot marshal spec to JSON"
	errWriteFile           = "cannot write file"
	errParseCPULimit       = "cannot parse CPU limit"
	errParseMemoryLimit    = "cannot parse memory limit"
	errNoCmd               = "OCI image must specify entrypoint and/or cmd"
	errParsePasswd         = "cannot parse passwd file data"
	errParseGroup          = "cannot parse group file data"
	errResolveUser         = "cannot resolve user specified by OCI image config"
	errNonIntegerUID       = "cannot parse non-integer UID"
	errNonIntegerGID       = "cannot parse non-integer GID"
	errOpenPasswdFile      = "cannot open passwd file"
	errOpenGroupFile       = "cannot open group file"
	errParsePasswdFiles    = "cannot parse container's /etc/passwd and/or /etc/group files"
	errFmtTooManyColons    = "cannot parse user %q (too many colon separators)"
	errFmtNonExistentUser  = "cannot resolve UID of user %q that doesn't exist in container's /etc/passwd"
	errFmtNonExistentGroup = "cannot resolve GID of group %q that doesn't exist in container's /etc/group"
)
// An Option specifies optional OCI runtime configuration.
type Option func(s *runtime.Spec) error

// New produces a new OCI runtime spec (i.e. config.json).
func New(o ...Option) (*runtime.Spec, error) {
	// NOTE(negz): Most of this is what `crun spec --rootless` produces.
	spec := &runtime.Spec{
		Version: runtime.Version,
		// The process runs as root inside the container with a minimal
		// capability set.
		Process: &runtime.Process{
			User: runtime.User{UID: 0, GID: 0},
			Env:  []string{"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"},
			Cwd:  "/",
			Capabilities: &runtime.LinuxCapabilities{
				Bounding: []string{
					"CAP_AUDIT_WRITE",
					"CAP_KILL",
					"CAP_NET_BIND_SERVICE",
				},
				Effective: []string{
					"CAP_AUDIT_WRITE",
					"CAP_KILL",
					"CAP_NET_BIND_SERVICE",
				},
				Permitted: []string{
					"CAP_AUDIT_WRITE",
					"CAP_KILL",
					"CAP_NET_BIND_SERVICE",
				},
				Ambient: []string{
					"CAP_AUDIT_WRITE",
					"CAP_KILL",
					"CAP_NET_BIND_SERVICE",
				},
			},
			Rlimits: []runtime.POSIXRlimit{
				{
					Type: "RLIMIT_NOFILE",
					Hard: 1024,
					Soft: 1024,
				},
			},
		},
		Hostname: "xfn",
		Mounts: []runtime.Mount{
			{
				Type:        "bind",
				Destination: "/proc",
				Source:      "/proc",
				Options:     []string{"nosuid", "noexec", "nodev", "rbind"},
			},
			{
				Type:        "tmpfs",
				Destination: "/dev",
				Source:      "tmpfs",
				Options:     []string{"nosuid", "strictatime", "mode=755", "size=65536k"},
			},
			{
				Type:        "tmpfs",
				Destination: "/tmp",
				Source:      "tmp",
				Options:     []string{"nosuid", "strictatime", "mode=755", "size=65536k"},
			},
			{
				Type:        "bind",
				Destination: "/sys",
				Source:      "/sys",
				Options:     []string{"rprivate", "nosuid", "noexec", "nodev", "ro", "rbind"},
			},
			{
				Destination: "/dev/pts",
				Type:        "devpts",
				Source:      "devpts",
				Options:     []string{"nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620"},
			},
			{
				Destination: "/dev/mqueue",
				Type:        "mqueue",
				Source:      "mqueue",
				Options:     []string{"nosuid", "noexec", "nodev"},
			},
			{
				Destination: "/sys/fs/cgroup",
				Type:        "cgroup",
				Source:      "cgroup",
				Options:     []string{"rprivate", "nosuid", "noexec", "nodev", "relatime", "ro"},
			},
		},
		// TODO(negz): Do we need a seccomp policy? Our host probably has one.
		Linux: &runtime.Linux{
			Resources: &runtime.LinuxResources{
				Devices: []runtime.LinuxDeviceCgroup{
					{
						Allow:  false,
						Access: "rwm",
					},
				},
				Pids: &runtime.LinuxPids{
					Limit: 32768,
				},
			},
			// The container gets its own instance of every namespace type.
			Namespaces: []runtime.LinuxNamespace{
				{Type: runtime.PIDNamespace},
				{Type: runtime.IPCNamespace},
				{Type: runtime.UTSNamespace},
				{Type: runtime.MountNamespace},
				{Type: runtime.CgroupNamespace},
				{Type: runtime.NetworkNamespace},
			},
			MaskedPaths: []string{
				"/proc/acpi",
				"/proc/kcore",
				"/proc/keys",
				"/proc/latency_stats",
				"/proc/timer_list",
				"/proc/timer_stats",
				"/proc/sched_debug",
				"/proc/scsi",
				"/sys/firmware",
				"/sys/fs/selinux",
				"/sys/dev/block",
			},
			ReadonlyPaths: []string{
				"/proc/asound",
				"/proc/bus",
				"/proc/fs",
				"/proc/irq",
				"/proc/sys",
				"/proc/sysrq-trigger",
			},
		},
	}
	// Apply caller-supplied options over the defaults.
	for _, fn := range o {
		if err := fn(spec); err != nil {
			return nil, errors.Wrap(err, errApplySpecOption)
		}
	}
	return spec, nil
}
// Write an OCI runtime spec to the supplied path. The spec is built by
// applying the supplied options to a default spec (see New), then marshalled
// to JSON and written with mode 0600.
func Write(path string, o ...Option) error {
	spec, err := New(o...)
	if err != nil {
		return errors.Wrap(err, errNew)
	}
	data, err := json.Marshal(spec)
	if err != nil {
		return errors.Wrap(err, errMarshal)
	}
	err = os.WriteFile(path, data, 0o600)
	return errors.Wrap(err, errWriteFile)
}
// WithRootFS configures a container's rootfs: the path to the root
// filesystem, and whether it should be mounted read-only.
func WithRootFS(path string, readonly bool) Option {
	return func(s *runtime.Spec) error {
		root := &runtime.Root{Path: path, Readonly: readonly}
		s.Root = root
		return nil
	}
}
// TODO(negz): Does it make sense to convert Kubernetes-style resource
// quantities into cgroup limits here, or should our gRPC API accept cgroup
// style limits like the CRI API does?

// WithCPULimit limits the container's CPU usage per the supplied
// Kubernetes-style limit string (e.g. 0.5 or 500m for half a core).
func WithCPULimit(limit string) Option {
	return func(s *runtime.Spec) error {
		quantity, err := resource.ParseQuantity(limit)
		if err != nil {
			return errors.Wrap(err, errParseCPULimit)
		}
		// Convert milliCPUs into cgroup CPU shares and a CFS quota.
		milli := quantity.MilliValue()
		shares := milliCPUToShares(milli)
		quota := milliCPUToQuota(milli, quotaPeriod)
		if s.Linux == nil {
			s.Linux = &runtime.Linux{}
		}
		if s.Linux.Resources == nil {
			s.Linux.Resources = &runtime.LinuxResources{}
		}
		s.Linux.Resources.CPU = &runtime.LinuxCPU{
			Shares: &shares,
			Quota:  &quota,
		}
		return nil
	}
}
// WithMemoryLimit limits the container's memory usage per the supplied
// Kubernetes-style limit string (e.g. 512Mi).
func WithMemoryLimit(limit string) Option {
	return func(s *runtime.Spec) error {
		q, err := resource.ParseQuantity(limit)
		if err != nil {
			return errors.Wrap(err, errParseMemoryLimit)
		}
		// Resolve the quantity to bytes. Named distinctly so it doesn't
		// shadow the limit string parameter above.
		limitBytes := q.Value()
		if s.Linux == nil {
			s.Linux = &runtime.Linux{}
		}
		if s.Linux.Resources == nil {
			s.Linux.Resources = &runtime.LinuxResources{}
		}
		s.Linux.Resources.Memory = &runtime.LinuxMemory{
			Limit: &limitBytes,
		}
		return nil
	}
}
// WithHostNetwork configures the container to share the host's (i.e. xfn
// container's) network namespace.
func WithHostNetwork() Option {
	return func(s *runtime.Spec) error {
		// Bind-mount the host's DNS configuration so the container can
		// resolve names on the host's network.
		s.Mounts = append(s.Mounts, runtime.Mount{
			Type:        "bind",
			Destination: "/etc/resolv.conf",
			Source:      "/etc/resolv.conf",
			Options:     []string{"rbind", "ro"},
		})
		if s.Linux == nil {
			return nil
		}
		// Sharing the host's network amounts to not creating a network
		// namespace, so drop any such namespace from the spec.
		remaining := make([]runtime.LinuxNamespace, 0, len(s.Linux.Namespaces))
		for _, namespace := range s.Linux.Namespaces {
			if namespace.Type != runtime.NetworkNamespace {
				remaining = append(remaining, namespace)
			}
		}
		s.Linux.Namespaces = remaining
		return nil
	}
}
// WithImageConfig extends a Spec with configuration derived from an OCI image
// config file. If the image config specifies a user it will be resolved using
// the supplied passwd and group files.
func WithImageConfig(cfg *ociv1.ConfigFile, passwd, group string) Option {
	return func(s *runtime.Spec) error {
		if hostname := cfg.Config.Hostname; hostname != "" {
			s.Hostname = hostname
		}
		// Per the OCI image spec the process arguments are the entrypoint
		// followed by the cmd. At least one of the two must be set.
		args := make([]string, 0, len(cfg.Config.Entrypoint)+len(cfg.Config.Cmd))
		args = append(args, cfg.Config.Entrypoint...)
		args = append(args, cfg.Config.Cmd...)
		if len(args) == 0 {
			return errors.New(errNoCmd)
		}
		if s.Process == nil {
			s.Process = &runtime.Process{}
		}
		s.Process.Args = args
		s.Process.Env = append(s.Process.Env, cfg.Config.Env...)
		if wd := cfg.Config.WorkingDir; wd != "" {
			s.Process.Cwd = wd
		}
		if cfg.Config.User == "" {
			return nil
		}
		// The image config's user must be resolved against the container's
		// own passwd/group data.
		pw, err := ParsePasswdFiles(passwd, group)
		if err != nil {
			return errors.Wrap(err, errParsePasswdFiles)
		}
		return errors.Wrap(WithUser(cfg.Config.User, pw)(s), errResolveUser)
	}
}
// A Username within an /etc/passwd file.
type Username string

// A Groupname within an /etc/group file.
type Groupname string

// A UID within an /etc/passwd file.
type UID int

// A GID within an /etc/passwd or /etc/group file.
type GID int

// Unknown UID and GIDs. Used as sentinels while resolving user strings that
// may or may not be numeric.
const (
	UnknownUID = UID(-1)
	UnknownGID = GID(-1)
)

// Passwd (and group) file data, as produced by ParsePasswd.
type Passwd struct {
	// UID maps usernames from /etc/passwd to their UID.
	UID map[Username]UID
	// GID maps group names from /etc/group to their GID.
	GID map[Groupname]GID
	// Groups maps each known UID to its primary and additional groups.
	Groups map[UID]Groups
}

// Groups represents a user's groups.
type Groups struct {
	// Elsewhere we use types like UID and GID for self-documenting map keys. We
	// use uint32 here for convenience. It's what runtime.User wants and we
	// don't want to have to convert a slice of GID to a slice of uint32.
	PrimaryGID     uint32
	AdditionalGIDs []uint32
}
// ParsePasswdFiles parses the passwd and group files at the supplied paths. If
// either path does not exist it returns empty Passwd data.
func ParsePasswdFiles(passwd, group string) (Passwd, error) {
	pf, err := os.Open(passwd) //nolint:gosec // We intentionally take a variable here.
	if errors.Is(err, os.ErrNotExist) {
		return Passwd{}, nil
	}
	if err != nil {
		return Passwd{}, errors.Wrap(err, errOpenPasswdFile)
	}
	defer pf.Close() //nolint:errcheck // Only open for reading.

	gf, err := os.Open(group) //nolint:gosec // We intentionally take a variable here.
	if errors.Is(err, os.ErrNotExist) {
		return Passwd{}, nil
	}
	if err != nil {
		return Passwd{}, errors.Wrap(err, errOpenGroupFile)
	}
	defer gf.Close() //nolint:errcheck // Only open for reading.

	return ParsePasswd(pf, gf)
}
// ParsePasswd parses the supplied passwd and group data.
//
// passwd must be /etc/passwd formatted data
// (name:password:UID:GID:GECOS:directory:shell) and group must be /etc/group
// formatted data (group_name:password:GID:user_list). Both are parsed as
// ':'-separated records with '#' comments. The first malformed record or
// non-integer UID/GID aborts parsing with an error and a zero Passwd.
func ParsePasswd(passwd, group io.Reader) (Passwd, error) { //nolint:gocyclo // Breaking each loop into its own function seems more complicated.
	out := Passwd{
		UID:    make(map[Username]UID),
		GID:    make(map[Groupname]GID),
		Groups: make(map[UID]Groups),
	}

	// Formatted as name:password:UID:GID:GECOS:directory:shell
	p := csv.NewReader(passwd)
	p.Comma = ':'
	p.Comment = '#'
	p.TrimLeadingSpace = true
	p.FieldsPerRecord = 7 // len(r) will be guaranteed to be 7.

	for {
		r, err := p.Read()
		if errors.Is(err, io.EOF) {
			break
		}
		if err != nil {
			return Passwd{}, errors.Wrap(err, errParsePasswd)
		}

		username := r[0]
		uid, err := strconv.ParseUint(r[2], 10, 32)
		if err != nil {
			return Passwd{}, errors.Wrap(err, errNonIntegerUID)
		}
		gid, err := strconv.ParseUint(r[3], 10, 32)
		if err != nil {
			return Passwd{}, errors.Wrap(err, errNonIntegerGID)
		}

		out.UID[Username(username)] = UID(uid)
		// The GID field of a passwd entry is the user's primary group.
		out.Groups[UID(uid)] = Groups{PrimaryGID: uint32(gid)}
	}

	// Formatted as group_name:password:GID:comma_separated_user_list
	g := csv.NewReader(group)
	g.Comma = ':'
	g.Comment = '#'
	g.TrimLeadingSpace = true
	g.FieldsPerRecord = 4 // len(r) will be guaranteed to be 4.

	for {
		r, err := g.Read()
		if errors.Is(err, io.EOF) {
			break
		}
		if err != nil {
			return Passwd{}, errors.Wrap(err, errParseGroup)
		}

		groupname := r[0]
		gid, err := strconv.ParseUint(r[2], 10, 32)
		if err != nil {
			return Passwd{}, errors.Wrap(err, errNonIntegerGID)
		}

		out.GID[Groupname(groupname)] = GID(gid)

		users := r[3]
		// This group has no users (except those with membership via passwd).
		if users == "" {
			continue
		}
		// Record this group as an additional group of each listed member.
		for _, u := range strings.Split(users, ",") {
			uid, ok := out.UID[Username(u)]
			if !ok || gid == uint64(out.Groups[uid].PrimaryGID) {
				// Either this user doesn't exist, or they do and the group is
				// their primary group. Either way we want to skip it.
				continue
			}
			g := out.Groups[uid]
			g.AdditionalGIDs = append(g.AdditionalGIDs, uint32(gid))
			out.Groups[uid] = g
		}
	}

	return out, nil
}
// WithUser resolves an OCI image config user string in order to set the spec's
// process user. According to the OCI image config v1.0 spec: "For Linux based
// systems, all of the following are valid: user, uid, user:group, uid:gid,
// uid:group, user:gid. If group/GID is not specified, the default group and
// supplementary groups of the given user/UID in /etc/passwd from the container
// are applied."
func WithUser(user string, p Passwd) Option {
	return func(s *runtime.Spec) error {
		if s.Process == nil {
			s.Process = &runtime.Process{}
		}
		// At most one colon is allowed, separating user from group.
		parts := strings.Split(user, ":")
		if len(parts) == 1 {
			return WithUserOnly(parts[0], p)(s)
		}
		if len(parts) == 2 {
			return WithUserAndGroup(parts[0], parts[1], p)(s)
		}
		return errors.Errorf(errFmtTooManyColons, user)
	}
}
// WithUserOnly resolves an OCI Image config user string in order to set the
// spec's process user. The supplied user string must either be an integer UID
// (that may or may not exist in the container's /etc/passwd) or a username that
// exists in the container's /etc/passwd. The supplied user string must not
// contain any group information.
func WithUserOnly(user string, p Passwd) Option {
	return func(s *runtime.Spec) error {
		if s.Process == nil {
			s.Process = &runtime.Process{}
		}

		var uid UID
		if v, err := strconv.ParseUint(user, 10, 32); err == nil {
			// A numeric user string is used verbatim as a UID.
			uid = UID(v)
		} else {
			// A non-numeric user string must resolve to a UID via the
			// container's passwd data.
			resolved, ok := p.UID[Username(user)]
			if !ok {
				return errors.Errorf(errFmtNonExistentUser, user)
			}
			uid = resolved
		}

		// At this point the UID was either explicitly specified or resolved.
		// Note that if the UID doesn't exist in the supplied passwd and group
		// data we'll set its GID to 0. This behaviour isn't specified by the
		// OCI spec, but matches what containerd does.
		groups := p.Groups[uid]
		s.Process.User = runtime.User{
			UID:            uint32(uid),
			GID:            groups.PrimaryGID,
			AdditionalGids: groups.AdditionalGIDs,
		}
		return nil
	}
}
// WithUserAndGroup resolves an OCI image config user string in order to set the
// spec's process user. The supplied user string must either be an integer UID
// (that may or may not exist in the container's /etc/passwd) or a username that
// exists in the container's /etc/passwd. The supplied group must either be an
// integer GID (that may or may not exist in the container's /etc/group) or a
// group name that exists in the container's /etc/group.
func WithUserAndGroup(user, group string, p Passwd) Option {
	return func(s *runtime.Spec) error {
		if s.Process == nil {
			s.Process = &runtime.Process{}
		}

		// A numeric user string is used verbatim as a UID; otherwise it must
		// resolve via the container's passwd data.
		var uid UID
		if v, err := strconv.ParseUint(user, 10, 32); err == nil {
			uid = UID(v)
		} else {
			resolved, ok := p.UID[Username(user)]
			if !ok {
				return errors.Errorf(errFmtNonExistentUser, user)
			}
			uid = resolved
		}

		// Likewise a numeric group string is used verbatim as a GID.
		var gid GID
		if v, err := strconv.ParseUint(group, 10, 32); err == nil {
			gid = GID(v)
		} else {
			resolved, ok := p.GID[Groupname(group)]
			if !ok {
				return errors.Errorf(errFmtNonExistentGroup, group)
			}
			gid = resolved
		}

		// At this point the UID and GID were either explicitly specified or
		// resolved. All we need to do is supply any additional GIDs.
		s.Process.User = runtime.User{
			UID:            uint32(uid),
			GID:            uint32(gid),
			AdditionalGids: p.Groups[uid].AdditionalGIDs,
		}
		return nil
	}
}

View File

@ -1,931 +0,0 @@
/*
Copyright 2022 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package spec
import (
"io"
"os"
"path/filepath"
"strings"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
ociv1 "github.com/google/go-containerregistry/pkg/v1"
runtime "github.com/opencontainers/runtime-spec/specs-go"
"k8s.io/apimachinery/pkg/api/resource"
"github.com/crossplane/crossplane-runtime/pkg/errors"
"github.com/crossplane/crossplane-runtime/pkg/test"
)
// TestBundle is a test double wrapping a filesystem path: Path exposes it and
// Cleanup removes it. NOTE(review): no usage is visible in this chunk —
// presumably it satisfies a bundle interface used by other tests; confirm
// before removing.
type TestBundle struct{ path string }

// Path returns the bundle's path.
func (b TestBundle) Path() string { return b.path }

// Cleanup removes the bundle's path and everything under it.
func (b TestBundle) Cleanup() error { return os.RemoveAll(b.path) }
// TestNew verifies that New wraps option errors, and that successful options
// are applied to the freshly constructed default spec.
func TestNew(t *testing.T) {
	errBoom := errors.New("boom")

	type args struct {
		o []Option
	}
	type want struct {
		s   *runtime.Spec
		err error
	}
	cases := map[string]struct {
		reason string
		args   args
		want   want
	}{
		"InvalidOption": {
			reason: "We should return an error if the supplied option is invalid.",
			args: args{
				o: []Option{func(s *runtime.Spec) error { return errBoom }},
			},
			want: want{
				err: errors.Wrap(errBoom, errApplySpecOption),
			},
		},
		"Minimal": {
			reason: "It should be possible to apply an option to a new spec.",
			args: args{
				o: []Option{func(s *runtime.Spec) error {
					s.Annotations = map[string]string{"cool": "very"}
					return nil
				}},
			},
			want: want{
				// The expected spec is the default spec plus the annotation
				// set by the option above.
				s: func() *runtime.Spec {
					s, _ := New()
					s.Annotations = map[string]string{"cool": "very"}
					return s
				}(),
			},
		},
	}
	for name, tc := range cases {
		t.Run(name, func(t *testing.T) {
			got, err := New(tc.args.o...)
			if diff := cmp.Diff(tc.want.s, got); diff != "" {
				t.Errorf("\n%s\nCreate(...): -want, +got:\n%s", tc.reason, diff)
			}
			if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" {
				t.Errorf("\n%s\nCreate(...): -want error, +got error:\n%s", tc.reason, diff)
			}
		})
	}
}
// TestWithCPULimit verifies CPU limit parsing and the resulting cgroup shares
// and CFS quota for both milliCPU (500m) and fractional core (0.5) forms.
func TestWithCPULimit(t *testing.T) {
	// Expected cgroup values for half a core, in both test cases below.
	var shares uint64 = 512
	var quota int64 = 50000

	type args struct {
		limit string
	}
	type want struct {
		s   *runtime.Spec
		err error
	}
	cases := map[string]struct {
		reason string
		s      *runtime.Spec
		args   args
		want   want
	}{
		"ParseLimitError": {
			reason: "We should return any error encountered while parsing the CPU limit.",
			s:      &runtime.Spec{},
			args: args{
				limit: "",
			},
			want: want{
				s:   &runtime.Spec{},
				err: errors.Wrap(resource.ErrFormatWrong, errParseCPULimit),
			},
		},
		"SuccessMilliCPUs": {
			reason: "We should set shares and quota according to the supplied milliCPUs.",
			s:      &runtime.Spec{},
			args: args{
				limit: "500m",
			},
			want: want{
				s: &runtime.Spec{
					Linux: &runtime.Linux{
						Resources: &runtime.LinuxResources{
							CPU: &runtime.LinuxCPU{
								Shares: &shares,
								Quota:  &quota,
							},
						},
					},
				},
			},
		},
		"SuccessCores": {
			reason: "We should set shares and quota according to the supplied cores.",
			s:      &runtime.Spec{},
			args: args{
				limit: "0.5",
			},
			want: want{
				s: &runtime.Spec{
					Linux: &runtime.Linux{
						Resources: &runtime.LinuxResources{
							CPU: &runtime.LinuxCPU{
								Shares: &shares,
								Quota:  &quota,
							},
						},
					},
				},
			},
		},
	}
	for name, tc := range cases {
		t.Run(name, func(t *testing.T) {
			err := WithCPULimit(tc.args.limit)(tc.s)
			if diff := cmp.Diff(tc.want.s, tc.s, cmpopts.EquateEmpty()); diff != "" {
				t.Errorf("\n%s\nWithCPULimit(...): -want, +got:\n%s", tc.reason, diff)
			}
			if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" {
				t.Errorf("\n%s\nWithCPULimit(...): -want error, +got error:\n%s", tc.reason, diff)
			}
		})
	}
}
// TestWithMemoryLimit verifies memory limit parsing and the resulting cgroup
// memory limit in bytes.
func TestWithMemoryLimit(t *testing.T) {
	// 512Mi expressed in bytes.
	var limit int64 = 512 * 1024 * 1024

	type args struct {
		limit string
	}
	type want struct {
		s   *runtime.Spec
		err error
	}
	cases := map[string]struct {
		reason string
		s      *runtime.Spec
		args   args
		want   want
	}{
		"ParseLimitError": {
			reason: "We should return any error encountered while parsing the memory limit.",
			s:      &runtime.Spec{},
			args: args{
				limit: "",
			},
			want: want{
				s:   &runtime.Spec{},
				err: errors.Wrap(resource.ErrFormatWrong, errParseMemoryLimit),
			},
		},
		"Success": {
			reason: "We should set the supplied memory limit.",
			s:      &runtime.Spec{},
			args: args{
				limit: "512Mi",
			},
			want: want{
				s: &runtime.Spec{
					Linux: &runtime.Linux{
						Resources: &runtime.LinuxResources{
							Memory: &runtime.LinuxMemory{
								Limit: &limit,
							},
						},
					},
				},
			},
		},
	}
	for name, tc := range cases {
		t.Run(name, func(t *testing.T) {
			err := WithMemoryLimit(tc.args.limit)(tc.s)
			if diff := cmp.Diff(tc.want.s, tc.s, cmpopts.EquateEmpty()); diff != "" {
				t.Errorf("\n%s\nWithMemoryLimit(...): -want, +got:\n%s", tc.reason, diff)
			}
			if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" {
				t.Errorf("\n%s\nWithMemoryLimit(...): -want error, +got error:\n%s", tc.reason, diff)
			}
		})
	}
}
// TestWithHostNetwork verifies that WithHostNetwork bind-mounts the host's
// resolv.conf and removes any network namespace from the spec.
func TestWithHostNetwork(t *testing.T) {
	type want struct {
		s   *runtime.Spec
		err error
	}
	cases := map[string]struct {
		reason string
		s      *runtime.Spec
		want   want
	}{
		"RemoveNetworkNamespace": {
			// Fixed typo: "remote" -> "remove".
			reason: "We should remove the network namespace if it exists.",
			s: &runtime.Spec{
				Linux: &runtime.Linux{
					Namespaces: []runtime.LinuxNamespace{
						{Type: runtime.CgroupNamespace},
						{Type: runtime.NetworkNamespace},
					},
				},
			},
			want: want{
				s: &runtime.Spec{
					Mounts: []runtime.Mount{{
						Type:        "bind",
						Destination: "/etc/resolv.conf",
						Source:      "/etc/resolv.conf",
						Options:     []string{"rbind", "ro"},
					}},
					Linux: &runtime.Linux{
						Namespaces: []runtime.LinuxNamespace{
							{Type: runtime.CgroupNamespace},
						},
					},
				},
			},
		},
		"EmptySpec": {
			reason: "We should handle an empty spec without issue.",
			s:      &runtime.Spec{},
			want: want{
				s: &runtime.Spec{
					Mounts: []runtime.Mount{{
						Type:        "bind",
						Destination: "/etc/resolv.conf",
						Source:      "/etc/resolv.conf",
						Options:     []string{"rbind", "ro"},
					}},
				},
			},
		},
	}
	for name, tc := range cases {
		t.Run(name, func(t *testing.T) {
			err := WithHostNetwork()(tc.s)
			if diff := cmp.Diff(tc.want.s, tc.s, cmpopts.EquateEmpty()); diff != "" {
				t.Errorf("\n%s\nWithHostNetwork(...): -want, +got:\n%s", tc.reason, diff)
			}
			if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" {
				t.Errorf("\n%s\nWithHostNetwork(...): -want error, +got error:\n%s", tc.reason, diff)
			}
		})
	}
}
// TestWithImageConfig verifies that an OCI image config is translated into
// runtime process settings (args, env, cwd, hostname, user), and that missing
// commands or unresolvable users produce errors.
func TestWithImageConfig(t *testing.T) {
	type args struct {
		cfg    *ociv1.ConfigFile
		passwd string
		group  string
	}
	type want struct {
		s   *runtime.Spec
		err error
	}
	cases := map[string]struct {
		reason string
		s      *runtime.Spec
		args   args
		want   want
	}{
		"NoCommand": {
			reason: "We should return an error if the supplied image config has no entrypoint and no cmd.",
			s:      &runtime.Spec{},
			args: args{
				cfg: &ociv1.ConfigFile{},
			},
			want: want{
				s:   &runtime.Spec{},
				err: errors.New(errNoCmd),
			},
		},
		"UnresolvableUser": {
			reason: "We should return an error if there is no passwd data and a string username.",
			s:      &runtime.Spec{},
			args: args{
				cfg: &ociv1.ConfigFile{
					Config: ociv1.Config{
						Entrypoint: []string{"/bin/sh"},
						User:       "negz",
					},
				},
			},
			want: want{
				// Note args are applied before user resolution fails.
				s: &runtime.Spec{
					Process: &runtime.Process{
						Args: []string{"/bin/sh"},
					},
				},
				err: errors.Wrap(errors.Errorf(errFmtNonExistentUser, "negz"), errResolveUser),
			},
		},
		"Success": {
			reason: "We should build a runtime config from the supplied image config.",
			s:      &runtime.Spec{},
			args: args{
				cfg: &ociv1.ConfigFile{
					Config: ociv1.Config{
						Hostname:   "coolhost",
						Entrypoint: []string{"/bin/sh"},
						Cmd:        []string{"cool"},
						Env:        []string{"COOL=very"},
						WorkingDir: "/",
						User:       "1000:100",
					},
				},
			},
			want: want{
				s: &runtime.Spec{
					Process: &runtime.Process{
						Args: []string{"/bin/sh", "cool"},
						Env:  []string{"COOL=very"},
						Cwd:  "/",
						User: runtime.User{
							UID: 1000,
							GID: 100,
						},
					},
					Hostname: "coolhost",
				},
			},
		},
	}
	for name, tc := range cases {
		t.Run(name, func(t *testing.T) {
			err := WithImageConfig(tc.args.cfg, tc.args.passwd, tc.args.group)(tc.s)
			if diff := cmp.Diff(tc.want.s, tc.s, cmpopts.EquateEmpty()); diff != "" {
				t.Errorf("\n%s\nWithImageConfig(...): -want, +got:\n%s", tc.reason, diff)
			}
			if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" {
				t.Errorf("\n%s\nWithImageConfig(...): -want error, +got error:\n%s", tc.reason, diff)
			}
		})
	}
}
// TestParsePasswd verifies passwd/group parsing: comments and leading
// whitespace are tolerated, malformed records and non-integer IDs error, and
// primary vs additional group membership is resolved correctly.
func TestParsePasswd(t *testing.T) {
	passwd := `
# Ensure that comments and leading whitespace are supported.
root:x:0:0:System administrator:/root:/run/current-system/sw/bin/zsh
negz:x:1000:100::/home/negz:/run/current-system/sw/bin/zsh
primary:x:1001:100::/home/primary:/run/current-system/sw/bin/zsh
`
	group := `
root:x:0:
wheel:x:1:negz
# This is primary's primary group, and doesnotexist doesn't exist in passwd.
users:x:100:primary,doesnotexist
`
	type args struct {
		passwd io.Reader
		group  io.Reader
	}
	type want struct {
		p   Passwd
		err error
	}
	cases := map[string]struct {
		reason string
		args   args
		want   want
	}{
		"EmptyFiles": {
			reason: "We should return an empty Passwd when both files are empty.",
			args: args{
				passwd: strings.NewReader(""),
				group:  strings.NewReader(""),
			},
			want: want{
				p: Passwd{},
			},
		},
		// TODO(negz): Should we try fuzz this?
		"MalformedPasswd": {
			reason: "We should return an error when the passwd file is malformed.",
			args: args{
				passwd: strings.NewReader("@!#!:f"),
				group:  strings.NewReader(""),
			},
			want: want{
				err: errors.Wrap(errors.New("record on line 1: wrong number of fields"), errParsePasswd),
			},
		},
		"MalformedGroup": {
			reason: "We should return an error when the group file is malformed.",
			args: args{
				passwd: strings.NewReader(""),
				group:  strings.NewReader("@!#!:f"),
			},
			want: want{
				err: errors.Wrap(errors.New("record on line 1: wrong number of fields"), errParseGroup),
			},
		},
		"NonIntegerPasswdUID": {
			reason: "We should return an error when the passwd file contains a non-integer uid.",
			args: args{
				passwd: strings.NewReader("username:password:uid:gid:gecos:homedir:shell"),
				group:  strings.NewReader(""),
			},
			want: want{
				err: errors.Wrap(errors.New("strconv.ParseUint: parsing \"uid\": invalid syntax"), errNonIntegerUID),
			},
		},
		"NonIntegerPasswdGID": {
			reason: "We should return an error when the passwd file contains a non-integer gid.",
			args: args{
				passwd: strings.NewReader("username:password:42:gid:gecos:homedir:shell"),
				group:  strings.NewReader(""),
			},
			want: want{
				err: errors.Wrap(errors.New("strconv.ParseUint: parsing \"gid\": invalid syntax"), errNonIntegerGID),
			},
		},
		"NonIntegerGroupGID": {
			reason: "We should return an error when the group file contains a non-integer gid.",
			args: args{
				passwd: strings.NewReader(""),
				group:  strings.NewReader("groupname:password:gid:username"),
			},
			want: want{
				err: errors.Wrap(errors.New("strconv.ParseUint: parsing \"gid\": invalid syntax"), errNonIntegerGID),
			},
		},
		"Success": {
			reason: "We should successfully parse well formatted passwd and group files.",
			args: args{
				passwd: strings.NewReader(passwd),
				group:  strings.NewReader(group),
			},
			want: want{
				p: Passwd{
					UID: map[Username]UID{
						"root":    0,
						"negz":    1000,
						"primary": 1001,
					},
					GID: map[Groupname]GID{
						"root":  0,
						"wheel": 1,
						"users": 100,
					},
					// negz gets wheel (1) as an additional GID; primary's
					// membership in users (100) is its primary group and so
					// is not duplicated as an additional GID.
					Groups: map[UID]Groups{
						0:    {PrimaryGID: 0},
						1000: {PrimaryGID: 100, AdditionalGIDs: []uint32{1}},
						1001: {PrimaryGID: 100},
					},
				},
			},
		},
	}
	for name, tc := range cases {
		t.Run(name, func(t *testing.T) {
			got, err := ParsePasswd(tc.args.passwd, tc.args.group)
			if diff := cmp.Diff(tc.want.p, got, cmpopts.EquateEmpty()); diff != "" {
				t.Errorf("\n%s\nParsePasswd(...): -want, +got:\n%s", tc.reason, diff)
			}
			if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" {
				t.Errorf("\n%s\nParsePasswd(...): -want error, +got error:\n%s", tc.reason, diff)
			}
		})
	}
}
// TestParsePasswdFiles verifies that ParsePasswdFiles tolerates missing
// passwd/group files (returning empty data) and parses well-formed files.
func TestParsePasswdFiles(t *testing.T) {
	passwd := `
# Ensure that comments and leading whitespace are supported.
root:x:0:0:System administrator:/root:/run/current-system/sw/bin/zsh
negz:x:1000:100::/home/negz:/run/current-system/sw/bin/zsh
primary:x:1001:100::/home/primary:/run/current-system/sw/bin/zsh
`
	group := `
root:x:0:
wheel:x:1:negz
# This is primary's primary group, and doesnotexist doesn't exist in passwd.
users:x:100:primary,doesnotexist
`
	// t.TempDir is cleaned up automatically when the test (and its subtests)
	// complete, so no manual RemoveAll is needed.
	tmp := t.TempDir()
	// Check the fixture writes; silently ignoring them could make the
	// subtests below fail with a misleading parse error.
	if err := os.WriteFile(filepath.Join(tmp, "passwd"), []byte(passwd), 0o600); err != nil {
		t.Fatal(err)
	}
	if err := os.WriteFile(filepath.Join(tmp, "group"), []byte(group), 0o600); err != nil {
		t.Fatal(err)
	}

	type args struct {
		passwd string
		group  string
	}
	type want struct {
		p   Passwd
		err error
	}
	cases := map[string]struct {
		reason string
		args   args
		want   want
	}{
		"NoPasswdFile": {
			reason: "We should not return an error if the passwd file doesn't exist.",
			args: args{
				passwd: filepath.Join(tmp, "nonexist"),
				group:  filepath.Join(tmp, "group"),
			},
			want: want{
				p: Passwd{},
			},
		},
		"NoGroupFile": {
			reason: "We should not return an error if the group file doesn't exist.",
			args: args{
				passwd: filepath.Join(tmp, "passwd"),
				group:  filepath.Join(tmp, "nonexist"),
			},
			want: want{
				p: Passwd{},
			},
		},
		"Success": {
			reason: "We should successfully parse well formatted passwd and group files.",
			args: args{
				passwd: filepath.Join(tmp, "passwd"),
				group:  filepath.Join(tmp, "group"),
			},
			want: want{
				p: Passwd{
					UID: map[Username]UID{
						"root":    0,
						"negz":    1000,
						"primary": 1001,
					},
					GID: map[Groupname]GID{
						"root":  0,
						"wheel": 1,
						"users": 100,
					},
					Groups: map[UID]Groups{
						0:    {PrimaryGID: 0},
						1000: {PrimaryGID: 100, AdditionalGIDs: []uint32{1}},
						1001: {PrimaryGID: 100},
					},
				},
			},
		},
	}
	for name, tc := range cases {
		t.Run(name, func(t *testing.T) {
			got, err := ParsePasswdFiles(tc.args.passwd, tc.args.group)
			// Failure messages now name the function actually under test.
			if diff := cmp.Diff(tc.want.p, got, cmpopts.EquateEmpty()); diff != "" {
				t.Errorf("\n%s\nParsePasswdFiles(...): -want, +got:\n%s", tc.reason, diff)
			}
			if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" {
				t.Errorf("\n%s\nParsePasswdFiles(...): -want error, +got error:\n%s", tc.reason, diff)
			}
		})
	}
}
// TestWithUser verifies that WithUser routes "user" strings to WithUserOnly,
// "user:group" strings to WithUserAndGroup, and rejects extra colons.
func TestWithUser(t *testing.T) {
	type args struct {
		user string
		p    Passwd
	}
	type want struct {
		s   *runtime.Spec
		err error
	}

	// NOTE(negz): We 'test through' here only to test that WithUser can
	// distinguish a user (only) from a user and group and route them to the
	// right place; see TestWithUserOnly and TestWithUserAndGroup.
	cases := map[string]struct {
		reason string
		s      *runtime.Spec
		args   args
		want   want
	}{
		"TooManyColons": {
			reason: "We should return an error if the supplied user string contains more than one colon separator.",
			s:      &runtime.Spec{},
			args: args{
				user: "user:group:wat",
			},
			want: want{
				s:   &runtime.Spec{Process: &runtime.Process{}},
				err: errors.Errorf(errFmtTooManyColons, "user:group:wat"),
			},
		},
		"UIDOnly": {
			reason: "We should handle a user string that is a UID without error.",
			s:      &runtime.Spec{},
			args: args{
				user: "1000",
			},
			want: want{
				s: &runtime.Spec{Process: &runtime.Process{
					User: runtime.User{
						UID: 1000,
					},
				}},
			},
		},
		"UIDAndGID": {
			reason: "We should handle a user string that is a UID and GID without error.",
			s:      &runtime.Spec{},
			args: args{
				user: "1000:100",
			},
			want: want{
				s: &runtime.Spec{Process: &runtime.Process{
					User: runtime.User{
						UID: 1000,
						GID: 100,
					},
				}},
			},
		},
	}
	for name, tc := range cases {
		t.Run(name, func(t *testing.T) {
			err := WithUser(tc.args.user, tc.args.p)(tc.s)
			if diff := cmp.Diff(tc.want.s, tc.s, cmpopts.EquateEmpty()); diff != "" {
				t.Errorf("\n%s\nWithUser(...): -want, +got:\n%s", tc.reason, diff)
			}
			if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" {
				t.Errorf("\n%s\nWithUser(...): -want error, +got error:\n%s", tc.reason, diff)
			}
		})
	}
}
// TestWithUserOnly verifies numeric UIDs are used verbatim, usernames are
// resolved via Passwd data (including group resolution), and unknown
// usernames error.
func TestWithUserOnly(t *testing.T) {
	type args struct {
		user string
		p    Passwd
	}
	type want struct {
		s   *runtime.Spec
		err error
	}
	cases := map[string]struct {
		reason string
		s      *runtime.Spec
		args   args
		want   want
	}{
		"UIDOnly": {
			reason: "We should handle a user string that is a UID without error.",
			s:      &runtime.Spec{},
			args: args{
				user: "1000",
			},
			want: want{
				s: &runtime.Spec{Process: &runtime.Process{
					User: runtime.User{
						UID: 1000,
					},
				}},
			},
		},
		"ResolveUIDGroups": {
			reason: "We should 'resolve' a UID's groups per the supplied Passwd data.",
			s:      &runtime.Spec{},
			args: args{
				user: "1000",
				p: Passwd{
					Groups: map[UID]Groups{
						1000: {
							PrimaryGID:     100,
							AdditionalGIDs: []uint32{1},
						},
					},
				},
			},
			want: want{
				s: &runtime.Spec{Process: &runtime.Process{
					User: runtime.User{
						UID:            1000,
						GID:            100,
						AdditionalGids: []uint32{1},
					},
				}},
			},
		},
		"NonExistentUser": {
			reason: "We should return an error if the supplied username doesn't exist in the supplied Passwd data.",
			s:      &runtime.Spec{},
			args: args{
				user: "doesnotexist",
				p:    Passwd{},
			},
			want: want{
				s:   &runtime.Spec{Process: &runtime.Process{}},
				err: errors.Errorf(errFmtNonExistentUser, "doesnotexist"),
			},
		},
		"ResolveUserToUID": {
			reason: "We should 'resolve' a username to a UID per the supplied Passwd data.",
			s:      &runtime.Spec{},
			args: args{
				user: "negz",
				p: Passwd{
					UID: map[Username]UID{
						"negz": 1000,
					},
				},
			},
			want: want{
				s: &runtime.Spec{Process: &runtime.Process{
					User: runtime.User{
						UID: 1000,
					},
				}},
			},
		},
	}
	for name, tc := range cases {
		t.Run(name, func(t *testing.T) {
			err := WithUserOnly(tc.args.user, tc.args.p)(tc.s)
			if diff := cmp.Diff(tc.want.s, tc.s, cmpopts.EquateEmpty()); diff != "" {
				t.Errorf("\n%s\nWithUserOnly(...): -want, +got:\n%s", tc.reason, diff)
			}
			if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" {
				t.Errorf("\n%s\nWithUserOnly(...): -want error, +got error:\n%s", tc.reason, diff)
			}
		})
	}
}
// TestWithUserAndGroup verifies numeric UID/GID pairs are used verbatim,
// names are resolved via Passwd data, additional GIDs are applied (while an
// explicit GID overrides the primary), and unknown names error.
func TestWithUserAndGroup(t *testing.T) {
	type args struct {
		user  string
		group string
		p     Passwd
	}
	type want struct {
		s   *runtime.Spec
		err error
	}
	cases := map[string]struct {
		reason string
		s      *runtime.Spec
		args   args
		want   want
	}{
		"UIDAndGID": {
			reason: "We should handle a UID and GID without error.",
			s:      &runtime.Spec{},
			args: args{
				user:  "1000",
				group: "100",
			},
			want: want{
				s: &runtime.Spec{Process: &runtime.Process{
					User: runtime.User{
						UID: 1000,
						GID: 100,
					},
				}},
			},
		},
		"ResolveAdditionalGIDs": {
			reason: "We should resolve any additional GIDs in the supplied Passwd data.",
			s:      &runtime.Spec{},
			args: args{
				user:  "1000",
				group: "100",
				p: Passwd{
					Groups: map[UID]Groups{
						1000: {
							PrimaryGID:     42, // This should be ignored, since an explicit GID was supplied.
							AdditionalGIDs: []uint32{1},
						},
					},
				},
			},
			want: want{
				s: &runtime.Spec{Process: &runtime.Process{
					User: runtime.User{
						UID:            1000,
						GID:            100,
						AdditionalGids: []uint32{1},
					},
				}},
			},
		},
		"NonExistentUser": {
			reason: "We should return an error if the supplied username doesn't exist in the supplied Passwd data.",
			s:      &runtime.Spec{},
			args: args{
				user: "doesnotexist",
				p:    Passwd{},
			},
			want: want{
				s:   &runtime.Spec{Process: &runtime.Process{}},
				err: errors.Errorf(errFmtNonExistentUser, "doesnotexist"),
			},
		},
		"NonExistentGroup": {
			reason: "We should return an error if the supplied group doesn't exist in the supplied Passwd data.",
			s:      &runtime.Spec{},
			args: args{
				user:  "exists",
				group: "doesnotexist",
				p: Passwd{
					UID: map[Username]UID{"exists": 1000},
				},
			},
			want: want{
				s:   &runtime.Spec{Process: &runtime.Process{}},
				err: errors.Errorf(errFmtNonExistentGroup, "doesnotexist"),
			},
		},
		"ResolveUserAndGroupToUIDAndGID": {
			reason: "We should 'resolve' a username to a UID and a groupname to a GID per the supplied Passwd data.",
			s:      &runtime.Spec{},
			args: args{
				user:  "negz",
				group: "users",
				p: Passwd{
					UID: map[Username]UID{
						"negz": 1000,
					},
					GID: map[Groupname]GID{
						"users": 100,
					},
				},
			},
			want: want{
				s: &runtime.Spec{Process: &runtime.Process{
					User: runtime.User{
						UID: 1000,
						GID: 100,
					},
				}},
			},
		},
	}
	for name, tc := range cases {
		t.Run(name, func(t *testing.T) {
			err := WithUserAndGroup(tc.args.user, tc.args.group, tc.args.p)(tc.s)
			if diff := cmp.Diff(tc.want.s, tc.s, cmpopts.EquateEmpty()); diff != "" {
				t.Errorf("\n%s\nWithUserAndGroup(...): -want, +got:\n%s", tc.reason, diff)
			}
			if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" {
				t.Errorf("\n%s\nWithUserAndGroup(...): -want error, +got error:\n%s", tc.reason, diff)
			}
		})
	}
}

View File

@ -1,479 +0,0 @@
/*
Copyright 2022 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package overlay implements an overlay based container store.
package overlay
import (
"context"
"fmt"
"io"
"os"
"path/filepath"
ociv1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/crossplane/crossplane-runtime/pkg/errors"
"github.com/crossplane/crossplane-runtime/pkg/resource"
"github.com/crossplane/crossplane/internal/oci/layer"
"github.com/crossplane/crossplane/internal/oci/spec"
"github.com/crossplane/crossplane/internal/oci/store"
)
// Error strings used when wrapping errors returned by this package. Keeping
// them as constants keeps wrapping call sites short and the messages
// consistent.
const (
errMkContainerStore = "cannot make container store directory"
errMkLayerStore = "cannot make layer store directory"
errReadConfigFile = "cannot read image config file"
errGetLayers = "cannot get image layers"
errResolveLayer = "cannot resolve layer to suitable overlayfs lower directory"
errBootstrapBundle = "cannot bootstrap bundle rootfs"
errWriteRuntimeSpec = "cannot write OCI runtime spec"
errGetDigest = "cannot get digest"
errMkAlgoDir = "cannot create store directory"
errFetchLayer = "cannot fetch and decompress layer"
errMkWorkdir = "cannot create work directory to extract layer"
errApplyLayer = "cannot apply (extract) uncompressed tarball layer"
errMvWorkdir = "cannot move temporary work directory"
errStatLayer = "cannot determine whether layer exists in store"
errCleanupWorkdir = "cannot cleanup temporary work directory"
errMkOverlayDirTmpfs = "cannot make overlay tmpfs dir"
errMkdirTemp = "cannot make temporary dir"
errMountOverlayfs = "cannot mount overlayfs"
errFmtMkOverlayDir = "cannot make overlayfs %q dir"
)
// Common overlayfs directories.
const (
overlayDirTmpfs = "tmpfs"
overlayDirUpper = "upper"
overlayDirWork = "work"
overlayDirLower = "lower" // Only used when there are no parent layers.
overlayDirMerged = "merged" // Only used when generating diff layers.
)
// Supported returns true if the supplied cacheRoot supports the overlay
// filesystem. Notably overlayfs was not supported in unprivileged user
// namespaces until Linux kernel 5.11. It's also not possible to create an
// overlayfs where the upper dir is itself on an overlayfs (i.e. is on a
// container's root filesystem).
// https://github.com/torvalds/linux/commit/459c7c565ac36ba09ffbf
func Supported(cacheRoot string) bool {
	// NewLayerWorkdir is the stricter probe: it must create an upper dir on
	// the same filesystem as the supplied cacheRoot so that the result can be
	// moved into place as a cached layer. NewOverlayBundle keeps its upper dir
	// on a tmpfs, and is thus supported in some cases where NewLayerWorkdir
	// isn't.
	w, err := NewLayerWorkdir(cacheRoot, "supports-overlay-test", []string{})
	if err != nil {
		return false
	}
	return w.Cleanup() == nil
}
// A LayerResolver resolves the supplied layer to a path suitable for use as an
// overlayfs lower directory.
type LayerResolver interface {
// Resolve the supplied layer to a path suitable for use as a lower dir.
Resolve(ctx context.Context, l ociv1.Layer, parents ...ociv1.Layer) (string, error)
}
// A TarballApplicator applies (i.e. extracts) an OCI layer tarball.
// https://github.com/opencontainers/image-spec/blob/v1.0/layer.md
type TarballApplicator interface {
// Apply the supplied tarball - an OCI filesystem layer - to the supplied
// root directory. Applying all of an image's layers, in the correct order,
// should produce the image's "flattened" filesystem.
Apply(ctx context.Context, tb io.Reader, root string) error
}
// A BundleBootstrapper bootstraps a bundle by creating and mounting its rootfs.
type BundleBootstrapper interface {
// Bootstrap a bundle at the supplied path, mounting its root filesystem
// atop the supplied parent layer paths.
Bootstrap(path string, parentLayerPaths []string) (Bundle, error)
}
// A BundleBootstrapperFn bootstraps a bundle by creating and mounting its
// rootfs.
type BundleBootstrapperFn func(path string, parentLayerPaths []string) (Bundle, error)
// Bootstrap a bundle by creating and mounting its rootfs.
func (fn BundleBootstrapperFn) Bootstrap(path string, parentLayerPaths []string) (Bundle, error) {
return fn(path, parentLayerPaths)
}
// A RuntimeSpecWriter writes an OCI runtime spec to the supplied path.
type RuntimeSpecWriter interface {
// Write an OCI runtime spec to the supplied path, after applying the
// supplied options.
Write(path string, o ...spec.Option) error
}
// A RuntimeSpecWriterFn allows a function to satisfy RuntimeSpecWriter.
type RuntimeSpecWriterFn func(path string, o ...spec.Option) error
// Write an OCI runtime spec to the supplied path.
func (fn RuntimeSpecWriterFn) Write(path string, o ...spec.Option) error { return fn(path, o...) }
// A CachingBundler stores OCI containers, images, and layers. When asked to
// bundle a container for a new image the CachingBundler will extract and cache
// the image's layers as files on disk. The container's root filesystem is then
// created as an overlay atop the image's layers. The upper layer of this
// overlay is stored in memory on a tmpfs, and discarded once the container has
// finished running.
type CachingBundler struct {
root string // Directory under which per-container bundles are created.
layer LayerResolver // Resolves (and caches) image layers as lower dirs.
bundle BundleBootstrapper // Creates and mounts a bundle's root filesystem.
spec RuntimeSpecWriter // Writes a bundle's OCI runtime spec.
}
// NewCachingBundler returns a bundler that creates container filesystems as
// overlays on their image's layers, which are stored as extracted, overlay
// compatible directories of files.
func NewCachingBundler(root string) (*CachingBundler, error) {
// Cached layers live under root's overlay subdirectory; bundles live under
// its containers subdirectory.
l, err := NewCachingLayerResolver(filepath.Join(root, store.DirOverlays))
if err != nil {
return nil, errors.Wrap(err, errMkLayerStore)
}
s := &CachingBundler{
root: filepath.Join(root, store.DirContainers),
layer: l,
bundle: BundleBootstrapperFn(BootstrapBundle),
spec: RuntimeSpecWriterFn(spec.Write),
}
return s, nil
}
// Bundle returns an OCI bundle ready for use by an OCI runtime. The supplied
// image will be fetched and cached in the store if it does not already exist.
func (c *CachingBundler) Bundle(ctx context.Context, i ociv1.Image, id string, o ...spec.Option) (store.Bundle, error) {
cfg, err := i.ConfigFile()
if err != nil {
return nil, errors.Wrap(err, errReadConfigFile)
}
if err := store.Validate(i); err != nil {
return nil, err
}
layers, err := i.Layers()
if err != nil {
return nil, errors.Wrap(err, errGetLayers)
}
// Resolve each layer to an overlayfs-compatible lower dir, supplying its
// preceding layers as parents so whiteouts can be applied correctly.
lowerPaths := make([]string, len(layers))
for i := range layers {
p, err := c.layer.Resolve(ctx, layers[i], layers[:i]...)
if err != nil {
return nil, errors.Wrap(err, errResolveLayer)
}
lowerPaths[i] = p
}
path := filepath.Join(c.root, id)
b, err := c.bundle.Bootstrap(path, lowerPaths)
if err != nil {
return nil, errors.Wrap(err, errBootstrapBundle)
}
// Inject config derived from the image first, so that any options passed in
// by the caller will override it.
rootfs := filepath.Join(path, store.DirRootFS)
p, g := filepath.Join(rootfs, "etc", "passwd"), filepath.Join(rootfs, "etc", "group")
opts := append([]spec.Option{spec.WithImageConfig(cfg, p, g), spec.WithRootFS(store.DirRootFS, true)}, o...)
if err = c.spec.Write(filepath.Join(path, store.FileSpec), opts...); err != nil {
// Writing the spec failed - tear down the bundle we just bootstrapped.
_ = b.Cleanup()
return nil, errors.Wrap(err, errWriteRuntimeSpec)
}
return b, nil
}
// A CachingLayerResolver resolves an OCI layer to an overlay compatible
// directory on disk. The directory is created the first time a layer is
// resolved; subsequent calls return the cached directory.
type CachingLayerResolver struct {
root string // Directory under which extracted layers are cached by digest.
tarball TarballApplicator // Extracts layer tarballs, converting OCI whiteouts.
wdopts []NewLayerWorkdirOption // Workdir options; primarily a test seam.
}
// NewCachingLayerResolver returns a LayerResolver that extracts layers upon
// first resolution, returning cached layer paths on subsequent calls.
func NewCachingLayerResolver(root string) (*CachingLayerResolver, error) {
c := &CachingLayerResolver{
root: root,
tarball: layer.NewStackingExtractor(layer.NewWhiteoutHandler(layer.NewExtractHandler())),
}
// Ensure the cache root exists before any layers are resolved.
return c, os.MkdirAll(root, 0700)
}
// Resolve the supplied layer to a path suitable for use as an overlayfs lower
// layer directory. The first time a layer is resolved it will be extracted and
// cached as an overlayfs compatible directory of files, with any OCI whiteouts
// converted to overlayfs whiteouts.
func (s *CachingLayerResolver) Resolve(ctx context.Context, l ociv1.Layer, parents ...ociv1.Layer) (string, error) {
d, err := l.DiffID() // The uncompressed layer digest.
if err != nil {
return "", errors.Wrap(err, errGetDigest)
}
// Layers are cached under root by algorithm then hex, e.g. sha256/deadbeef.
path := filepath.Join(s.root, d.Algorithm, d.Hex)
if _, err = os.Stat(path); !errors.Is(err, os.ErrNotExist) {
// The path exists or we encountered an error other than ErrNotExist.
// Either way return the path and the wrapped error - errors.Wrap will
// return nil if the path exists.
return path, errors.Wrap(err, errStatLayer)
}
// Doesn't exist - cache it. It's possible multiple callers may hit this
// branch at once. This will result in multiple extractions to different
// temporary dirs. We ignore EEXIST errors from os.Rename, so callers
// that lose the race should return the path cached by the successful
// caller.
// This call to Uncompressed is what actually pulls a remote layer. In
// most cases we'll be using an image backed by our local image store.
tarball, err := l.Uncompressed()
if err != nil {
return "", errors.Wrap(err, errFetchLayer)
}
// NOTE(review): parent layers are assumed to already be cached at these
// paths (callers resolve layers in order) - confirm before reusing this
// method in a context where that doesn't hold.
parentPaths := make([]string, len(parents))
for i := range parents {
d, err := parents[i].DiffID()
if err != nil {
return "", errors.Wrap(err, errGetDigest)
}
parentPaths[i] = filepath.Join(s.root, d.Algorithm, d.Hex)
}
lw, err := NewLayerWorkdir(filepath.Join(s.root, d.Algorithm), d.Hex, parentPaths, s.wdopts...)
if err != nil {
return "", errors.Wrap(err, errMkWorkdir)
}
if err := s.tarball.Apply(ctx, tarball, lw.ApplyPath()); err != nil {
_ = lw.Cleanup()
return "", errors.Wrap(err, errApplyLayer)
}
// If newpath exists now (when it didn't above) we must have lost a race
// with another caller to cache this layer.
if err := os.Rename(lw.ResultPath(), path); resource.Ignore(os.IsExist, err) != nil {
_ = lw.Cleanup()
return "", errors.Wrap(err, errMvWorkdir)
}
return path, errors.Wrap(lw.Cleanup(), errCleanupWorkdir)
}
// A Bundle is an OCI runtime bundle. Its root filesystem is a temporary
// overlay atop its image's cached layers.
type Bundle struct {
path string // Directory containing the bundle.
mounts []Mount // Mounts to unmount at cleanup, in unmount order.
}
// BootstrapBundle creates and returns an OCI runtime bundle with a root
// filesystem backed by a temporary (tmpfs) overlay atop the supplied lower
// layer paths.
func BootstrapBundle(path string, parentLayerPaths []string) (Bundle, error) {
if err := os.MkdirAll(path, 0700); err != nil {
return Bundle{}, errors.Wrap(err, "cannot create bundle dir")
}
if err := os.Mkdir(filepath.Join(path, overlayDirTmpfs), 0700); err != nil {
_ = os.RemoveAll(path)
return Bundle{}, errors.Wrap(err, errMkOverlayDirTmpfs)
}
// The overlay's upper and work dirs live on a tmpfs, so writes made by the
// running container are discarded with the bundle.
tm := TmpFSMount{Mountpoint: filepath.Join(path, overlayDirTmpfs)}
if err := tm.Mount(); err != nil {
_ = os.RemoveAll(path)
return Bundle{}, errors.Wrap(err, "cannot mount workdir tmpfs")
}
for _, p := range []string{
filepath.Join(path, overlayDirTmpfs, overlayDirUpper),
filepath.Join(path, overlayDirTmpfs, overlayDirWork),
filepath.Join(path, store.DirRootFS),
} {
if err := os.Mkdir(p, 0700); err != nil {
_ = os.RemoveAll(path)
return Bundle{}, errors.Wrapf(err, "cannot create %s dir", p)
}
}
om := OverlayMount{
Lower: parentLayerPaths,
Upper: filepath.Join(path, overlayDirTmpfs, overlayDirUpper),
Work: filepath.Join(path, overlayDirTmpfs, overlayDirWork),
Mountpoint: filepath.Join(path, store.DirRootFS),
}
if err := om.Mount(); err != nil {
_ = os.RemoveAll(path)
return Bundle{}, errors.Wrap(err, "cannot mount workdir overlayfs")
}
// We pass mounts in the order they should be unmounted.
return Bundle{path: path, mounts: []Mount{om, tm}}, nil
}
// Path to the OCI bundle.
func (b Bundle) Path() string { return b.path }

// Cleanup the OCI bundle, unmounting its filesystems in the order they were
// recorded and then removing the bundle directory.
func (b Bundle) Cleanup() error {
	for i := range b.mounts {
		if err := b.mounts[i].Unmount(); err != nil {
			return errors.Wrap(err, "cannot unmount bundle filesystem")
		}
	}
	return errors.Wrap(os.RemoveAll(b.path), "cannot remove bundle")
}
// A Mount of a filesystem.
type Mount interface {
// Mount the filesystem.
Mount() error
// Unmount the filesystem.
Unmount() error
}
// A TmpFSMount represents a mount of type tmpfs.
type TmpFSMount struct {
Mountpoint string // Where the tmpfs should be mounted.
}
// An OverlayMount represents a mount of type overlay.
type OverlayMount struct { //nolint:revive // overlay.OverlayMount makes sense given that overlay.TmpFSMount exists too.
Mountpoint string // Where the overlay should be mounted.
Lower []string // Read-only lower directories.
Upper string // Writable upper directory.
Work string // Scratch directory used by overlayfs.
}
// A LayerWorkdir is a temporary directory used to produce an overlayfs layer
// from an OCI layer by applying the OCI layer to a temporary overlay mount.
// It's not possible to _directly_ create overlay whiteout files in an
// unprivileged user namespace because doing so requires CAP_MKNOD in the 'root'
// or 'initial' user namespace - whiteout files are actually character devices
// per "whiteouts and opaque directories" at
// https://www.kernel.org/doc/Documentation/filesystems/overlayfs.txt
//
// We can however create overlay whiteout files indirectly by creating an
// overlay where the parent OCI layers are the lower overlayfs layers, and
// applying the layer to be cached to said fs. Doing so will produce an upper
// overlayfs layer that we can cache. This layer will be a valid lower layer
// (complete with overlay whiteout files) for either subsequent layers from the
// OCI image, or the final container root filesystem layer.
type LayerWorkdir struct {
overlay Mount // The temporary overlay mount the layer is applied to.
path string // The temporary directory backing the mount.
}
// NewOverlayMountFn creates an overlay mount.
type NewOverlayMountFn func(path string, parentLayerPaths []string) Mount
// WorkDirOptions configure how a new layer workdir is created.
type WorkDirOptions struct {
// NewOverlayMount creates the workdir's overlay mount.
NewOverlayMount NewOverlayMountFn
}
// NewLayerWorkdirOption configures how a new layer workdir is created.
type NewLayerWorkdirOption func(*WorkDirOptions)
// WithNewOverlayMountFn configures the function a new layer workdir uses to
// create its overlay mount.
func WithNewOverlayMountFn(fn NewOverlayMountFn) NewLayerWorkdirOption {
	return func(o *WorkDirOptions) { o.NewOverlayMount = fn }
}
// DefaultNewOverlayMount is the default OverlayMount created by NewLayerWorkdir.
func DefaultNewOverlayMount(path string, parentLayerPaths []string) Mount {
	lower := parentLayerPaths
	if len(lower) == 0 {
		// With no parent layers, fall back to the workdir's empty lower dir.
		lower = []string{filepath.Join(path, overlayDirLower)}
	}
	return OverlayMount{
		Lower:      lower,
		Upper:      filepath.Join(path, overlayDirUpper),
		Work:       filepath.Join(path, overlayDirWork),
		Mountpoint: filepath.Join(path, overlayDirMerged),
	}
}
// NewLayerWorkdir returns a temporary directory used to produce an overlayfs
// layer from an OCI layer.
func NewLayerWorkdir(dir, digest string, parentLayerPaths []string, o ...NewLayerWorkdirOption) (LayerWorkdir, error) {
opts := &WorkDirOptions{
NewOverlayMount: DefaultNewOverlayMount,
}
for _, fn := range o {
fn(opts)
}
if err := os.MkdirAll(dir, 0700); err != nil {
return LayerWorkdir{}, errors.Wrap(err, errMkdirTemp)
}
// The digest prefix keeps concurrent extractions (of this or other layers)
// in distinct, randomly named directories.
tmp, err := os.MkdirTemp(dir, fmt.Sprintf("%s-", digest))
if err != nil {
return LayerWorkdir{}, errors.Wrap(err, errMkdirTemp)
}
for _, d := range []string{overlayDirMerged, overlayDirUpper, overlayDirLower, overlayDirWork} {
if err := os.Mkdir(filepath.Join(tmp, d), 0700); err != nil {
// Best-effort cleanup; the caller only sees the mkdir error.
_ = os.RemoveAll(tmp)
return LayerWorkdir{}, errors.Wrapf(err, errFmtMkOverlayDir, d)
}
}
om := opts.NewOverlayMount(tmp, parentLayerPaths)
if err := om.Mount(); err != nil {
_ = os.RemoveAll(tmp)
return LayerWorkdir{}, errors.Wrap(err, errMountOverlayfs)
}
return LayerWorkdir{overlay: om, path: tmp}, nil
}
// ApplyPath returns the path an OCI layer should be applied (i.e. extracted)
// to in order to create an overlayfs layer.
func (d LayerWorkdir) ApplyPath() string { return filepath.Join(d.path, overlayDirMerged) }

// ResultPath returns the path of the resulting overlayfs layer.
func (d LayerWorkdir) ResultPath() string { return filepath.Join(d.path, overlayDirUpper) }
// Cleanup unmounts the workdir's overlayfs and removes its temporary
// directory.
func (d LayerWorkdir) Cleanup() error {
	err := d.overlay.Unmount()
	if err != nil {
		return errors.Wrap(err, "cannot unmount workdir overlayfs")
	}
	err = os.RemoveAll(d.path)
	return errors.Wrap(err, "cannot remove workdir")
}

View File

@ -1,59 +0,0 @@
//go:build linux
/*
Copyright 2022 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package overlay
import (
"fmt"
"strings"
"golang.org/x/sys/unix"
"github.com/crossplane/crossplane-runtime/pkg/errors"
)
// NOTE(negz): Technically _all_ of the overlay implementation is only useful on
// Linux, but we want to support building what we can on other operating systems
// (e.g. Darwin) to make it possible for folks running them to ensure that code
// compiles and passes tests during development. Avoid adding code to this file
// unless it actually needs Linux to run.
// Mount mounts a fresh tmpfs at the mountpoint.
func (m TmpFSMount) Mount() error {
	err := unix.Mount("tmpfs", m.Mountpoint, "tmpfs", 0, "")
	return errors.Wrapf(err, "cannot mount tmpfs at %q", m.Mountpoint)
}

// Unmount unmounts the tmpfs mount.
func (m TmpFSMount) Unmount() error {
	err := unix.Unmount(m.Mountpoint, 0)
	return errors.Wrapf(err, "cannot unmount tmpfs at %q", m.Mountpoint)
}
// Mount mounts an overlay filesystem at the mountpoint, combining the lower
// dirs beneath the upper dir.
func (m OverlayMount) Mount() error {
	opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(m.Lower, ":"), m.Upper, m.Work)
	return errors.Wrapf(unix.Mount("overlay", m.Mountpoint, "overlay", 0, opts), "cannot mount overlayfs at %q", m.Mountpoint)
}

// Unmount unmounts the overlay mount.
func (m OverlayMount) Unmount() error {
	return errors.Wrapf(unix.Unmount(m.Mountpoint, 0), "cannot unmount overlayfs at %q", m.Mountpoint)
}

View File

@ -1,37 +0,0 @@
//go:build !linux
/*
Copyright 2022 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package overlay
import (
"github.com/crossplane/crossplane-runtime/pkg/errors"
)
// errLinuxOnly is returned by every mount operation on non-Linux platforms;
// overlayfs is a Linux kernel feature.
// BUG FIX: the message previously read "only only supported".
const errLinuxOnly = "overlayfs is only supported on Linux"
// Mount returns an error on non-Linux systems.
func (m TmpFSMount) Mount() error { return errors.New(errLinuxOnly) }
// Unmount returns an error on non-Linux systems.
func (m TmpFSMount) Unmount() error { return errors.New(errLinuxOnly) }
// Mount returns an error on non-Linux systems.
func (m OverlayMount) Mount() error { return errors.New(errLinuxOnly) }
// Unmount returns an error on non-Linux systems.
func (m OverlayMount) Unmount() error { return errors.New(errLinuxOnly) }

View File

@ -1,453 +0,0 @@
/*
Copyright 2022 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package overlay
import (
"context"
"io"
"os"
"path/filepath"
"strings"
"testing"
"github.com/google/go-cmp/cmp"
ociv1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/crossplane/crossplane-runtime/pkg/errors"
"github.com/crossplane/crossplane-runtime/pkg/test"
"github.com/crossplane/crossplane/internal/oci/spec"
"github.com/crossplane/crossplane/internal/oci/store"
)
// MockImage is a mock ociv1.Image whose relevant methods are backed by
// caller-supplied functions.
type MockImage struct {
ociv1.Image
MockDigest func() (ociv1.Hash, error)
MockConfigFile func() (*ociv1.ConfigFile, error)
MockLayers func() ([]ociv1.Layer, error)
}
func (i *MockImage) Digest() (ociv1.Hash, error) { return i.MockDigest() }
func (i *MockImage) ConfigFile() (*ociv1.ConfigFile, error) { return i.MockConfigFile() }
func (i *MockImage) Layers() ([]ociv1.Layer, error) { return i.MockLayers() }
// MockLayer is a mock ociv1.Layer backed by caller-supplied functions.
type MockLayer struct {
ociv1.Layer
MockDiffID func() (ociv1.Hash, error)
MockUncompressed func() (io.ReadCloser, error)
}
func (l *MockLayer) DiffID() (ociv1.Hash, error) { return l.MockDiffID() }
func (l *MockLayer) Uncompressed() (io.ReadCloser, error) { return l.MockUncompressed() }
// MockLayerResolver is a LayerResolver that returns canned values.
type MockLayerResolver struct {
path string
err error
}
func (r *MockLayerResolver) Resolve(_ context.Context, _ ociv1.Layer, _ ...ociv1.Layer) (string, error) {
return r.path, r.err
}
// MockTarballApplicator is a TarballApplicator that returns a canned error.
type MockTarballApplicator struct{ err error }
func (a *MockTarballApplicator) Apply(_ context.Context, _ io.Reader, _ string) error { return a.err }
// MockRuntimeSpecWriter is a RuntimeSpecWriter that returns a canned error.
type MockRuntimeSpecWriter struct{ err error }
func (c *MockRuntimeSpecWriter) Write(_ string, _ ...spec.Option) error { return c.err }
// MockCloser wraps an io.Reader, adding a Close that returns a canned error.
type MockCloser struct {
io.Reader
err error
}
func (c *MockCloser) Close() error { return c.err }
// MockMount is a Mount whose Mount and Unmount return a canned error.
type MockMount struct{ err error }
func (m *MockMount) Mount() error { return m.err }
func (m *MockMount) Unmount() error { return m.err }
// TestBundle exercises CachingBundler.Bundle with mocked collaborators,
// covering each error path and the happy path via table-driven cases.
func TestBundle(t *testing.T) {
errBoom := errors.New("boom")
type params struct {
layer LayerResolver
bundle BundleBootstrapper
spec RuntimeSpecWriter
}
type args struct {
ctx context.Context
i ociv1.Image
id string
o []spec.Option
}
type want struct {
b store.Bundle
err error
}
cases := map[string]struct {
reason string
params params
args args
want want
}{
"ReadConfigFileError": {
reason: "We should return any error encountered reading the image's config file.",
params: params{},
args: args{
i: &MockImage{
MockConfigFile: func() (*ociv1.ConfigFile, error) { return nil, errBoom },
},
},
want: want{
err: errors.Wrap(errBoom, errReadConfigFile),
},
},
"GetLayersError": {
reason: "We should return any error encountered reading the image's layers.",
params: params{},
args: args{
i: &MockImage{
MockConfigFile: func() (*ociv1.ConfigFile, error) { return nil, nil },
MockLayers: func() ([]ociv1.Layer, error) { return nil, errBoom },
},
},
want: want{
err: errors.Wrap(errBoom, errGetLayers),
},
},
"ResolveLayerError": {
reason: "We should return any error encountered opening an image's layers.",
params: params{
layer: &MockLayerResolver{err: errBoom},
},
args: args{
i: &MockImage{
MockConfigFile: func() (*ociv1.ConfigFile, error) { return nil, nil },
MockLayers: func() ([]ociv1.Layer, error) {
return []ociv1.Layer{&MockLayer{}}, nil
},
},
},
want: want{
err: errors.Wrap(errBoom, errResolveLayer),
},
},
"BootstrapBundleError": {
reason: "We should return any error encountered bootstrapping a bundle rootfs.",
params: params{
layer: &MockLayerResolver{err: nil},
bundle: BundleBootstrapperFn(func(path string, parentLayerPaths []string) (Bundle, error) {
return Bundle{}, errBoom
}),
},
args: args{
i: &MockImage{
MockConfigFile: func() (*ociv1.ConfigFile, error) { return nil, nil },
MockLayers: func() ([]ociv1.Layer, error) { return nil, nil },
},
},
want: want{
err: errors.Wrap(errBoom, errBootstrapBundle),
},
},
"WriteSpecError": {
reason: "We should return any error encountered writing a runtime spec to the bundle.",
params: params{
layer: &MockLayerResolver{err: nil},
bundle: BundleBootstrapperFn(func(path string, parentLayerPaths []string) (Bundle, error) {
return Bundle{}, nil
}),
spec: &MockRuntimeSpecWriter{err: errBoom},
},
args: args{
i: &MockImage{
MockConfigFile: func() (*ociv1.ConfigFile, error) { return nil, nil },
MockLayers: func() ([]ociv1.Layer, error) { return nil, nil },
},
},
want: want{
err: errors.Wrap(errBoom, errWriteRuntimeSpec),
},
},
"Success": {
reason: "We should successfully return our Bundle.",
params: params{
layer: &MockLayerResolver{err: nil},
bundle: BundleBootstrapperFn(func(path string, parentLayerPaths []string) (Bundle, error) {
return Bundle{path: "/coolbundle"}, nil
}),
spec: &MockRuntimeSpecWriter{err: nil},
},
args: args{
i: &MockImage{
MockConfigFile: func() (*ociv1.ConfigFile, error) { return nil, nil },
MockLayers: func() ([]ociv1.Layer, error) {
return []ociv1.Layer{&MockLayer{}}, nil
},
},
},
want: want{
b: Bundle{path: "/coolbundle"},
},
},
}
for name, tc := range cases {
t.Run(name, func(t *testing.T) {
// Each case gets its own scratch dir so bundles can't collide.
tmp, err := os.MkdirTemp(os.TempDir(), strings.ReplaceAll(t.Name(), string(os.PathSeparator), "_"))
if err != nil {
t.Fatal(err.Error())
}
defer os.RemoveAll(tmp)
c := &CachingBundler{
root: tmp,
layer: tc.params.layer,
bundle: tc.params.bundle,
spec: tc.params.spec,
}
got, err := c.Bundle(tc.args.ctx, tc.args.i, tc.args.id, tc.args.o...)
if diff := cmp.Diff(tc.want.b, got, cmp.AllowUnexported(Bundle{})); diff != "" {
t.Errorf("\n%s\nBundle(...): -want, +got:\n%s", tc.reason, diff)
}
if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" {
t.Errorf("\n%s\nBundle(...): -want error, +got error:\n%s", tc.reason, diff)
}
})
}
}
// TestResolve exercises CachingLayerResolver.Resolve with mocked layers and
// mounts, covering cache hits, each error path, and a fresh extraction.
func TestResolve(t *testing.T) {
errBoom := errors.New("boom")
type params struct {
tarball TarballApplicator
wdopts []NewLayerWorkdirOption
}
type args struct {
ctx context.Context
l ociv1.Layer
parents []ociv1.Layer
}
type want struct {
path string
err error
}
cases := map[string]struct {
reason string
files map[string][]byte
params params
args args
want want
}{
"DiffIDError": {
reason: "We should return any error encountered getting the uncompressed layer's digest.",
args: args{
l: &MockLayer{
MockDiffID: func() (ociv1.Hash, error) { return ociv1.Hash{}, errBoom },
},
},
want: want{
err: errors.Wrap(errBoom, errGetDigest),
},
},
"SuccessExistingLayer": {
reason: "We should skip straight to returning the layer if it already exists.",
files: map[string][]byte{
"sha256/deadbeef": nil,
},
args: args{
l: &MockLayer{
MockDiffID: func() (ociv1.Hash, error) {
return ociv1.Hash{Algorithm: "sha256", Hex: "deadbeef"}, nil
},
},
},
want: want{
path: "/sha256/deadbeef",
},
},
"FetchLayerError": {
reason: "We should return any error we encounter while fetching a layer.",
args: args{
l: &MockLayer{
MockDiffID: func() (ociv1.Hash, error) {
return ociv1.Hash{Algorithm: "sha256", Hex: "deadbeef"}, nil
},
MockUncompressed: func() (io.ReadCloser, error) { return nil, errBoom },
},
},
want: want{
err: errors.Wrap(errBoom, errFetchLayer),
},
},
"ParentDiffIDError": {
reason: "We should return any error we encounter while fetching a parent's uncompressed digest.",
args: args{
l: &MockLayer{
MockDiffID: func() (ociv1.Hash, error) {
return ociv1.Hash{Algorithm: "sha256", Hex: "deadbeef"}, nil
},
MockUncompressed: func() (io.ReadCloser, error) { return nil, nil },
},
parents: []ociv1.Layer{
&MockLayer{
MockDiffID: func() (ociv1.Hash, error) {
return ociv1.Hash{}, errBoom
},
},
},
},
want: want{
err: errors.Wrap(errBoom, errGetDigest),
},
},
"NewLayerWorkDirMountOverlayError": {
reason: "We should return any error we encounter when mounting our overlayfs",
params: params{
wdopts: []NewLayerWorkdirOption{
WithNewOverlayMountFn(func(path string, parentLayerPaths []string) Mount {
return &MockMount{err: errBoom}
}),
},
},
args: args{
l: &MockLayer{
MockDiffID: func() (ociv1.Hash, error) {
return ociv1.Hash{Algorithm: "sha256", Hex: "deadbeef"}, nil
},
MockUncompressed: func() (io.ReadCloser, error) { return nil, nil },
},
parents: []ociv1.Layer{
&MockLayer{
MockDiffID: func() (ociv1.Hash, error) {
return ociv1.Hash{Algorithm: "sha256", Hex: "badc0ffee"}, nil
},
},
},
},
want: want{
err: errors.Wrap(errors.Wrap(errBoom, errMountOverlayfs), errMkWorkdir),
},
},
"ApplyTarballError": {
reason: "We should return any error we encounter while applying our layer tarball.",
params: params{
tarball: &MockTarballApplicator{err: errBoom},
wdopts: []NewLayerWorkdirOption{
WithNewOverlayMountFn(func(path string, parentLayerPaths []string) Mount {
return &MockMount{err: nil}
}),
},
},
args: args{
l: &MockLayer{
MockDiffID: func() (ociv1.Hash, error) {
return ociv1.Hash{Algorithm: "sha256", Hex: "deadbeef"}, nil
},
MockUncompressed: func() (io.ReadCloser, error) { return nil, nil },
},
parents: []ociv1.Layer{
&MockLayer{
MockDiffID: func() (ociv1.Hash, error) {
return ociv1.Hash{Algorithm: "sha256", Hex: "badc0ffee"}, nil
},
},
},
},
want: want{
err: errors.Wrap(errBoom, errApplyLayer),
},
},
"SuccessNewlyCachedLayer": {
reason: "We should return the path to our successfully cached layer.",
params: params{
tarball: &MockTarballApplicator{},
wdopts: []NewLayerWorkdirOption{
WithNewOverlayMountFn(func(path string, parentLayerPaths []string) Mount {
return &MockMount{err: nil}
}),
},
},
args: args{
l: &MockLayer{
MockDiffID: func() (ociv1.Hash, error) {
return ociv1.Hash{Algorithm: "sha256", Hex: "deadbeef"}, nil
},
MockUncompressed: func() (io.ReadCloser, error) { return nil, nil },
},
parents: []ociv1.Layer{
&MockLayer{
MockDiffID: func() (ociv1.Hash, error) {
return ociv1.Hash{Algorithm: "sha256", Hex: "badc0ffee"}, nil
},
},
},
},
want: want{
path: "/sha256/deadbeef",
},
},
}
for name, tc := range cases {
t.Run(name, func(t *testing.T) {
// Each case gets its own scratch cache root.
tmp, err := os.MkdirTemp(os.TempDir(), strings.ReplaceAll(t.Name(), string(os.PathSeparator), "_"))
if err != nil {
t.Fatal(err.Error())
}
defer os.RemoveAll(tmp)
// Pre-populate any files the case expects to find in the cache.
for name, data := range tc.files {
path := filepath.Join(tmp, name)
_ = os.MkdirAll(filepath.Dir(path), 0700)
_ = os.WriteFile(path, data, 0600)
}
c := &CachingLayerResolver{
root: tmp,
tarball: tc.params.tarball,
wdopts: tc.params.wdopts,
}
// Prepend our randomly named tmp dir to our wanted layer path.
wantPath := tc.want.path
if tc.want.path != "" {
wantPath = filepath.Join(tmp, tc.want.path)
}
path, err := c.Resolve(tc.args.ctx, tc.args.l, tc.args.parents...)
if diff := cmp.Diff(wantPath, path); diff != "" {
t.Errorf("\n%s\nResolve(...): -want, +got:\n%s", tc.reason, diff)
}
if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" {
t.Errorf("\n%s\nResolve(...): -want error, +got error:\n%s", tc.reason, diff)
}
})
}
}

View File

@ -1,371 +0,0 @@
/*
Copyright 2022 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package store implements OCI container storage.
package store
import (
"context"
"crypto/sha256"
"fmt"
"io"
"os"
"path/filepath"
"github.com/google/go-containerregistry/pkg/name"
ociv1 "github.com/google/go-containerregistry/pkg/v1"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/partial"
"github.com/google/go-containerregistry/pkg/v1/types"
"github.com/google/go-containerregistry/pkg/v1/validate"
"golang.org/x/sync/errgroup"
"github.com/crossplane/crossplane-runtime/pkg/errors"
"github.com/crossplane/crossplane/internal/oci/spec"
)
// Store directories.
// Shorter is better, to avoid passing too much data to the mount syscall when
// creating an overlay mount with many layers as lower directories.
const (
DirDigests = "d"
DirImages = "i"
DirOverlays = "o"
DirContainers = "c"
)
// Bundle paths.
// NOTE(review): FileConfig and FileSpec resolve to the same filename within a
// bundle - confirm this aliasing is intentional before relying on it.
const (
DirRootFS = "rootfs"
FileConfig = "config.json"
FileSpec = "config.json"
)
// Error strings used when wrapping errors returned by this package.
const (
errMkDigestStore = "cannot make digest store"
errReadDigest = "cannot read digest"
errParseDigest = "cannot parse digest"
errStoreDigest = "cannot store digest"
errPartial = "cannot complete partial implementation" // This should never happen.
errInvalidImage = "stored image is invalid"
errGetDigest = "cannot get digest"
errMkAlgoDir = "cannot create store directory"
errGetRawConfigFile = "cannot get image config file"
errMkTmpfile = "cannot create temporary layer file"
errReadLayer = "cannot read layer"
errMvTmpfile = "cannot move temporary layer file"
errOpenConfigFile = "cannot open image config file"
errWriteLayers = "cannot write image layers"
errInvalidLayer = "stored layer is invalid"
errWriteConfigFile = "cannot write image config file"
errGetLayers = "cannot get image layers"
errWriteLayer = "cannot write layer"
errOpenLayer = "cannot open layer"
errStatLayer = "cannot stat layer"
errCheckExistence = "cannot determine whether layer exists"
errFmtTooManyLayers = "image has too many layers: %d (max %d)"
)
var (
// MaxLayers is the maximum number of layers an image can have.
MaxLayers = 256
)
// A Bundler prepares OCI runtime bundles for use by an OCI runtime.
type Bundler interface {
// Bundle returns an OCI bundle ready for use by an OCI runtime. The id
// distinguishes concurrent bundles of the same image; options customize
// the generated runtime spec.
Bundle(ctx context.Context, i ociv1.Image, id string, o ...spec.Option) (Bundle, error)
}
// A Bundle for use by an OCI runtime.
type Bundle interface {
// Path of the OCI bundle.
Path() string
// Cleanup the OCI bundle after the container has finished running.
Cleanup() error
}
// A Digest store is used to map OCI references to digests. Each mapping is a
// file. The filename is the SHA256 hash of the reference, and the content is
// the digest in algo:hex format.
type Digest struct{ root string }
// NewDigest returns a store used to map OCI references to digests.
// The store lives under root/DirDigests/sha256.
func NewDigest(root string) (*Digest, error) {
// We only use sha256 hashes. The sha256 subdirectory is for symmetry with
// the other stores, which at least hypothetically support other hashes.
path := filepath.Join(root, DirDigests, "sha256")
err := os.MkdirAll(path, 0700)
// Note we return a usable store alongside any MkdirAll error.
return &Digest{root: path}, errors.Wrap(err, errMkDigestStore)
}
// Hash returns the stored hash for the supplied reference.
func (d *Digest) Hash(r name.Reference) (ociv1.Hash, error) {
b, err := os.ReadFile(d.path(r))
if err != nil {
return ociv1.Hash{}, errors.Wrap(err, errReadDigest)
}
// The file content is expected to be an algo:hex digest string.
h, err := ociv1.NewHash(string(b))
return h, errors.Wrap(err, errParseDigest)
}
// WriteHash maps the supplied reference to the supplied hash.
// An existing mapping for the reference is overwritten.
func (d *Digest) WriteHash(r name.Reference, h ociv1.Hash) error {
return errors.Wrap(os.WriteFile(d.path(r), []byte(h.String()), 0600), errStoreDigest)
}
// path returns the file that stores the digest for the supplied reference;
// the filename is the hex SHA256 of the reference string.
func (d *Digest) path(r name.Reference) string {
return filepath.Join(d.root, fmt.Sprintf("%x", sha256.Sum256([]byte(r.String()))))
}
// An Image store is used to store OCI images and their layers. It uses a
// similar disk layout to the blobs directory of an OCI image layout, but may
// contain blobs for more than one image. Layers are stored as uncompressed
// tarballs in order to speed up extraction by the uncompressed Bundler, which
// extracts a fresh root filesystem each time a container is run.
// https://github.com/opencontainers/image-spec/blob/v1.0/image-layout.md
type Image struct{ root string }
// NewImage returns a store used to store OCI images and their layers.
// The store lives under root/DirImages; directories are created lazily by
// the Write methods.
func NewImage(root string) *Image {
return &Image{root: filepath.Join(root, DirImages)}
}
// Image returns the stored image with the supplied hash, if any.
// The returned image is backed lazily by files under the store root; an
// error is returned if the stored image fails fast validation.
func (i *Image) Image(h ociv1.Hash) (ociv1.Image, error) {
uncompressed := image{root: i.root, h: h}
// NOTE(negz): At the time of writing UncompressedToImage doesn't actually
// return an error.
oi, err := partial.UncompressedToImage(uncompressed)
if err != nil {
return nil, errors.Wrap(err, errPartial)
}
// This validates the image's manifest, config file, and layers. The
// manifest and config file are validated fairly extensively (i.e. their
// size, digest, etc must be correct). Layers are only validated to exist.
return oi, errors.Wrap(validate.Image(oi, validate.Fast), errInvalidImage)
}
// WriteImage writes the supplied image to the store. Writing is a no-op if
// the image is already stored. The config file is written to a temporary
// file and renamed into place so concurrent readers never observe a partial
// write; layers are then written concurrently.
func (i *Image) WriteImage(img ociv1.Image) error { //nolint:gocyclo // TODO(phisco): Refactor to reduce complexity.
	d, err := img.Digest()
	if err != nil {
		return errors.Wrap(err, errGetDigest)
	}
	if _, err = i.Image(d); err == nil {
		// Image already exists in the store.
		return nil
	}
	path := filepath.Join(i.root, d.Algorithm, d.Hex)
	if err := os.MkdirAll(filepath.Join(i.root, d.Algorithm), 0700); err != nil {
		return errors.Wrap(err, errMkAlgoDir)
	}
	raw, err := img.RawConfigFile()
	if err != nil {
		return errors.Wrap(err, errGetRawConfigFile)
	}
	// CreateTemp creates a file with permission mode 0600.
	tmp, err := os.CreateTemp(filepath.Join(i.root, d.Algorithm), fmt.Sprintf("%s-", d.Hex))
	if err != nil {
		return errors.Wrap(err, errMkTmpfile)
	}
	// Write through the handle CreateTemp returned and close it before the
	// rename. The previous implementation wrote via os.WriteFile(tmp.Name())
	// and never closed tmp, leaking a file descriptor per stored image.
	if _, err := tmp.Write(raw); err != nil {
		_ = tmp.Close()
		_ = os.Remove(tmp.Name())
		return errors.Wrap(err, errWriteConfigFile)
	}
	if err := tmp.Close(); err != nil {
		_ = os.Remove(tmp.Name())
		return errors.Wrap(err, errWriteConfigFile)
	}
	// TODO(negz): Ignore os.ErrExist? We might get one here if two callers race
	// to cache the same image.
	if err := os.Rename(tmp.Name(), path); err != nil {
		_ = os.Remove(tmp.Name())
		return errors.Wrap(err, errMvTmpfile)
	}
	layers, err := img.Layers()
	if err != nil {
		return errors.Wrap(err, errGetLayers)
	}
	if err := Validate(img); err != nil {
		return err
	}
	// Write all layers concurrently; the first failure surfaces via Wait.
	g := &errgroup.Group{}
	for _, l := range layers {
		l := l // Pin loop var.
		g.Go(func() error {
			return i.WriteLayer(l)
		})
	}
	return errors.Wrap(g.Wait(), errWriteLayers)
}
// Layer returns the stored layer with the supplied hash, if any.
// The hash is the diff ID, i.e. the digest of the uncompressed tarball.
func (i *Image) Layer(h ociv1.Hash) (ociv1.Layer, error) {
uncompressed := layer{root: i.root, h: h}
// NOTE(negz): At the time of writing UncompressedToLayer doesn't actually
// return an error.
ol, err := partial.UncompressedToLayer(uncompressed)
if err != nil {
return nil, errors.Wrap(err, errPartial)
}
// This just validates that the layer exists on disk.
return ol, errors.Wrap(validate.Layer(ol, validate.Fast), errInvalidLayer)
}
// WriteLayer writes the supplied layer to the store as an uncompressed
// tarball. Writing is a no-op if the layer is already stored. The tarball is
// written to a temporary file and renamed into place so concurrent readers
// never observe a partial layer.
func (i *Image) WriteLayer(l ociv1.Layer) error {
	d, err := l.DiffID() // The digest of the uncompressed layer.
	if err != nil {
		return errors.Wrap(err, errGetDigest)
	}
	if _, err := i.Layer(d); err == nil {
		// Layer already exists in the store.
		return nil
	}
	if err := os.MkdirAll(filepath.Join(i.root, d.Algorithm), 0700); err != nil {
		return errors.Wrap(err, errMkAlgoDir)
	}
	// CreateTemp creates a file with permission mode 0600.
	tmp, err := os.CreateTemp(filepath.Join(i.root, d.Algorithm), fmt.Sprintf("%s-", d.Hex))
	if err != nil {
		return errors.Wrap(err, errMkTmpfile)
	}
	// This call to Uncompressed is what actually pulls the layer.
	u, err := l.Uncompressed()
	if err != nil {
		_ = tmp.Close()
		_ = os.Remove(tmp.Name())
		return errors.Wrap(err, errReadLayer)
	}
	if _, err := copyChunks(tmp, u, 1024*1024); err != nil { // Copy 1MB chunks.
		_ = u.Close()
		_ = tmp.Close()
		_ = os.Remove(tmp.Name())
		return errors.Wrap(err, errWriteLayer)
	}
	// Close both the layer reader and the temp file. The previous
	// implementation closed neither, leaking two descriptors per layer.
	_ = u.Close() // Best effort; all bytes were already read successfully.
	if err := tmp.Close(); err != nil {
		_ = os.Remove(tmp.Name())
		return errors.Wrap(err, errWriteLayer)
	}
	// TODO(negz): Ignore os.ErrExist? We might get one here if two callers race
	// to cache the same layer.
	if err := os.Rename(tmp.Name(), filepath.Join(i.root, d.Algorithm, d.Hex)); err != nil {
		_ = os.Remove(tmp.Name())
		return errors.Wrap(err, errMvTmpfile)
	}
	return nil
}
// image implements partial.UncompressedImage per
// https://pkg.go.dev/github.com/google/go-containerregistry/pkg/v1/partial
type image struct {
root string
h ociv1.Hash
}
// RawConfigFile reads the stored config file blob at root/algo/hex.
func (i image) RawConfigFile() ([]byte, error) {
b, err := os.ReadFile(filepath.Join(i.root, i.h.Algorithm, i.h.Hex))
return b, errors.Wrap(err, errOpenConfigFile)
}
// MediaType reports all stored images as OCI manifests.
func (i image) MediaType() (types.MediaType, error) {
return types.OCIManifestSchema1, nil
}
// LayerByDiffID returns a lazy handle to the stored layer; existence is not
// checked here.
func (i image) LayerByDiffID(h ociv1.Hash) (partial.UncompressedLayer, error) {
return layer{root: i.root, h: h}, nil
}
// layer implements partial.UncompressedLayer per
// https://pkg.go.dev/github.com/google/go-containerregistry/pkg/v1/partial
type layer struct {
root string
h ociv1.Hash
}
// DiffID returns the layer's uncompressed digest, which is also its filename
// in the store.
func (l layer) DiffID() (v1.Hash, error) {
return l.h, nil
}
// Uncompressed opens the stored uncompressed tarball for reading. The caller
// is responsible for closing it.
func (l layer) Uncompressed() (io.ReadCloser, error) {
f, err := os.Open(filepath.Join(l.root, l.h.Algorithm, l.h.Hex))
return f, errors.Wrap(err, errOpenLayer)
}
// MediaType reports all stored layers as uncompressed OCI layers.
func (l layer) MediaType() (types.MediaType, error) {
return types.OCIUncompressedLayer, nil
}
// Exists satisfies partial.Exists, which is used to validate the image when
// validate.Image or validate.Layer is run with the validate.Fast option.
func (l layer) Exists() (bool, error) {
_, err := os.Stat(filepath.Join(l.root, l.h.Algorithm, l.h.Hex))
if errors.Is(err, os.ErrNotExist) {
return false, nil
}
if err != nil {
return false, errors.Wrap(err, errStatLayer)
}
return true, nil
}
// copyChunks pleases gosec per https://github.com/securego/gosec/pull/433.
// Like Copy it reads from src until EOF, it does not treat an EOF from Read as
// an error to be reported.
//
// NOTE(negz): This rule confused me at first because io.Copy appears to use a
// buffer, but in fact it bypasses it if src/dst is an io.WriterTo/ReaderFrom.
func copyChunks(dst io.Writer, src io.Reader, chunkSize int64) (int64, error) {
var written int64
for {
w, err := io.CopyN(dst, src, chunkSize)
written += w
if errors.Is(err, io.EOF) {
return written, nil
}
if err != nil {
return written, err
}
}
}
// Validate returns an error if the supplied image is invalid, e.g. it has
// more than MaxLayers layers.
func Validate(img ociv1.Image) error {
	layers, err := img.Layers()
	if err != nil {
		return errors.Wrap(err, errGetLayers)
	}
	if n := len(layers); n > MaxLayers {
		return errors.Errorf(errFmtTooManyLayers, n, MaxLayers)
	}
	return nil
}

View File

@ -1,353 +0,0 @@
/*
Copyright 2022 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package store implements OCI container storage.
package store
import (
"io"
"os"
"path/filepath"
"strings"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/google/go-containerregistry/pkg/name"
ociv1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/crossplane/crossplane-runtime/pkg/errors"
"github.com/crossplane/crossplane-runtime/pkg/test"
)
// MockImage is a test double for ociv1.Image. Only the mocked methods are
// overridden; all others panic via the embedded nil interface.
type MockImage struct {
ociv1.Image
MockDigest func() (ociv1.Hash, error)
MockRawConfigFile func() ([]byte, error)
MockLayers func() ([]ociv1.Layer, error)
}
func (i *MockImage) Digest() (ociv1.Hash, error) { return i.MockDigest() }
func (i *MockImage) RawConfigFile() ([]byte, error) { return i.MockRawConfigFile() }
func (i *MockImage) Layers() ([]ociv1.Layer, error) { return i.MockLayers() }
// MockLayer is a test double for ociv1.Layer.
type MockLayer struct {
ociv1.Layer
MockDiffID func() (ociv1.Hash, error)
MockUncompressed func() (io.ReadCloser, error)
}
func (l *MockLayer) DiffID() (ociv1.Hash, error) { return l.MockDiffID() }
func (l *MockLayer) Uncompressed() (io.ReadCloser, error) { return l.MockUncompressed() }
// TestHash exercises Digest.Hash against a temporary on-disk store seeded
// with the files in each case's "files" map (keyed by the SHA256 of the
// reference string).
func TestHash(t *testing.T) {
type args struct {
r name.Reference
}
type want struct {
h ociv1.Hash
err error
}
cases := map[string]struct {
reason string
files map[string][]byte
args args
want want
}{
"ReadError": {
reason: "We should return any error encountered reading the stored hash.",
args: args{
r: name.MustParseReference("example.org/image"),
},
want: want{
// Note we're matching with cmpopts.EquateErrors, which only
// cares that the returned error errors.Is() this one.
err: os.ErrNotExist,
},
},
"ParseError": {
reason: "We should return any error encountered reading the stored hash.",
// The key is the SHA256 of "example.org/image"; "wat" is not a
// valid algo:hex digest, so parsing fails.
files: map[string][]byte{
"276640b463239572f62edd97253f05e0de082e9888f57dac0b83d2149efa59e0": []byte("wat"),
},
args: args{
r: name.MustParseReference("example.org/image"),
},
want: want{
err: cmpopts.AnyError,
},
},
"SuccessfulRead": {
reason: "We should return the stored hash.",
files: map[string][]byte{
"276640b463239572f62edd97253f05e0de082e9888f57dac0b83d2149efa59e0": []byte("sha256:c34045c1a1db8d1b3fca8a692198466952daae07eaf6104b4c87ed3b55b6af1b"),
},
args: args{
r: name.MustParseReference("example.org/image"),
},
want: want{
h: ociv1.Hash{
Algorithm: "sha256",
Hex: "c34045c1a1db8d1b3fca8a692198466952daae07eaf6104b4c87ed3b55b6af1b",
},
err: nil,
},
},
}
for name, tc := range cases {
t.Run(name, func(t *testing.T) {
// Each case gets its own scratch store rooted in a temp dir.
tmp, err := os.MkdirTemp(os.TempDir(), strings.ReplaceAll(t.Name(), string(os.PathSeparator), "_"))
if err != nil {
t.Fatal(err.Error())
}
t.Cleanup(func() {
os.RemoveAll(tmp)
})
for name, data := range tc.files {
path := filepath.Join(tmp, DirDigests, "sha256", name)
_ = os.MkdirAll(filepath.Dir(path), 0700)
_ = os.WriteFile(path, data, 0600)
}
c, err := NewDigest(tmp)
if err != nil {
t.Fatal(err)
}
h, err := c.Hash(tc.args.r)
if diff := cmp.Diff(tc.want.h, h); diff != "" {
t.Errorf("\n%s\nHash(...): -want, +got:\n%s", tc.reason, diff)
}
// Note cmpopts.EquateErrors, not the usual testing.EquateErrors
// from crossplane-runtime. We need this to support cmpopts.AnyError.
if diff := cmp.Diff(tc.want.err, err, cmpopts.EquateErrors()); diff != "" {
t.Errorf("\n%s\nHash(...): -want error, +got error:\n%s", tc.reason, diff)
}
})
}
}
// TestWriteImage exercises Image.WriteImage against a temporary on-disk
// store, seeding pre-existing blobs via each case's "files" map.
func TestWriteImage(t *testing.T) {
errBoom := errors.New("boom")
type args struct {
i ociv1.Image
}
type want struct {
err error
}
cases := map[string]struct {
reason string
files map[string][]byte
args args
want want
}{
"DigestError": {
reason: "We should return an error if we can't get the image's digest.",
args: args{
i: &MockImage{
MockDigest: func() (ociv1.Hash, error) { return ociv1.Hash{}, errBoom },
},
},
want: want{
err: errors.Wrap(errBoom, errGetDigest),
},
},
"RawConfigFileError": {
reason: "We should return an error if we can't access the image's raw config file.",
args: args{
i: &MockImage{
MockDigest: func() (ociv1.Hash, error) { return ociv1.Hash{Hex: "cool"}, nil },
MockRawConfigFile: func() ([]byte, error) { return nil, errBoom },
},
},
want: want{
err: errors.Wrap(errBoom, errGetRawConfigFile),
},
},
"WriteLayerError": {
reason: "We should return an error if we can't write a layer to the store.",
args: args{
i: &MockImage{
MockDigest: func() (ociv1.Hash, error) { return ociv1.Hash{Hex: "cool"}, nil },
MockRawConfigFile: func() ([]byte, error) { return nil, nil },
MockLayers: func() ([]ociv1.Layer, error) {
return []ociv1.Layer{
&MockLayer{
// To cause WriteLayer to fail.
MockDiffID: func() (ociv1.Hash, error) { return ociv1.Hash{}, errBoom },
},
}, nil
},
},
},
want: want{
err: errors.Wrap(errors.Wrap(errBoom, errGetDigest), errWriteLayers),
},
},
"SuccessfulWrite": {
reason: "We should not return an error if we successfully wrote an image to the store.",
args: args{
i: &MockImage{
MockDigest: func() (ociv1.Hash, error) { return ociv1.Hash{Hex: "cool"}, nil },
MockRawConfigFile: func() ([]byte, error) { return []byte(`{"variant":"cool"}`), nil },
MockLayers: func() ([]ociv1.Layer, error) { return nil, nil },
},
},
want: want{
err: nil,
},
},
"SuccessfulNoOp": {
reason: "We should return early if the supplied image is already stored.",
files: map[string][]byte{
// The minimum valid config file required by validate.Image.
"cool": []byte(`{"rootfs":{"type":"layers"}}`),
},
args: args{
i: &MockImage{
MockDigest: func() (ociv1.Hash, error) { return ociv1.Hash{Hex: "cool"}, nil },
},
},
want: want{
err: nil,
},
},
}
for name, tc := range cases {
t.Run(name, func(t *testing.T) {
// Each case gets its own scratch store rooted in a temp dir.
tmp, err := os.MkdirTemp(os.TempDir(), strings.ReplaceAll(t.Name(), string(os.PathSeparator), "_"))
if err != nil {
t.Fatal(err.Error())
}
t.Cleanup(func() {
os.RemoveAll(tmp)
})
for name, data := range tc.files {
path := filepath.Join(tmp, DirImages, name)
_ = os.MkdirAll(filepath.Dir(path), 0700)
_ = os.WriteFile(path, data, 0600)
}
c := NewImage(tmp)
err = c.WriteImage(tc.args.i)
if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" {
t.Errorf("\n%s\nWriteImage(...): -want error, +got error:\n%s", tc.reason, diff)
}
})
}
}
// TestWriteLayer exercises Image.WriteLayer against a temporary on-disk
// store, seeding pre-existing layer blobs via each case's "files" map.
func TestWriteLayer(t *testing.T) {
errBoom := errors.New("boom")
type args struct {
l ociv1.Layer
}
type want struct {
err error
}
cases := map[string]struct {
reason string
files map[string][]byte
args args
want want
}{
"DiffIDError": {
reason: "We should return an error if we can't get the layer's (diff) digest.",
args: args{
l: &MockLayer{
MockDiffID: func() (ociv1.Hash, error) { return ociv1.Hash{}, errBoom },
},
},
want: want{
err: errors.Wrap(errBoom, errGetDigest),
},
},
"Uncompressed": {
reason: "We should return an error if we can't get the layer's uncompressed tarball reader.",
args: args{
l: &MockLayer{
MockDiffID: func() (ociv1.Hash, error) { return ociv1.Hash{}, nil },
MockUncompressed: func() (io.ReadCloser, error) { return nil, errBoom },
},
},
want: want{
err: errors.Wrap(errBoom, errReadLayer),
},
},
"SuccessfulWrite": {
reason: "We should not return an error if we successfully wrote a layer to the store.",
args: args{
l: &MockLayer{
MockDiffID: func() (ociv1.Hash, error) { return ociv1.Hash{Hex: "cool"}, nil },
MockUncompressed: func() (io.ReadCloser, error) { return io.NopCloser(strings.NewReader("")), nil },
},
},
want: want{
err: nil,
},
},
"SuccessfulNoOp": {
reason: "We should return early if the supplied layer is already stored.",
files: map[string][]byte{
"cool": nil, // This file just has to exist.
},
args: args{
l: &MockLayer{
MockDiffID: func() (ociv1.Hash, error) { return ociv1.Hash{Hex: "cool"}, nil },
MockUncompressed: func() (io.ReadCloser, error) { return io.NopCloser(strings.NewReader("")), nil },
},
},
want: want{
err: nil,
},
},
}
for name, tc := range cases {
t.Run(name, func(t *testing.T) {
// Each case gets its own scratch store rooted in a temp dir.
tmp, err := os.MkdirTemp(os.TempDir(), strings.ReplaceAll(t.Name(), string(os.PathSeparator), "_"))
if err != nil {
t.Fatal(err.Error())
}
t.Cleanup(func() {
os.RemoveAll(tmp)
})
for name, data := range tc.files {
path := filepath.Join(tmp, DirImages, name)
_ = os.MkdirAll(filepath.Dir(path), 0700)
_ = os.WriteFile(path, data, 0600)
}
c := NewImage(tmp)
err = c.WriteLayer(tc.args.l)
if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" {
t.Errorf("\n%s\nWriteLayer(...): -want error, +got error:\n%s", tc.reason, diff)
}
})
}
}

View File

@ -1,153 +0,0 @@
/*
Copyright 2022 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package uncompressed implemented an uncompressed layer based container store.
package uncompressed
import (
"context"
"io"
"os"
"path/filepath"
ociv1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/crossplane/crossplane-runtime/pkg/errors"
"github.com/crossplane/crossplane/internal/oci/layer"
"github.com/crossplane/crossplane/internal/oci/spec"
"github.com/crossplane/crossplane/internal/oci/store"
)
// Error strings used to wrap failures while preparing an OCI runtime bundle.
const (
errReadConfigFile = "cannot read image config file"
errGetLayers = "cannot get image layers"
errMkRootFS = "cannot make rootfs directory"
errOpenLayer = "cannot open layer tarball"
errApplyLayer = "cannot extract layer tarball"
errCloseLayer = "cannot close layer tarball"
errWriteRuntimeSpec = "cannot write OCI runtime spec"
errCleanupBundle = "cannot cleanup OCI runtime bundle"
)
// A TarballApplicator applies (i.e. extracts) an OCI layer tarball.
// https://github.com/opencontainers/image-spec/blob/v1.0/layer.md
type TarballApplicator interface {
// Apply the supplied tarball - an OCI filesystem layer - to the supplied
// root directory. Applying all of an image's layers, in the correct order,
// should produce the image's "flattened" filesystem.
Apply(ctx context.Context, tb io.Reader, root string) error
}
// A RuntimeSpecWriter writes an OCI runtime spec to the supplied path.
type RuntimeSpecWriter interface {
// Write and write an OCI runtime spec to the supplied path.
Write(path string, o ...spec.Option) error
}
// A RuntimeSpecWriterFn allows a function to satisfy RuntimeSpecCreator.
type RuntimeSpecWriterFn func(path string, o ...spec.Option) error
// Write an OCI runtime spec to the supplied path by calling the wrapped
// function.
func (fn RuntimeSpecWriterFn) Write(path string, o ...spec.Option) error { return fn(path, o...) }
// A Bundler prepares OCI runtime bundles for use by an OCI runtime. It creates
// the bundle's rootfs by extracting the supplied image's uncompressed layer
// tarballs.
type Bundler struct {
root string
tarball TarballApplicator
spec RuntimeSpecWriter
}
// NewBundler returns an OCI runtime bundler that creates a bundle's rootfs by
// extracting uncompressed layer tarballs. Bundles are created under
// root/DirContainers.
func NewBundler(root string) *Bundler {
s := &Bundler{
root: filepath.Join(root, store.DirContainers),
// Extraction handles whiteouts per the OCI layer spec.
tarball: layer.NewStackingExtractor(layer.NewWhiteoutHandler(layer.NewExtractHandler())),
spec: RuntimeSpecWriterFn(spec.Write),
}
return s
}
// Bundle returns an OCI bundle ready for use by an OCI runtime. It extracts
// each of the image's layer tarballs, in order, into the bundle's rootfs,
// then writes the bundle's OCI runtime spec. On any failure after the bundle
// directory is created, the partial bundle is cleaned up before returning.
func (c *Bundler) Bundle(ctx context.Context, i ociv1.Image, id string, o ...spec.Option) (store.Bundle, error) {
cfg, err := i.ConfigFile()
if err != nil {
return nil, errors.Wrap(err, errReadConfigFile)
}
layers, err := i.Layers()
if err != nil {
return nil, errors.Wrap(err, errGetLayers)
}
path := filepath.Join(c.root, id)
rootfs := filepath.Join(path, store.DirRootFS)
if err := os.MkdirAll(rootfs, 0700); err != nil {
return nil, errors.Wrap(err, errMkRootFS)
}
b := Bundle{path: path}
// Reject images with too many layers before extracting anything.
if err := store.Validate(i); err != nil {
return nil, err
}
for _, l := range layers {
tb, err := l.Uncompressed()
if err != nil {
_ = b.Cleanup()
return nil, errors.Wrap(err, errOpenLayer)
}
if err := c.tarball.Apply(ctx, tb, rootfs); err != nil {
_ = tb.Close()
_ = b.Cleanup()
return nil, errors.Wrap(err, errApplyLayer)
}
if err := tb.Close(); err != nil {
_ = b.Cleanup()
return nil, errors.Wrap(err, errCloseLayer)
}
}
// Inject config derived from the image first, so that any options passed in
// by the caller will override it.
p, g := filepath.Join(rootfs, "etc", "passwd"), filepath.Join(rootfs, "etc", "group")
opts := append([]spec.Option{spec.WithImageConfig(cfg, p, g), spec.WithRootFS(store.DirRootFS, true)}, o...)
if err = c.spec.Write(filepath.Join(path, store.FileSpec), opts...); err != nil {
_ = b.Cleanup()
return nil, errors.Wrap(err, errWriteRuntimeSpec)
}
return b, nil
}
// A Bundle is an OCI runtime bundle. Its root filesystem is a temporary
// extraction of its image's cached layers.
type Bundle struct {
path string
}
// Path to the OCI bundle.
func (b Bundle) Path() string { return b.path }
// Cleanup the OCI bundle by removing its directory, including the extracted
// rootfs.
func (b Bundle) Cleanup() error {
return errors.Wrap(os.RemoveAll(b.path), errCleanupBundle)
}

View File

@ -1,247 +0,0 @@
/*
Copyright 2022 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package uncompressed
import (
"context"
"io"
"os"
"strings"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
ociv1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/crossplane/crossplane-runtime/pkg/errors"
"github.com/crossplane/crossplane-runtime/pkg/test"
"github.com/crossplane/crossplane/internal/oci/spec"
"github.com/crossplane/crossplane/internal/oci/store"
)
// MockImage is a test double for ociv1.Image. Only the mocked methods are
// overridden; all others panic via the embedded nil interface.
type MockImage struct {
ociv1.Image
MockDigest func() (ociv1.Hash, error)
MockConfigFile func() (*ociv1.ConfigFile, error)
MockLayers func() ([]ociv1.Layer, error)
}
func (i *MockImage) Digest() (ociv1.Hash, error) { return i.MockDigest() }
func (i *MockImage) ConfigFile() (*ociv1.ConfigFile, error) { return i.MockConfigFile() }
func (i *MockImage) Layers() ([]ociv1.Layer, error) { return i.MockLayers() }
// MockLayer is a test double for ociv1.Layer.
type MockLayer struct {
ociv1.Layer
MockDigest func() (ociv1.Hash, error)
MockUncompressed func() (io.ReadCloser, error)
}
func (l *MockLayer) Digest() (ociv1.Hash, error) { return l.MockDigest() }
func (l *MockLayer) Uncompressed() (io.ReadCloser, error) { return l.MockUncompressed() }
// MockTarballApplicator always returns its configured error (nil for success).
type MockTarballApplicator struct{ err error }
func (a *MockTarballApplicator) Apply(_ context.Context, _ io.Reader, _ string) error { return a.err }
// MockRuntimeSpecWriter always returns its configured error (nil for success).
type MockRuntimeSpecWriter struct{ err error }
func (c *MockRuntimeSpecWriter) Write(_ string, _ ...spec.Option) error { return c.err }
// MockCloser wraps a reader with a Close that returns a configured error.
type MockCloser struct {
io.Reader
err error
}
func (c *MockCloser) Close() error { return c.err }
// TestBundle exercises Bundler.Bundle with mocked images, layers, tarball
// applicators, and runtime spec writers, against a temporary bundle root.
func TestBundle(t *testing.T) {
errBoom := errors.New("boom")
type params struct {
tarball TarballApplicator
spec RuntimeSpecWriter
}
type args struct {
ctx context.Context
i ociv1.Image
id string
o []spec.Option
}
type want struct {
b store.Bundle
err error
}
cases := map[string]struct {
reason string
params params
args args
want want
}{
"ReadConfigFileError": {
reason: "We should return any error encountered reading the image's config file.",
params: params{},
args: args{
i: &MockImage{
MockConfigFile: func() (*ociv1.ConfigFile, error) { return nil, errBoom },
},
},
want: want{
err: errors.Wrap(errBoom, errReadConfigFile),
},
},
"GetLayersError": {
reason: "We should return any error encountered reading the image's layers.",
params: params{},
args: args{
i: &MockImage{
MockConfigFile: func() (*ociv1.ConfigFile, error) { return &ociv1.ConfigFile{}, nil },
MockLayers: func() ([]ociv1.Layer, error) { return nil, errBoom },
},
},
want: want{
err: errors.Wrap(errBoom, errGetLayers),
},
},
"UncompressedLayerError": {
reason: "We should return any error encountered opening an image's uncompressed layers.",
params: params{},
args: args{
i: &MockImage{
MockConfigFile: func() (*ociv1.ConfigFile, error) { return &ociv1.ConfigFile{}, nil },
MockLayers: func() ([]ociv1.Layer, error) {
return []ociv1.Layer{&MockLayer{
MockUncompressed: func() (io.ReadCloser, error) { return nil, errBoom },
}}, nil
},
},
},
want: want{
err: errors.Wrap(errBoom, errOpenLayer),
},
},
"ApplyLayerTarballError": {
reason: "We should return any error encountered applying an image's layer tarball.",
params: params{
tarball: &MockTarballApplicator{err: errBoom},
},
args: args{
i: &MockImage{
MockConfigFile: func() (*ociv1.ConfigFile, error) { return &ociv1.ConfigFile{}, nil },
MockLayers: func() ([]ociv1.Layer, error) {
return []ociv1.Layer{&MockLayer{
MockUncompressed: func() (io.ReadCloser, error) { return io.NopCloser(strings.NewReader("")), nil },
}}, nil
},
},
},
want: want{
err: errors.Wrap(errBoom, errApplyLayer),
},
},
"CloseLayerError": {
reason: "We should return any error encountered closing an image's layer tarball.",
params: params{
tarball: &MockTarballApplicator{},
},
args: args{
i: &MockImage{
MockConfigFile: func() (*ociv1.ConfigFile, error) { return &ociv1.ConfigFile{}, nil },
MockLayers: func() ([]ociv1.Layer, error) {
return []ociv1.Layer{&MockLayer{
MockUncompressed: func() (io.ReadCloser, error) { return &MockCloser{err: errBoom}, nil },
}}, nil
},
},
},
want: want{
err: errors.Wrap(errBoom, errCloseLayer),
},
},
"WriteRuntimeSpecError": {
reason: "We should return any error encountered creating the bundle's OCI runtime spec.",
params: params{
tarball: &MockTarballApplicator{},
spec: &MockRuntimeSpecWriter{err: errBoom},
},
args: args{
i: &MockImage{
MockConfigFile: func() (*ociv1.ConfigFile, error) { return &ociv1.ConfigFile{}, nil },
MockLayers: func() ([]ociv1.Layer, error) {
return []ociv1.Layer{&MockLayer{
MockUncompressed: func() (io.ReadCloser, error) { return io.NopCloser(strings.NewReader("")), nil },
}}, nil
},
},
},
want: want{
err: errors.Wrap(errBoom, errWriteRuntimeSpec),
},
},
"SuccessfulBundle": {
reason: "We should create and return an OCI bundle.",
params: params{
tarball: &MockTarballApplicator{},
spec: &MockRuntimeSpecWriter{},
},
args: args{
i: &MockImage{
MockConfigFile: func() (*ociv1.ConfigFile, error) { return &ociv1.ConfigFile{}, nil },
MockLayers: func() ([]ociv1.Layer, error) {
return []ociv1.Layer{&MockLayer{
MockUncompressed: func() (io.ReadCloser, error) { return io.NopCloser(strings.NewReader("")), nil },
}}, nil
},
},
},
want: want{
// NOTE(negz): We cmpopts.IngoreUnexported this type below, so
// we're really only testing that a non-nil bundle was returned.
b: Bundle{},
},
},
}
for name, tc := range cases {
t.Run(name, func(t *testing.T) {
// Each case gets its own scratch bundle root in a temp dir.
tmp, err := os.MkdirTemp(os.TempDir(), strings.ReplaceAll(t.Name(), string(os.PathSeparator), "_"))
if err != nil {
t.Fatal(err.Error())
}
defer os.RemoveAll(tmp)
c := &Bundler{
root: tmp,
tarball: tc.params.tarball,
spec: tc.params.spec,
}
got, err := c.Bundle(tc.args.ctx, tc.args.i, tc.args.id, tc.args.o...)
if diff := cmp.Diff(tc.want.b, got, cmpopts.IgnoreUnexported(Bundle{})); diff != "" {
t.Errorf("\n%s\nBundle(...): -want, +got:\n%s", tc.reason, diff)
}
if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" {
t.Errorf("\n%s\nBundle(...): -want error, +got error:\n%s", tc.reason, diff)
}
})
}
}

View File

@ -1,128 +0,0 @@
/*
Copyright 2022 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package xfn
import (
"io"
"net"
"google.golang.org/grpc"
"github.com/crossplane/crossplane-runtime/pkg/errors"
"github.com/crossplane/crossplane-runtime/pkg/logging"
"github.com/crossplane/crossplane/apis/apiextensions/fn/proto/v1alpha1"
)
// Error strings.
const (
errListen = "cannot listen for gRPC connections"
errServe = "cannot serve gRPC API"
)
// defaultCacheDir is where function images and containers are cached unless
// WithCacheDir overrides it.
const defaultCacheDir = "/xfn"
// A ContainerRunner runs a Composition Function packaged as an OCI image by
// extracting it and running it as a 'rootless' container.
type ContainerRunner struct {
v1alpha1.UnimplementedContainerizedFunctionRunnerServiceServer
log logging.Logger
// rootUID/rootGID are the host IDs mapped to root inside the container's
// user namespace.
rootUID int
rootGID int
setuid bool // Specifically, CAP_SETUID and CAP_SETGID.
cache string // Directory used to cache function images and containers.
registry string // Default registry for function image references.
}
// A ContainerRunnerOption configures a new ContainerRunner.
type ContainerRunnerOption func(*ContainerRunner)
// MapToRoot configures what UID and GID should map to root (UID/GID 0) in the
// user namespace in which the function will be run.
func MapToRoot(uid, gid int) ContainerRunnerOption {
return func(r *ContainerRunner) {
r.rootUID = uid
r.rootGID = gid
}
}
// SetUID indicates that the container runner should attempt operations that
// require CAP_SETUID and CAP_SETGID, for example creating a user namespace that
// maps arbitrary UIDs and GIDs to the parent namespace.
func SetUID(s bool) ContainerRunnerOption {
return func(r *ContainerRunner) {
r.setuid = s
}
}
// WithCacheDir specifies the directory used for caching function images and
// containers, overriding defaultCacheDir.
func WithCacheDir(dir string) ContainerRunnerOption {
	return func(cr *ContainerRunner) {
		cr.cache = dir
	}
}
// WithRegistry specifies the default registry used to retrieve function
// images and containers.
func WithRegistry(registry string) ContainerRunnerOption {
	return func(cr *ContainerRunner) {
		cr.registry = registry
	}
}
// WithLogger configures which logger the container runner should use. Logging
// is disabled by default.
func WithLogger(log logging.Logger) ContainerRunnerOption {
	return func(cr *ContainerRunner) {
		cr.log = log
	}
}
// NewContainerRunner returns a new Runner that runs functions as rootless
// containers. By default it caches under defaultCacheDir and does not log;
// use ContainerRunnerOptions to override either.
func NewContainerRunner(opts ...ContainerRunnerOption) *ContainerRunner {
	runner := &ContainerRunner{
		cache: defaultCacheDir,
		log:   logging.NewNopLogger(),
	}
	for _, opt := range opts {
		opt(runner)
	}
	return runner
}
// ListenAndServe gRPC connections at the supplied address. It blocks serving
// the ContainerizedFunctionRunnerService until Serve returns.
func (r *ContainerRunner) ListenAndServe(network, address string) error {
	r.log.Debug("Listening", "network", network, "address", address)

	l, err := net.Listen(network, address)
	if err != nil {
		return errors.Wrap(err, errListen)
	}

	// TODO(negz): Limit concurrent function runs?
	s := grpc.NewServer()
	v1alpha1.RegisterContainerizedFunctionRunnerServiceServer(s, r)
	return errors.Wrap(s.Serve(l), errServe)
}
// Stdio can be used to read and write a command's standard I/O. These are
// "our" ends of the pipes: Stdin is written to supply the command's stdin,
// Stdout and Stderr are read to consume its output. See StdioPipes.
type Stdio struct {
	Stdin  io.WriteCloser
	Stdout io.ReadCloser
	Stderr io.ReadCloser
}

View File

@ -1,185 +0,0 @@
//go:build linux
/*
Copyright 2022 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package xfn
import (
"bytes"
"context"
"fmt"
"io"
"os"
"os/exec"
"syscall"
"google.golang.org/protobuf/proto"
"kernel.org/pub/linux/libs/security/libcap/cap"
"github.com/crossplane/crossplane-runtime/pkg/errors"
"github.com/crossplane/crossplane/apis/apiextensions/fn/proto/v1alpha1"
)
// NOTE(negz): Technically _all_ of the containerized Composition Functions
// implementation is only useful on Linux, but we want to support building what
// we can on other operating systems (e.g. Darwin) to make it possible for folks
// running them to ensure that code compiles and passes tests during
// development. Avoid adding code to this file unless it actually needs Linux to
// run.
// Error strings.
const (
	errCreateStdioPipes  = "cannot create stdio pipes"
	errStartSpark        = "cannot start " + spark
	errCloseStdin        = "cannot close stdin pipe"
	errReadStdout        = "cannot read from stdout pipe"
	errReadStderr        = "cannot read from stderr pipe"
	errMarshalRequest    = "cannot marshal RunFunctionRequest for " + spark
	errWriteRequest      = "cannot write RunFunctionRequest to " + spark + " stdin"
	errUnmarshalResponse = "cannot unmarshal RunFunctionRequest from " + spark + " stdout"
)
// How many UIDs and GIDs to map from the parent to the child user namespace,
// if possible. Doing so requires CAP_SETUID and CAP_SETGID.
const (
	UserNamespaceUIDs = 65536
	UserNamespaceGIDs = 65536

	// MaxStdioBytes caps how much of the child's stdout and stderr
	// RunFunction will read, to avoid OOMing on a misbehaving function.
	MaxStdioBytes = 100 << 20 // 100 MB
)

// The subcommand of xfn to invoke - i.e. "xfn spark <source> <bundle>"
const spark = "spark"
// HasCapSetUID returns true if this process has CAP_SETUID.
func HasCapSetUID() bool {
pc := cap.GetProc()
setuid, _ := pc.GetFlag(cap.Effective, cap.SETUID)
return setuid
}
// HasCapSetGID returns true if this process has CAP_SETGID.
func HasCapSetGID() bool {
pc := cap.GetProc()
setgid, _ := pc.GetFlag(cap.Effective, cap.SETGID)
return setgid
}
// RunFunction runs a function as a rootless OCI container. Functions that
// return non-zero, or that cannot be executed in the first place (e.g. because
// they cannot be fetched from the registry) will return an error.
//
// The function is not run in-process: RunFunction re-executes this binary
// (os.Args[0]) with the spark subcommand inside new user and mount
// namespaces, writes the marshalled request to spark's stdin, and reads the
// marshalled response back from its stdout.
func (r *ContainerRunner) RunFunction(ctx context.Context, req *v1alpha1.RunFunctionRequest) (*v1alpha1.RunFunctionResponse, error) {
	r.log.Debug("Running function", "image", req.Image)

	/*
		We want to create an overlayfs with the cached rootfs as the lower layer
		and the bundle's rootfs as the upper layer, if possible. Kernel 5.11 and
		later supports using overlayfs inside a user (and mount) namespace. The
		best way to run code in a user namespace in Go is to execute a separate
		binary; the unix.Unshare syscall affects only one OS thread, and the Go
		scheduler might move the goroutine to another.

		Therefore we execute a shim - xfn spark - in a new user and mount
		namespace. spark fetches and caches the image, creates an OCI runtime
		bundle, then executes an OCI runtime in order to actually execute
		the function.
	*/
	cmd := exec.CommandContext(ctx, os.Args[0], spark, "--cache-dir="+r.cache, "--registry="+r.registry, //nolint:gosec // We're intentionally executing with variable input.
		fmt.Sprintf("--max-stdio-bytes=%d", MaxStdioBytes))
	// By default map only a single UID/GID (r.rootUID/r.rootGID) to root in
	// the child's user namespace; mapping a wider range requires CAP_SETUID
	// and CAP_SETGID and is handled below.
	cmd.SysProcAttr = &syscall.SysProcAttr{
		Cloneflags:  syscall.CLONE_NEWUSER | syscall.CLONE_NEWNS,
		UidMappings: []syscall.SysProcIDMap{{ContainerID: 0, HostID: r.rootUID, Size: 1}},
		GidMappings: []syscall.SysProcIDMap{{ContainerID: 0, HostID: r.rootGID, Size: 1}},
	}

	// When we have CAP_SETUID and CAP_SETGID (i.e. typically when root), we can
	// map a range of UIDs (0 to 65,336) inside the user namespace to a range in
	// its parent. We can also drop privileges (in the parent user namespace) by
	// running spark as root in the user namespace.
	if r.setuid {
		cmd.SysProcAttr.UidMappings = []syscall.SysProcIDMap{{ContainerID: 0, HostID: r.rootUID, Size: UserNamespaceUIDs}}
		cmd.SysProcAttr.GidMappings = []syscall.SysProcIDMap{{ContainerID: 0, HostID: r.rootGID, Size: UserNamespaceGIDs}}
		cmd.SysProcAttr.GidMappingsEnableSetgroups = true

		/*
			UID and GID 0 here are relative to the new user namespace - i.e. they
			correspond to HostID in the parent. We're able to do this because
			Go's exec.Command will:

			1. Call clone(2) to create a child process in a new user namespace.
			2. In the child process, wait for /proc/self/uid_map to be written.
			3. In the parent process, write the child's /proc/$pid/uid_map.
			4. In the child process, call setuid(2) and setgid(2) per Credential.
			5. In the child process, call execve(2) to execute spark.

			Per user_namespaces(7) the child process created by clone(2) starts
			out with a complete set of capabilities in the new user namespace
			until the call to execve(2) causes them to be recalculated. This
			includes the CAP_SETUID and CAP_SETGID necessary to become UID 0 in
			the child user namespace, effectively dropping privileges to UID
			100000 in the parent user namespace.

			https://github.com/golang/go/blob/1b03568/src/syscall/exec_linux.go#L446
		*/
		cmd.SysProcAttr.Credential = &syscall.Credential{Uid: 0, Gid: 0}
	}

	// StdioPipes also chowns the pipes to r.rootUID/r.rootGID so spark can
	// use them from inside the user namespace.
	stdio, err := StdioPipes(cmd, r.rootUID, r.rootGID)
	if err != nil {
		return nil, errors.Wrap(err, errCreateStdioPipes)
	}

	// Marshal the request before starting spark so we fail fast if it cannot
	// be encoded.
	b, err := proto.Marshal(req)
	if err != nil {
		return nil, errors.Wrap(err, errMarshalRequest)
	}
	if err := cmd.Start(); err != nil {
		return nil, errors.Wrap(err, errStartSpark)
	}
	if _, err := stdio.Stdin.Write(b); err != nil {
		return nil, errors.Wrap(err, errWriteRequest)
	}

	// Closing the write end of the stdio pipe will cause the read end to return
	// EOF. This is necessary to avoid a function blocking forever while reading
	// from stdin.
	if err := stdio.Stdin.Close(); err != nil {
		return nil, errors.Wrap(err, errCloseStdin)
	}

	// We must read all of stdout and stderr before calling cmd.Wait, which
	// closes the underlying pipes.
	// Limited to MaxStdioBytes to avoid OOMing if the function writes a lot of
	// data to stdout or stderr.
	stdout, err := io.ReadAll(io.LimitReader(stdio.Stdout, MaxStdioBytes))
	if err != nil {
		return nil, errors.Wrap(err, errReadStdout)
	}
	stderr, err := io.ReadAll(io.LimitReader(stdio.Stderr, MaxStdioBytes))
	if err != nil {
		return nil, errors.Wrap(err, errReadStderr)
	}

	// A non-zero exit from spark surfaces here; include its stderr in the
	// returned error for context.
	if err := cmd.Wait(); err != nil {
		// TODO(negz): Handle stderr being too long to be a useful error.
		return nil, errors.Errorf("%w: %s", err, bytes.TrimSuffix(stderr, []byte("\n")))
	}

	rsp := &v1alpha1.RunFunctionResponse{}
	return rsp, errors.Wrap(proto.Unmarshal(stdout, rsp), errUnmarshalResponse)
}

View File

@ -1,40 +0,0 @@
//go:build !linux
/*
Copyright 2022 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package xfn
import (
"context"
"github.com/crossplane/crossplane-runtime/pkg/errors"
"github.com/crossplane/crossplane/apis/apiextensions/fn/proto/v1alpha1"
)
// errLinuxOnly is returned by operations that can only run containerized
// functions on Linux.
const errLinuxOnly = "containerized functions are only supported on Linux"

// HasCapSetUID returns false on non-Linux; CAP_SETUID is a Linux capability.
func HasCapSetUID() bool {
	return false
}

// HasCapSetGID returns false on non-Linux; CAP_SETGID is a Linux capability.
func HasCapSetGID() bool {
	return false
}
// RunFunction returns an error on non-Linux. Containerized functions require
// Linux-only facilities (see the Linux implementation in this package), so
// this stub always fails with errLinuxOnly.
func (r *ContainerRunner) RunFunction(_ context.Context, _ *v1alpha1.RunFunctionRequest) (*v1alpha1.RunFunctionResponse, error) {
	return nil, errors.New(errLinuxOnly)
}

View File

@ -1,30 +0,0 @@
//go:build !unix
/*
Copyright 2022 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package xfn
import (
"os/exec"
"github.com/crossplane/crossplane-runtime/pkg/errors"
)
// StdioPipes returns an error on non-Unix platforms. The working
// implementation is built with the unix tag only because it relies on
// fchown(2) taking an integer file descriptor; it always fails here with
// errLinuxOnly.
func StdioPipes(cmd *exec.Cmd, uid, gid int) (*Stdio, error) {
	return nil, errors.New(errLinuxOnly)
}

View File

@ -1,77 +0,0 @@
//go:build unix
/*
Copyright 2022 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package xfn
import (
"os/exec"
"syscall"
"github.com/crossplane/crossplane-runtime/pkg/errors"
)
// NOTE(negz): We build this function for unix so that folks running (e.g.)
// Darwin can build and test the code, even though it's only really useful for
// Linux systems.
// Error strings.
const (
	errCreateStdinPipe  = "cannot create stdin pipe"
	errCreateStdoutPipe = "cannot create stdout pipe"
	errCreateStderrPipe = "cannot create stderr pipe"
	errChownFd          = "cannot chown file descriptor"
)
// StdioPipes creates and returns pipes that will be connected to the supplied
// command's stdio when it starts. It calls fchown(2) to ensure all pipes are
// owned by the supplied user and group ID; this ensures that the command can
// read and write its stdio even when xfn is running as root (in the parent
// namespace) and the command is not.
func StdioPipes(cmd *exec.Cmd, uid, gid int) (*Stdio, error) {
	stdin, err := cmd.StdinPipe()
	if err != nil {
		return nil, errors.Wrap(err, errCreateStdinPipe)
	}
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return nil, errors.Wrap(err, errCreateStdoutPipe)
	}
	stderr, err := cmd.StderrPipe()
	if err != nil {
		return nil, errors.Wrap(err, errCreateStderrPipe)
	}

	// StdinPipe and friends above return "our end" of the pipe - i.e. stdin is
	// the io.WriteCloser we can use to write to the command's stdin. They also
	// setup the "command's end" of the pipe - i.e. cmd.Stdin is the io.Reader
	// the command can use to read its stdin. In all cases these pipes _should_
	// be *os.Files.
	for _, s := range []any{stdin, stdout, stderr, cmd.Stdin, cmd.Stdout, cmd.Stderr} {
		f, ok := s.(interface{ Fd() uintptr })
		if !ok {
			// Format s, not f: when the assertion fails f is the interface's
			// zero value and %T would always print <nil> rather than the
			// concrete type of the offending pipe.
			return nil, errors.Errorf("stdio pipe (type: %T) missing required Fd() method", s)
		}
		// We only build this file on unix because Fchown does not take an
		// integer fd on Windows.
		if err := syscall.Fchown(int(f.Fd()), uid, gid); err != nil {
			return nil, errors.Wrap(err, errChownFd)
		}
	}

	return &Stdio{Stdin: stdin, Stdout: stdout, Stderr: stderr}, nil
}

View File

@ -1,18 +0,0 @@
/*
Copyright 2022 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package xfn is the reference implementation of Composition Functions.
package xfn

View File

@ -14,6 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Package e2e implements end-to-end tests for Crossplane.
package e2e
import (
@ -59,7 +60,6 @@ const (
var (
environment = config.NewEnvironmentFromFlags()
clusterName string
)
func TestMain(m *testing.M) {

View File

@ -1,9 +0,0 @@
apiVersion: nop.example.org/v1alpha1
kind: NopResource
metadata:
name: fn-labelizer
namespace: default
spec:
coolField: example
compositionRef:
name: fn.xnopresources.nop.example.org

View File

@ -1,39 +0,0 @@
apiVersion: apiextensions.crossplane.io/v1
kind: Composition
metadata:
name: fn.xnopresources.nop.example.org
labels:
provider: provider-nop
spec:
compositeTypeRef:
apiVersion: nop.example.org/v1alpha1
kind: XNopResource
resources:
- name: nopinstance1
base:
apiVersion: nop.crossplane.io/v1alpha1
kind: NopResource
spec:
forProvider:
conditionAfter:
- conditionType: Ready
conditionStatus: "False"
time: 0s
- conditionType: Ready
conditionStatus: "True"
time: 10s
- conditionType: Synced
conditionStatus: "False"
time: 0s
- conditionType: Synced
conditionStatus: "True"
time: 10s
writeConnectionSecretsToRef:
namespace: crossplane-system
name: nop-example-resource
functions:
- name: labelizer
type: Container
container:
image: private-docker-registry.xfn-registry.svc.cluster.local:5000/fn-labelizer:latest
imagePullPolicy: Always

View File

@ -1,29 +0,0 @@
apiVersion: apiextensions.crossplane.io/v1
kind: CompositeResourceDefinition
metadata:
name: xnopresources.nop.example.org
spec:
group: nop.example.org
names:
kind: XNopResource
plural: xnopresources
claimNames:
kind: NopResource
plural: nopresources
connectionSecretKeys:
- test
versions:
- name: v1alpha1
served: true
referenceable: true
schema:
openAPIV3Schema:
type: object
properties:
spec:
type: object
properties:
coolField:
type: string
required:
- coolField

View File

@ -1,7 +0,0 @@
apiVersion: pkg.crossplane.io/v1
kind: Provider
metadata:
name: provider-nop
spec:
package: xpkg.upbound.io/crossplane-contrib/provider-nop:v0.2.0
ignoreCrossplaneConstraints: true

View File

@ -1,9 +0,0 @@
apiVersion: nop.example.org/v1alpha1
kind: NopResource
metadata:
name: fn-tmp-writer
namespace: default
spec:
coolField: example
compositionRef:
name: fn.xnopresources.nop.example.org

View File

@ -1,39 +0,0 @@
apiVersion: apiextensions.crossplane.io/v1
kind: Composition
metadata:
name: fn.xnopresources.nop.example.org
labels:
provider: provider-nop
spec:
compositeTypeRef:
apiVersion: nop.example.org/v1alpha1
kind: XNopResource
resources:
- name: nopinstance1
base:
apiVersion: nop.crossplane.io/v1alpha1
kind: NopResource
spec:
forProvider:
conditionAfter:
- conditionType: Ready
conditionStatus: "False"
time: 0s
- conditionType: Ready
conditionStatus: "True"
time: 10s
- conditionType: Synced
conditionStatus: "False"
time: 0s
- conditionType: Synced
conditionStatus: "True"
time: 10s
writeConnectionSecretsToRef:
namespace: crossplane-system
name: nop-example-resource
functions:
- name: tmp-writer
type: Container
container:
image: public-docker-registry.xfn-registry.svc.cluster.local:5000/fn-tmp-writer:latest
imagePullPolicy: Always

View File

@ -1,29 +0,0 @@
apiVersion: apiextensions.crossplane.io/v1
kind: CompositeResourceDefinition
metadata:
name: xnopresources.nop.example.org
spec:
group: nop.example.org
names:
kind: XNopResource
plural: xnopresources
claimNames:
kind: NopResource
plural: nopresources
connectionSecretKeys:
- test
versions:
- name: v1alpha1
served: true
referenceable: true
schema:
openAPIV3Schema:
type: object
properties:
spec:
type: object
properties:
coolField:
type: string
required:
- coolField

View File

@ -1,7 +0,0 @@
apiVersion: pkg.crossplane.io/v1
kind: Provider
metadata:
name: provider-nop
spec:
package: xpkg.upbound.io/crossplane-contrib/provider-nop:v0.2.0
ignoreCrossplaneConstraints: true

View File

@ -1,296 +0,0 @@
/*
Copyright 2023 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"context"
"strings"
"testing"
"time"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/e2e-framework/pkg/envconf"
"sigs.k8s.io/e2e-framework/pkg/envfuncs"
"sigs.k8s.io/e2e-framework/pkg/features"
"sigs.k8s.io/e2e-framework/third_party/helm"
xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
v1 "github.com/crossplane/crossplane/apis/apiextensions/v1"
"github.com/crossplane/crossplane/test/e2e/config"
"github.com/crossplane/crossplane/test/e2e/funcs"
"github.com/crossplane/crossplane/test/e2e/utils"
)
const (
	// LabelAreaXFN is the label used to select tests that are part of the XFN
	// area.
	LabelAreaXFN = "xfn"

	// SuiteCompositionFunctions is the value for the
	// config.LabelTestSuite label to be assigned to tests that should be part
	// of the Composition functions test suite.
	SuiteCompositionFunctions = "composition-functions"

	// The caller (e.g. make e2e) must ensure these exist.
	// Run `make build e2e-tag-images` to produce them
	// TODO(phisco): make it configurable
	imgxfn = "crossplane-e2e/xfn:latest"

	// registryNs is the namespace the in-cluster image registry is installed
	// into by these tests.
	registryNs = "xfn-registry"

	// Common timeouts used by the assessments below.
	timeoutFive = 5 * time.Minute
	timeoutOne  = 1 * time.Minute
)
// init registers the Composition Functions test suite with the shared e2e
// environment: Crossplane is installed with composition functions enabled and
// the xfn sidecar running the image built by `make e2e-tag-images`, and the
// xfn image is loaded into the kind cluster when image loading is requested.
func init() {
	environment.AddTestSuite(SuiteCompositionFunctions,
		config.WithHelmInstallOpts(
			helm.WithArgs(
				"--set args={--debug,--enable-composition-functions}",
				"--set xfn.args={--debug}",
				"--set xfn.enabled=true",
				"--set xfn.image.repository="+strings.Split(imgxfn, ":")[0],
				"--set xfn.image.tag="+strings.Split(imgxfn, ":")[1],
				"--set xfn.resources.limits.cpu=100m",
				"--set xfn.resources.requests.cpu=100m",
			),
		),
		config.WithLabelsToSelect(features.Labels{
			config.LabelTestSuite: []string{SuiteCompositionFunctions, config.TestSuiteDefault},
		}),
		config.WithConditionalEnvSetupFuncs(
			environment.ShouldLoadImages, envfuncs.LoadDockerImageToCluster(environment.GetKindClusterName(), imgxfn),
		),
	)
}
// TestXfnRunnerImagePullFromPrivateRegistryWithCustomCert verifies that xfn
// can pull and run a function image from a private in-cluster registry that
// serves TLS with a self-signed certificate, provided to Crossplane via the
// registryCaBundleConfig Helm values. It installs a TLS-enabled registry,
// copies the fn-labelizer image into it, reinstalls Crossplane trusting the
// registry's CA, then asserts a claim composed with that function becomes
// available and that the function labeled the managed resources.
func TestXfnRunnerImagePullFromPrivateRegistryWithCustomCert(t *testing.T) {
	manifests := "test/e2e/manifests/xfnrunner/private-registry/pull"
	environment.Test(t,
		features.New(t.Name()).
			WithLabel(LabelArea, LabelAreaXFN).
			WithLabel(LabelStage, LabelStageAlpha).
			WithLabel(LabelSize, LabelSizeLarge).
			WithLabel(LabelModifyCrossplaneInstallation, LabelModifyCrossplaneInstallationTrue).
			WithLabel(config.LabelTestSuite, SuiteCompositionFunctions).
			// Install a docker registry served with a freshly generated
			// self-signed cert; the cert goes in a TLS secret for the
			// registry and its CA in a ConfigMap for Crossplane.
			WithSetup("InstallRegistryWithCustomTlsCertificate",
				funcs.AllOf(
					funcs.AsFeaturesFunc(envfuncs.CreateNamespace(registryNs)),
					func(ctx context.Context, t *testing.T, config *envconf.Config) context.Context {
						dnsName := "private-docker-registry.xfn-registry.svc.cluster.local"
						caPem, keyPem, err := utils.CreateCert(dnsName)
						if err != nil {
							t.Fatal(err)
						}

						secret := &corev1.Secret{
							ObjectMeta: metav1.ObjectMeta{
								Name:      "reg-cert",
								Namespace: registryNs,
							},
							Type: corev1.SecretTypeTLS,
							StringData: map[string]string{
								"tls.crt": caPem,
								"tls.key": keyPem,
							},
						}
						client := config.Client().Resources()
						if err := client.Create(ctx, secret); err != nil {
							t.Fatalf("Cannot create secret %s: %v", secret.Name, err)
						}
						configMap := &corev1.ConfigMap{
							ObjectMeta: metav1.ObjectMeta{
								Name:      "reg-ca",
								Namespace: namespace,
							},
							Data: map[string]string{
								"domain.crt": caPem,
							},
						}
						if err := client.Create(ctx, configMap); err != nil {
							t.Fatalf("Cannot create config %s: %v", configMap.Name, err)
						}
						return ctx
					},
					funcs.AsFeaturesFunc(
						funcs.HelmRepo(
							helm.WithArgs("add"),
							helm.WithArgs("twuni"),
							helm.WithArgs("https://helm.twun.io"),
						)),
					funcs.AsFeaturesFunc(
						funcs.HelmInstall(
							helm.WithName("private"),
							helm.WithNamespace(registryNs),
							helm.WithWait(),
							helm.WithChart("twuni/docker-registry"),
							helm.WithVersion("2.2.2"),
							helm.WithArgs(
								"--set service.type=NodePort",
								"--set service.nodePort=32000",
								"--set tlsSecretName=reg-cert",
							),
						))),
			).
			WithSetup("CopyFnImageToRegistry",
				funcs.CopyImageToRegistry(clusterName, registryNs, "private-docker-registry", "crossplane-e2e/fn-labelizer:latest", timeoutOne)).
			// Reinstall Crossplane pointing registryCaBundleConfig at the CA
			// ConfigMap created above so xfn trusts the private registry.
			WithSetup("CrossplaneDeployedWithFunctionsEnabled", funcs.AllOf(
				funcs.AsFeaturesFunc(environment.HelmUpgradeCrossplaneToSuite(SuiteCompositionFunctions,
					helm.WithArgs(
						"--set registryCaBundleConfig.key=domain.crt",
						"--set registryCaBundleConfig.name=reg-ca",
					))),
				funcs.ReadyToTestWithin(1*time.Minute, namespace),
			)).
			WithSetup("ProviderNopDeployed", funcs.AllOf(
				funcs.ApplyResources(FieldManager, manifests, "prerequisites/provider.yaml"),
				funcs.ApplyResources(FieldManager, manifests, "prerequisites/definition.yaml"),
				funcs.ResourcesCreatedWithin(30*time.Second, manifests, "prerequisites/provider.yaml"),
				funcs.ResourcesCreatedWithin(30*time.Second, manifests, "prerequisites/definition.yaml"),
				funcs.ResourcesHaveConditionWithin(1*time.Minute, manifests, "prerequisites/definition.yaml", v1.WatchingComposite()),
			)).
			Assess("CompositionWithFunctionIsCreated", funcs.AllOf(
				funcs.ApplyResources(FieldManager, manifests, "composition.yaml"),
				funcs.ResourcesCreatedWithin(30*time.Second, manifests, "composition.yaml"),
			)).
			Assess("ClaimIsCreated", funcs.AllOf(
				funcs.ApplyResources(FieldManager, manifests, "claim.yaml"),
				funcs.ResourcesCreatedWithin(30*time.Second, manifests, "claim.yaml"),
			)).
			Assess("ClaimBecomesAvailable", funcs.ResourcesHaveConditionWithin(timeoutFive, manifests, "claim.yaml", xpv1.Available())).
			// The labelizer function should have labeled every composed
			// managed resource.
			Assess("ManagedResourcesProcessedByFunction", funcs.ManagedResourcesOfClaimHaveFieldValueWithin(timeoutFive, manifests, "claim.yaml", "metadata.labels[labelizer.xfn.crossplane.io/processed]", "true", nil)).
			WithTeardown("DeleteClaim", funcs.AllOf(
				funcs.DeleteResources(manifests, "claim.yaml"),
				funcs.ResourcesDeletedWithin(30*time.Second, manifests, "claim.yaml"),
			)).
			WithTeardown("DeleteComposition", funcs.AllOf(
				funcs.DeleteResources(manifests, "composition.yaml"),
				funcs.ResourcesDeletedWithin(30*time.Second, manifests, "composition.yaml"),
			)).
			WithTeardown("ProviderNopRemoved", funcs.AllOf(
				funcs.DeleteResources(manifests, "prerequisites/provider.yaml"),
				funcs.DeleteResources(manifests, "prerequisites/definition.yaml"),
				funcs.ResourcesDeletedWithin(30*time.Second, manifests, "prerequisites/provider.yaml"),
				funcs.ResourcesDeletedWithin(30*time.Second, manifests, "prerequisites/definition.yaml"),
			)).
			// Remove the registry namespace and the CA ConfigMap created in
			// Crossplane's namespace during setup.
			WithTeardown("RemoveRegistry", funcs.AllOf(
				funcs.AsFeaturesFunc(envfuncs.DeleteNamespace(registryNs)),
				func(ctx context.Context, t *testing.T, config *envconf.Config) context.Context {
					client := config.Client().Resources(namespace)
					configMap := &corev1.ConfigMap{
						ObjectMeta: metav1.ObjectMeta{
							Name:      "reg-ca",
							Namespace: namespace,
						},
					}
					err := client.Delete(ctx, configMap)
					if err != nil {
						t.Fatal(err)
					}
					return ctx
				},
			)).
			WithTeardown("CrossplaneDeployedWithoutFunctionsEnabled", funcs.AllOf(
				funcs.AsFeaturesFunc(environment.HelmUpgradeCrossplaneToBase()),
				funcs.ReadyToTestWithin(1*time.Minute, namespace),
			)).
			Feature(),
	)
}
// TestXfnRunnerWriteToTmp verifies that a function container can write to its
// /tmp. It installs a plain (non-TLS) in-cluster registry, copies the
// fn-tmp-writer image into it, reinstalls Crossplane with functions enabled,
// then asserts a claim composed with that function becomes available and the
// function labeled the NopResource managed resources.
func TestXfnRunnerWriteToTmp(t *testing.T) {
	manifests := "test/e2e/manifests/xfnrunner/tmp-writer"
	environment.Test(t,
		features.New(t.Name()).
			WithLabel(LabelArea, LabelAreaXFN).
			WithLabel(LabelStage, LabelStageAlpha).
			WithLabel(LabelSize, LabelSizeLarge).
			WithLabel(LabelModifyCrossplaneInstallation, LabelModifyCrossplaneInstallationTrue).
			WithLabel(config.LabelTestSuite, SuiteCompositionFunctions).
			// Install a public (no TLS) docker registry to serve the
			// function image.
			WithSetup("InstallRegistry",
				funcs.AllOf(
					funcs.AsFeaturesFunc(envfuncs.CreateNamespace(registryNs)),
					funcs.AsFeaturesFunc(
						funcs.HelmRepo(
							helm.WithArgs("add"),
							helm.WithArgs("twuni"),
							helm.WithArgs("https://helm.twun.io"),
						)),
					funcs.AsFeaturesFunc(
						funcs.HelmInstall(
							helm.WithName("public"),
							helm.WithNamespace(registryNs),
							helm.WithWait(),
							helm.WithChart("twuni/docker-registry"),
							helm.WithVersion("2.2.2"),
							helm.WithArgs(
								"--set service.type=NodePort",
								"--set service.nodePort=32000",
							),
						))),
			).
			WithSetup("CopyFnImageToRegistry",
				funcs.CopyImageToRegistry(clusterName, registryNs, "public-docker-registry", "crossplane-e2e/fn-tmp-writer:latest", timeoutOne)).
			WithSetup("CrossplaneDeployedWithFunctionsEnabled", funcs.AllOf(
				funcs.AsFeaturesFunc(environment.HelmUpgradeCrossplaneToSuite(SuiteCompositionFunctions)),
				funcs.ReadyToTestWithin(1*time.Minute, namespace),
			)).
			WithSetup("ProviderNopDeployed", funcs.AllOf(
				funcs.ApplyResources(FieldManager, manifests, "prerequisites/provider.yaml"),
				funcs.ApplyResources(FieldManager, manifests, "prerequisites/definition.yaml"),
				funcs.ResourcesCreatedWithin(30*time.Second, manifests, "prerequisites/provider.yaml"),
				funcs.ResourcesCreatedWithin(30*time.Second, manifests, "prerequisites/definition.yaml"),
				funcs.ResourcesHaveConditionWithin(1*time.Minute, manifests, "prerequisites/definition.yaml", v1.WatchingComposite()),
			)).
			Assess("CompositionWithFunctionIsCreated", funcs.AllOf(
				funcs.ApplyResources(FieldManager, manifests, "composition.yaml"),
				funcs.ResourcesCreatedWithin(30*time.Second, manifests, "composition.yaml"),
			)).
			Assess("ClaimIsCreated", funcs.AllOf(
				funcs.ApplyResources(FieldManager, manifests, "claim.yaml"),
				funcs.ResourcesCreatedWithin(30*time.Second, manifests, "claim.yaml"),
			)).
			Assess("ClaimBecomesAvailable",
				funcs.ResourcesHaveConditionWithin(timeoutFive, manifests, "claim.yaml", xpv1.Available())).
			// Only NopResource managed resources are checked for the label
			// written by the tmp-writer function.
			Assess("ManagedResourcesProcessedByFunction",
				funcs.ManagedResourcesOfClaimHaveFieldValueWithin(timeoutFive, manifests, "claim.yaml", "metadata.labels[tmp-writer.xfn.crossplane.io]", "true",
					funcs.FilterByGK(schema.GroupKind{Group: "nop.crossplane.io", Kind: "NopResource"}))).
			WithTeardown("DeleteClaim", funcs.AllOf(
				funcs.DeleteResources(manifests, "claim.yaml"),
				funcs.ResourcesDeletedWithin(30*time.Second, manifests, "claim.yaml"),
			)).
			WithTeardown("DeleteComposition", funcs.AllOf(
				funcs.DeleteResources(manifests, "composition.yaml"),
				funcs.ResourcesDeletedWithin(30*time.Second, manifests, "composition.yaml"),
			)).
			WithTeardown("ProviderNopRemoved", funcs.AllOf(
				funcs.DeleteResources(manifests, "prerequisites/provider.yaml"),
				funcs.DeleteResources(manifests, "prerequisites/definition.yaml"),
				funcs.ResourcesDeletedWithin(30*time.Second, manifests, "prerequisites/provider.yaml"),
				funcs.ResourcesDeletedWithin(30*time.Second, manifests, "prerequisites/definition.yaml"),
			)).
			WithTeardown("RemoveRegistry", funcs.AsFeaturesFunc(envfuncs.DeleteNamespace(registryNs))).
			WithTeardown("CrossplaneDeployedWithoutFunctionsEnabled", funcs.AllOf(
				funcs.AsFeaturesFunc(environment.HelmUpgradeCrossplaneToBase()),
				funcs.ReadyToTestWithin(1*time.Minute, namespace),
			)).
			Feature(),
	)
}