From 08d98c021e5e729a4238a89f80e230d112b84020 Mon Sep 17 00:00:00 2001 From: changzhen Date: Thu, 16 Dec 2021 20:39:28 +0800 Subject: [PATCH] add vendored dependencies Signed-off-by: changzhen --- go.mod | 1 + vendor/golang.org/x/crypto/AUTHORS | 3 + vendor/golang.org/x/crypto/CONTRIBUTORS | 3 + vendor/golang.org/x/crypto/LICENSE | 27 + vendor/golang.org/x/crypto/PATENTS | 22 + vendor/golang.org/x/crypto/cryptobyte/asn1.go | 804 +++++++++ .../x/crypto/cryptobyte/asn1/asn1.go | 46 + .../golang.org/x/crypto/cryptobyte/builder.go | 337 ++++ .../golang.org/x/crypto/cryptobyte/string.go | 161 ++ .../x/crypto/internal/subtle/aliasing.go | 33 + .../crypto/internal/subtle/aliasing_purego.go | 36 + .../x/crypto/nacl/secretbox/secretbox.go | 173 ++ .../x/crypto/poly1305/bits_compat.go | 40 + .../x/crypto/poly1305/bits_go1.13.go | 22 + .../golang.org/x/crypto/poly1305/mac_noasm.go | 10 + .../golang.org/x/crypto/poly1305/poly1305.go | 99 ++ .../golang.org/x/crypto/poly1305/sum_amd64.go | 48 + .../golang.org/x/crypto/poly1305/sum_amd64.s | 109 ++ .../x/crypto/poly1305/sum_generic.go | 310 ++++ .../x/crypto/poly1305/sum_ppc64le.go | 48 + .../x/crypto/poly1305/sum_ppc64le.s | 182 ++ .../golang.org/x/crypto/poly1305/sum_s390x.go | 76 + .../golang.org/x/crypto/poly1305/sum_s390x.s | 504 ++++++ .../x/crypto/salsa20/salsa/hsalsa20.go | 144 ++ .../x/crypto/salsa20/salsa/salsa208.go | 199 +++ .../x/crypto/salsa20/salsa/salsa20_amd64.go | 24 + .../x/crypto/salsa20/salsa/salsa20_amd64.s | 881 ++++++++++ .../x/crypto/salsa20/salsa/salsa20_noasm.go | 15 + .../x/crypto/salsa20/salsa/salsa20_ref.go | 231 +++ vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s | 18 + vendor/golang.org/x/sys/cpu/byteorder.go | 65 + vendor/golang.org/x/sys/cpu/cpu.go | 287 ++++ vendor/golang.org/x/sys/cpu/cpu_aix.go | 34 + vendor/golang.org/x/sys/cpu/cpu_arm.go | 73 + vendor/golang.org/x/sys/cpu/cpu_arm64.go | 172 ++ vendor/golang.org/x/sys/cpu/cpu_arm64.s | 32 + vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go | 12 + vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go | 22 + vendor/golang.org/x/sys/cpu/cpu_gc_x86.go | 21 + .../golang.org/x/sys/cpu/cpu_gccgo_arm64.go | 12 + .../golang.org/x/sys/cpu/cpu_gccgo_s390x.go | 23 + vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c | 43 + vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go | 33 + vendor/golang.org/x/sys/cpu/cpu_linux.go | 16 + vendor/golang.org/x/sys/cpu/cpu_linux_arm.go | 39 + .../golang.org/x/sys/cpu/cpu_linux_arm64.go | 71 + .../golang.org/x/sys/cpu/cpu_linux_mips64x.go | 24 + .../golang.org/x/sys/cpu/cpu_linux_noinit.go | 10 + .../golang.org/x/sys/cpu/cpu_linux_ppc64x.go | 32 + .../golang.org/x/sys/cpu/cpu_linux_s390x.go | 40 + vendor/golang.org/x/sys/cpu/cpu_mips64x.go | 16 + vendor/golang.org/x/sys/cpu/cpu_mipsx.go | 12 + .../golang.org/x/sys/cpu/cpu_netbsd_arm64.go | 173 ++ vendor/golang.org/x/sys/cpu/cpu_other_arm.go | 10 + .../golang.org/x/sys/cpu/cpu_other_arm64.go | 10 + .../golang.org/x/sys/cpu/cpu_other_mips64x.go | 13 + vendor/golang.org/x/sys/cpu/cpu_ppc64x.go | 17 + vendor/golang.org/x/sys/cpu/cpu_riscv64.go | 12 + vendor/golang.org/x/sys/cpu/cpu_s390x.go | 172 ++ vendor/golang.org/x/sys/cpu/cpu_s390x.s | 58 + vendor/golang.org/x/sys/cpu/cpu_wasm.go | 18 + vendor/golang.org/x/sys/cpu/cpu_x86.go | 144 ++ vendor/golang.org/x/sys/cpu/cpu_x86.s | 52 + vendor/golang.org/x/sys/cpu/cpu_zos.go | 10 + vendor/golang.org/x/sys/cpu/cpu_zos_s390x.go | 25 + vendor/golang.org/x/sys/cpu/hwcap_linux.go | 56 + .../golang.org/x/sys/cpu/syscall_aix_gccgo.go | 27 + .../x/sys/cpu/syscall_aix_ppc64_gc.go | 36 + 
.../natefinch/lumberjack.v2/.gitignore | 23 + .../natefinch/lumberjack.v2/.travis.yml | 6 + .../gopkg.in/natefinch/lumberjack.v2/LICENSE | 21 + .../natefinch/lumberjack.v2/README.md | 179 ++ .../gopkg.in/natefinch/lumberjack.v2/chown.go | 11 + .../natefinch/lumberjack.v2/chown_linux.go | 19 + .../natefinch/lumberjack.v2/lumberjack.go | 541 ++++++ .../pkg/apis/audit/install/install.go | 37 + .../k8s.io/apiserver/pkg/apis/config/doc.go | 19 + .../apiserver/pkg/apis/config/register.go | 53 + .../k8s.io/apiserver/pkg/apis/config/types.go | 100 ++ .../apiserver/pkg/apis/config/v1/defaults.go | 44 + .../apiserver/pkg/apis/config/v1/doc.go | 23 + .../apiserver/pkg/apis/config/v1/register.go | 53 + .../apiserver/pkg/apis/config/v1/types.go | 100 ++ .../apis/config/v1/zz_generated.conversion.go | 296 ++++ .../apis/config/v1/zz_generated.deepcopy.go | 227 +++ .../apis/config/v1/zz_generated.defaults.go | 45 + .../pkg/apis/config/validation/validation.go | 220 +++ .../pkg/apis/config/zz_generated.deepcopy.go | 227 +++ .../apiserver/pkg/authorization/path/doc.go | 18 + .../apiserver/pkg/authorization/path/path.go | 68 + .../generic/registry/decorated_watcher.go | 91 + .../pkg/registry/generic/registry/doc.go | 19 + .../pkg/registry/generic/registry/dryrun.go | 121 ++ .../generic/registry/storage_factory.go | 138 ++ .../pkg/registry/generic/registry/store.go | 1478 +++++++++++++++++ .../apiserver/pkg/server/options/OWNERS | 15 + .../apiserver/pkg/server/options/admission.go | 234 +++ .../pkg/server/options/api_enablement.go | 115 ++ .../apiserver/pkg/server/options/audit.go | 622 +++++++ .../pkg/server/options/authentication.go | 440 +++++ .../authentication_dynamic_request_header.go | 78 + .../pkg/server/options/authorization.go | 243 +++ .../apiserver/pkg/server/options/coreapi.go | 90 + .../options/deprecated_insecure_serving.go | 169 ++ .../apiserver/pkg/server/options/doc.go | 21 + .../pkg/server/options/egress_selector.go | 93 ++ .../server/options/encryptionconfig/OWNERS | 9 + .../server/options/encryptionconfig/config.go | 376 +++++ .../apiserver/pkg/server/options/etcd.go | 352 ++++ .../apiserver/pkg/server/options/feature.go | 72 + .../pkg/server/options/recommended.go | 171 ++ .../pkg/server/options/server_run_options.go | 249 +++ .../apiserver/pkg/server/options/serving.go | 383 +++++ .../pkg/server/options/serving_unix.go | 43 + .../pkg/server/options/serving_windows.go | 34 + .../server/options/serving_with_loopback.go | 81 + .../apiserver/pkg/server/options/tracing.go | 127 ++ .../pkg/server/resourceconfig/doc.go | 18 + .../pkg/server/resourceconfig/helpers.go | 201 +++ .../apiserver/pkg/storage/cacher/cacher.go | 1472 ++++++++++++++++ .../pkg/storage/cacher/caching_object.go | 397 +++++ .../apiserver/pkg/storage/cacher/metrics.go | 95 ++ .../pkg/storage/cacher/time_budget.go | 100 ++ .../apiserver/pkg/storage/cacher/util.go | 60 + .../pkg/storage/cacher/watch_cache.go | 633 +++++++ .../apiserver/pkg/storage/errors/doc.go | 18 + .../apiserver/pkg/storage/errors/storage.go | 116 ++ .../pkg/storage/value/encrypt/aes/aes.go | 152 ++ .../value/encrypt/envelope/envelope.go | 196 +++ .../value/encrypt/envelope/grpc_service.go | 181 ++ .../storage/value/encrypt/envelope/metrics.go | 102 ++ .../encrypt/envelope/v1beta1/service.pb.go | 502 ++++++ .../encrypt/envelope/v1beta1/service.proto | 70 + .../value/encrypt/envelope/v1beta1/v1beta1.go | 23 + .../value/encrypt/identity/identity.go | 50 + .../value/encrypt/secretbox/secretbox.go | 69 + vendor/k8s.io/apiserver/pkg/tracing/config.go | 119 ++ 
.../plugin/pkg/audit/buffered/buffered.go | 290 ++++ .../plugin/pkg/audit/buffered/doc.go | 19 + .../apiserver/plugin/pkg/audit/log/backend.go | 104 ++ .../plugin/pkg/audit/truncate/doc.go | 19 + .../plugin/pkg/audit/truncate/truncate.go | 160 ++ .../plugin/pkg/audit/webhook/webhook.go | 139 ++ vendor/modules.txt | 32 + 144 files changed, 20059 insertions(+) create mode 100644 vendor/golang.org/x/crypto/AUTHORS create mode 100644 vendor/golang.org/x/crypto/CONTRIBUTORS create mode 100644 vendor/golang.org/x/crypto/LICENSE create mode 100644 vendor/golang.org/x/crypto/PATENTS create mode 100644 vendor/golang.org/x/crypto/cryptobyte/asn1.go create mode 100644 vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go create mode 100644 vendor/golang.org/x/crypto/cryptobyte/builder.go create mode 100644 vendor/golang.org/x/crypto/cryptobyte/string.go create mode 100644 vendor/golang.org/x/crypto/internal/subtle/aliasing.go create mode 100644 vendor/golang.org/x/crypto/internal/subtle/aliasing_purego.go create mode 100644 vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go create mode 100644 vendor/golang.org/x/crypto/poly1305/bits_compat.go create mode 100644 vendor/golang.org/x/crypto/poly1305/bits_go1.13.go create mode 100644 vendor/golang.org/x/crypto/poly1305/mac_noasm.go create mode 100644 vendor/golang.org/x/crypto/poly1305/poly1305.go create mode 100644 vendor/golang.org/x/crypto/poly1305/sum_amd64.go create mode 100644 vendor/golang.org/x/crypto/poly1305/sum_amd64.s create mode 100644 vendor/golang.org/x/crypto/poly1305/sum_generic.go create mode 100644 vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go create mode 100644 vendor/golang.org/x/crypto/poly1305/sum_ppc64le.s create mode 100644 vendor/golang.org/x/crypto/poly1305/sum_s390x.go create mode 100644 vendor/golang.org/x/crypto/poly1305/sum_s390x.s create mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go create mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go create mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go create mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s create mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/salsa20_noasm.go create mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go create mode 100644 vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s create mode 100644 vendor/golang.org/x/sys/cpu/byteorder.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_aix.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_arm.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_arm64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_arm64.s create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gc_x86.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_arm.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go create mode 100644 
vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_mips64x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_mipsx.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_arm.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_arm64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_ppc64x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_riscv64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_s390x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_s390x.s create mode 100644 vendor/golang.org/x/sys/cpu/cpu_wasm.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_x86.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_x86.s create mode 100644 vendor/golang.org/x/sys/cpu/cpu_zos.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_zos_s390x.go create mode 100644 vendor/golang.org/x/sys/cpu/hwcap_linux.go create mode 100644 vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go create mode 100644 vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go create mode 100644 vendor/gopkg.in/natefinch/lumberjack.v2/.gitignore create mode 100644 vendor/gopkg.in/natefinch/lumberjack.v2/.travis.yml create mode 100644 vendor/gopkg.in/natefinch/lumberjack.v2/LICENSE create mode 100644 vendor/gopkg.in/natefinch/lumberjack.v2/README.md create mode 100644 vendor/gopkg.in/natefinch/lumberjack.v2/chown.go create mode 100644 vendor/gopkg.in/natefinch/lumberjack.v2/chown_linux.go create mode 100644 vendor/gopkg.in/natefinch/lumberjack.v2/lumberjack.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/install/install.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/config/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/config/register.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/config/types.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/config/v1/defaults.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/config/v1/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/config/v1/register.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/config/v1/types.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/config/v1/zz_generated.conversion.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/config/v1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/config/v1/zz_generated.defaults.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/config/validation/validation.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/config/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apiserver/pkg/authorization/path/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/authorization/path/path.go create mode 100644 vendor/k8s.io/apiserver/pkg/registry/generic/registry/decorated_watcher.go create mode 100644 vendor/k8s.io/apiserver/pkg/registry/generic/registry/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/registry/generic/registry/dryrun.go create mode 100644 vendor/k8s.io/apiserver/pkg/registry/generic/registry/storage_factory.go create mode 100644 vendor/k8s.io/apiserver/pkg/registry/generic/registry/store.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/OWNERS create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/admission.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/api_enablement.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/audit.go create mode 
100644 vendor/k8s.io/apiserver/pkg/server/options/authentication.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/authentication_dynamic_request_header.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/authorization.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/coreapi.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/deprecated_insecure_serving.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/egress_selector.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/OWNERS create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/config.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/etcd.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/feature.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/recommended.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/server_run_options.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/serving.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/serving_unix.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/serving_windows.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/serving_with_loopback.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/tracing.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/resourceconfig/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/resourceconfig/helpers.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/cacher/caching_object.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/cacher/metrics.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/cacher/time_budget.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/cacher/util.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/errors/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/errors/storage.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/value/encrypt/aes/aes.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/envelope.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/grpc_service.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/metrics.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1/service.pb.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1/service.proto create mode 100644 vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1/v1beta1.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/value/encrypt/identity/identity.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/value/encrypt/secretbox/secretbox.go create mode 100644 vendor/k8s.io/apiserver/pkg/tracing/config.go create mode 100644 vendor/k8s.io/apiserver/plugin/pkg/audit/buffered/buffered.go create mode 100644 vendor/k8s.io/apiserver/plugin/pkg/audit/buffered/doc.go create mode 100644 vendor/k8s.io/apiserver/plugin/pkg/audit/log/backend.go create mode 100644 vendor/k8s.io/apiserver/plugin/pkg/audit/truncate/doc.go create mode 100644 vendor/k8s.io/apiserver/plugin/pkg/audit/truncate/truncate.go create mode 100644 vendor/k8s.io/apiserver/plugin/pkg/audit/webhook/webhook.go diff 
--git a/go.mod b/go.mod index bde3709d8..ccc674c86 100644 --- a/go.mod +++ b/go.mod @@ -36,4 +36,5 @@ require ( sigs.k8s.io/kind v0.11.1 sigs.k8s.io/mcs-api v0.1.0 sigs.k8s.io/yaml v1.3.0 + sigs.k8s.io/structured-merge-diff/v4 v4.1.2 ) diff --git a/vendor/golang.org/x/crypto/AUTHORS b/vendor/golang.org/x/crypto/AUTHORS new file mode 100644 index 000000000..2b00ddba0 --- /dev/null +++ b/vendor/golang.org/x/crypto/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at https://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/crypto/CONTRIBUTORS b/vendor/golang.org/x/crypto/CONTRIBUTORS new file mode 100644 index 000000000..1fbd3e976 --- /dev/null +++ b/vendor/golang.org/x/crypto/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at https://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/crypto/LICENSE b/vendor/golang.org/x/crypto/LICENSE new file mode 100644 index 000000000..6a66aea5e --- /dev/null +++ b/vendor/golang.org/x/crypto/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/crypto/PATENTS b/vendor/golang.org/x/crypto/PATENTS new file mode 100644 index 000000000..733099041 --- /dev/null +++ b/vendor/golang.org/x/crypto/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. 
+ +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/crypto/cryptobyte/asn1.go b/vendor/golang.org/x/crypto/cryptobyte/asn1.go new file mode 100644 index 000000000..83c776de0 --- /dev/null +++ b/vendor/golang.org/x/crypto/cryptobyte/asn1.go @@ -0,0 +1,804 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cryptobyte + +import ( + encoding_asn1 "encoding/asn1" + "fmt" + "math/big" + "reflect" + "time" + + "golang.org/x/crypto/cryptobyte/asn1" +) + +// This file contains ASN.1-related methods for String and Builder. + +// Builder + +// AddASN1Int64 appends a DER-encoded ASN.1 INTEGER. +func (b *Builder) AddASN1Int64(v int64) { + b.addASN1Signed(asn1.INTEGER, v) +} + +// AddASN1Int64WithTag appends a DER-encoded ASN.1 INTEGER with the +// given tag. +func (b *Builder) AddASN1Int64WithTag(v int64, tag asn1.Tag) { + b.addASN1Signed(tag, v) +} + +// AddASN1Enum appends a DER-encoded ASN.1 ENUMERATION. +func (b *Builder) AddASN1Enum(v int64) { + b.addASN1Signed(asn1.ENUM, v) +} + +func (b *Builder) addASN1Signed(tag asn1.Tag, v int64) { + b.AddASN1(tag, func(c *Builder) { + length := 1 + for i := v; i >= 0x80 || i < -0x80; i >>= 8 { + length++ + } + + for ; length > 0; length-- { + i := v >> uint((length-1)*8) & 0xff + c.AddUint8(uint8(i)) + } + }) +} + +// AddASN1Uint64 appends a DER-encoded ASN.1 INTEGER. +func (b *Builder) AddASN1Uint64(v uint64) { + b.AddASN1(asn1.INTEGER, func(c *Builder) { + length := 1 + for i := v; i >= 0x80; i >>= 8 { + length++ + } + + for ; length > 0; length-- { + i := v >> uint((length-1)*8) & 0xff + c.AddUint8(uint8(i)) + } + }) +} + +// AddASN1BigInt appends a DER-encoded ASN.1 INTEGER. +func (b *Builder) AddASN1BigInt(n *big.Int) { + if b.err != nil { + return + } + + b.AddASN1(asn1.INTEGER, func(c *Builder) { + if n.Sign() < 0 { + // A negative number has to be converted to two's-complement form. So we + // invert and subtract 1. If the most-significant-bit isn't set then + // we'll need to pad the beginning with 0xff in order to keep the number + // negative. + nMinus1 := new(big.Int).Neg(n) + nMinus1.Sub(nMinus1, bigOne) + bytes := nMinus1.Bytes() + for i := range bytes { + bytes[i] ^= 0xff + } + if len(bytes) == 0 || bytes[0]&0x80 == 0 { + c.add(0xff) + } + c.add(bytes...) 
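+		// A worked example for this branch: n = -1 gives nMinus1 = 0, whose + 		// big-endian encoding is empty, so the single pad byte 0xff is emitted, + 		// which is exactly the two's-complement encoding of -1.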
+ } else if n.Sign() == 0 { + c.add(0) + } else { + bytes := n.Bytes() + if bytes[0]&0x80 != 0 { + c.add(0) + } + c.add(bytes...) + } + }) +} + +// AddASN1OctetString appends a DER-encoded ASN.1 OCTET STRING. +func (b *Builder) AddASN1OctetString(bytes []byte) { + b.AddASN1(asn1.OCTET_STRING, func(c *Builder) { + c.AddBytes(bytes) + }) +} + +const generalizedTimeFormatStr = "20060102150405Z0700" + +// AddASN1GeneralizedTime appends a DER-encoded ASN.1 GENERALIZEDTIME. +func (b *Builder) AddASN1GeneralizedTime(t time.Time) { + if t.Year() < 0 || t.Year() > 9999 { + b.err = fmt.Errorf("cryptobyte: cannot represent %v as a GeneralizedTime", t) + return + } + b.AddASN1(asn1.GeneralizedTime, func(c *Builder) { + c.AddBytes([]byte(t.Format(generalizedTimeFormatStr))) + }) +} + +// AddASN1UTCTime appends a DER-encoded ASN.1 UTCTime. +func (b *Builder) AddASN1UTCTime(t time.Time) { + b.AddASN1(asn1.UTCTime, func(c *Builder) { + // As utilized by the X.509 profile, UTCTime can only + // represent the years 1950 through 2049. + if t.Year() < 1950 || t.Year() >= 2050 { + b.err = fmt.Errorf("cryptobyte: cannot represent %v as a UTCTime", t) + return + } + c.AddBytes([]byte(t.Format(defaultUTCTimeFormatStr))) + }) +} + +// AddASN1BitString appends a DER-encoded ASN.1 BIT STRING. This does not +// support BIT STRINGs that are not a whole number of bytes. +func (b *Builder) AddASN1BitString(data []byte) { + b.AddASN1(asn1.BIT_STRING, func(b *Builder) { + b.AddUint8(0) + b.AddBytes(data) + }) +} + +func (b *Builder) addBase128Int(n int64) { + var length int + if n == 0 { + length = 1 + } else { + for i := n; i > 0; i >>= 7 { + length++ + } + } + + for i := length - 1; i >= 0; i-- { + o := byte(n >> uint(i*7)) + o &= 0x7f + if i != 0 { + o |= 0x80 + } + + b.add(o) + } +} + +func isValidOID(oid encoding_asn1.ObjectIdentifier) bool { + if len(oid) < 2 { + return false + } + + if oid[0] > 2 || (oid[0] <= 1 && oid[1] >= 40) { + return false + } + + for _, v := range oid { + if v < 0 { + return false + } + } + + return true +} + +func (b *Builder) AddASN1ObjectIdentifier(oid encoding_asn1.ObjectIdentifier) { + b.AddASN1(asn1.OBJECT_IDENTIFIER, func(b *Builder) { + if !isValidOID(oid) { + b.err = fmt.Errorf("cryptobyte: invalid OID: %v", oid) + return + } + + b.addBase128Int(int64(oid[0])*40 + int64(oid[1])) + for _, v := range oid[2:] { + b.addBase128Int(int64(v)) + } + }) +} + +func (b *Builder) AddASN1Boolean(v bool) { + b.AddASN1(asn1.BOOLEAN, func(b *Builder) { + if v { + b.AddUint8(0xff) + } else { + b.AddUint8(0) + } + }) +} + +func (b *Builder) AddASN1NULL() { + b.add(uint8(asn1.NULL), 0) +} + +// MarshalASN1 calls encoding_asn1.Marshal on its input and appends the result if +// successful or records an error if one occurred. +func (b *Builder) MarshalASN1(v interface{}) { + // NOTE(martinkr): This is somewhat of a hack to allow propagation of + // encoding_asn1.Marshal errors into Builder.err. N.B. if you call MarshalASN1 with a + // value embedded into a struct, its tag information is lost. + if b.err != nil { + return + } + bytes, err := encoding_asn1.Marshal(v) + if err != nil { + b.err = err + return + } + b.AddBytes(bytes) +} + +// AddASN1 appends an ASN.1 object. The object is prefixed with the given tag. +// Tags greater than 30 are not supported and result in an error (i.e. +// low-tag-number form only). The child builder passed to the +// BuilderContinuation can be used to build the content of the ASN.1 object. 
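+// For example, to emit a SEQUENCE of two INTEGERs: +// +//	b.AddASN1(asn1.SEQUENCE, func(child *Builder) { +//		child.AddASN1Int64(1) +//		child.AddASN1Int64(2) +//	})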
+func (b *Builder) AddASN1(tag asn1.Tag, f BuilderContinuation) { + if b.err != nil { + return + } + // Identifiers with the low five bits set indicate high-tag-number format + // (two or more octets), which we don't support. + if tag&0x1f == 0x1f { + b.err = fmt.Errorf("cryptobyte: high-tag number identifier octets not supported: 0x%x", tag) + return + } + b.AddUint8(uint8(tag)) + b.addLengthPrefixed(1, true, f) +} + +// String + +// ReadASN1Boolean decodes an ASN.1 BOOLEAN into out and advances. It +// reports whether the read was successful. +func (s *String) ReadASN1Boolean(out *bool) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.BOOLEAN) || len(bytes) != 1 { + return false + } + + switch bytes[0] { + case 0: + *out = false + case 0xff: + *out = true + default: + return false + } + + return true +} + +var bigIntType = reflect.TypeOf((*big.Int)(nil)).Elem() + +// ReadASN1Integer decodes an ASN.1 INTEGER into out and advances. If out does +// not point to an integer or to a big.Int, it panics. It reports whether the +// read was successful. +func (s *String) ReadASN1Integer(out interface{}) bool { + if reflect.TypeOf(out).Kind() != reflect.Ptr { + panic("out is not a pointer") + } + switch reflect.ValueOf(out).Elem().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + var i int64 + if !s.readASN1Int64(&i) || reflect.ValueOf(out).Elem().OverflowInt(i) { + return false + } + reflect.ValueOf(out).Elem().SetInt(i) + return true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + var u uint64 + if !s.readASN1Uint64(&u) || reflect.ValueOf(out).Elem().OverflowUint(u) { + return false + } + reflect.ValueOf(out).Elem().SetUint(u) + return true + case reflect.Struct: + if reflect.TypeOf(out).Elem() == bigIntType { + return s.readASN1BigInt(out.(*big.Int)) + } + } + panic("out does not point to an integer type") +} + +func checkASN1Integer(bytes []byte) bool { + if len(bytes) == 0 { + // An INTEGER is encoded with at least one octet. + return false + } + if len(bytes) == 1 { + return true + } + if bytes[0] == 0 && bytes[1]&0x80 == 0 || bytes[0] == 0xff && bytes[1]&0x80 == 0x80 { + // Value is not minimally encoded. + return false + } + return true +} + +var bigOne = big.NewInt(1) + +func (s *String) readASN1BigInt(out *big.Int) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) { + return false + } + if bytes[0]&0x80 == 0x80 { + // Negative number. + neg := make([]byte, len(bytes)) + for i, b := range bytes { + neg[i] = ^b + } + out.SetBytes(neg) + out.Add(out, bigOne) + out.Neg(out) + } else { + out.SetBytes(bytes) + } + return true +} + +func (s *String) readASN1Int64(out *int64) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) || !asn1Signed(out, bytes) { + return false + } + return true +} + +func asn1Signed(out *int64, n []byte) bool { + length := len(n) + if length > 8 { + return false + } + for i := 0; i < length; i++ { + *out <<= 8 + *out |= int64(n[i]) + } + // Shift up and down in order to sign extend the result. 
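+	// For example, a one-byte INTEGER with content octet 0xff first loads + 	// as 0x00000000000000ff; shifting left and then arithmetically back + 	// right by 56 bits sign-extends it to -1, the value that octet encodes.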
+ *out <<= 64 - uint8(length)*8 + *out >>= 64 - uint8(length)*8 + return true +} + +func (s *String) readASN1Uint64(out *uint64) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) || !asn1Unsigned(out, bytes) { + return false + } + return true +} + +func asn1Unsigned(out *uint64, n []byte) bool { + length := len(n) + if length > 9 || length == 9 && n[0] != 0 { + // Too large for uint64. + return false + } + if n[0]&0x80 != 0 { + // Negative number. + return false + } + for i := 0; i < length; i++ { + *out <<= 8 + *out |= uint64(n[i]) + } + return true +} + +// ReadASN1Int64WithTag decodes an ASN.1 INTEGER with the given tag into out +// and advances. It reports whether the read was successful and resulted in a +// value that can be represented in an int64. +func (s *String) ReadASN1Int64WithTag(out *int64, tag asn1.Tag) bool { + var bytes String + return s.ReadASN1(&bytes, tag) && checkASN1Integer(bytes) && asn1Signed(out, bytes) +} + +// ReadASN1Enum decodes an ASN.1 ENUMERATION into out and advances. It reports +// whether the read was successful. +func (s *String) ReadASN1Enum(out *int) bool { + var bytes String + var i int64 + if !s.ReadASN1(&bytes, asn1.ENUM) || !checkASN1Integer(bytes) || !asn1Signed(&i, bytes) { + return false + } + if int64(int(i)) != i { + return false + } + *out = int(i) + return true +} + +func (s *String) readBase128Int(out *int) bool { + ret := 0 + for i := 0; len(*s) > 0; i++ { + if i == 4 { + return false + } + ret <<= 7 + b := s.read(1)[0] + ret |= int(b & 0x7f) + if b&0x80 == 0 { + *out = ret + return true + } + } + return false // truncated +} + +// ReadASN1ObjectIdentifier decodes an ASN.1 OBJECT IDENTIFIER into out and +// advances. It reports whether the read was successful. +func (s *String) ReadASN1ObjectIdentifier(out *encoding_asn1.ObjectIdentifier) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.OBJECT_IDENTIFIER) || len(bytes) == 0 { + return false + } + + // In the worst case, we get two elements from the first byte (which is + // encoded differently) and then every varint is a single byte long. + components := make([]int, len(bytes)+1) + + // The first varint is 40*value1 + value2: + // According to this packing, value1 can take the values 0, 1 and 2 only. + // When value1 = 0 or value1 = 1, then value2 is <= 39. When value1 = 2, + // then there are no restrictions on value2. + var v int + if !bytes.readBase128Int(&v) { + return false + } + if v < 80 { + components[0] = v / 40 + components[1] = v % 40 + } else { + components[0] = 2 + components[1] = v - 80 + } + + i := 2 + for ; len(bytes) > 0; i++ { + if !bytes.readBase128Int(&v) { + return false + } + components[i] = v + } + *out = components[:i] + return true +} + +// ReadASN1GeneralizedTime decodes an ASN.1 GENERALIZEDTIME into out and +// advances. It reports whether the read was successful. +func (s *String) ReadASN1GeneralizedTime(out *time.Time) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.GeneralizedTime) { + return false + } + t := string(bytes) + res, err := time.Parse(generalizedTimeFormatStr, t) + if err != nil { + return false + } + if serialized := res.Format(generalizedTimeFormatStr); serialized != t { + return false + } + *out = res + return true +} + +const defaultUTCTimeFormatStr = "060102150405Z0700" + +// ReadASN1UTCTime decodes an ASN.1 UTCTime into out and advances. +// It reports whether the read was successful. 
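+// For example, the UTCTime string "211216203928Z" decodes to +// 2021-12-16 20:39:28 UTC under the two-digit-year windowing applied below.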
+func (s *String) ReadASN1UTCTime(out *time.Time) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.UTCTime) { + return false + } + t := string(bytes) + + formatStr := defaultUTCTimeFormatStr + var err error + res, err := time.Parse(formatStr, t) + if err != nil { + // Fallback to minute precision if we can't parse second + // precision. If we are following X.509 or X.690 we shouldn't + // support this, but we do. + formatStr = "0601021504Z0700" + res, err = time.Parse(formatStr, t) + } + if err != nil { + return false + } + + if serialized := res.Format(formatStr); serialized != t { + return false + } + + if res.Year() >= 2050 { + // UTCTime interprets the low order digits 50-99 as 1950-99. + // This only applies to its use in the X.509 profile. + // See https://tools.ietf.org/html/rfc5280#section-4.1.2.5.1 + res = res.AddDate(-100, 0, 0) + } + *out = res + return true +} + +// ReadASN1BitString decodes an ASN.1 BIT STRING into out and advances. +// It reports whether the read was successful. +func (s *String) ReadASN1BitString(out *encoding_asn1.BitString) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.BIT_STRING) || len(bytes) == 0 || + len(bytes)*8/8 != len(bytes) { + return false + } + + paddingBits := uint8(bytes[0]) + bytes = bytes[1:] + if paddingBits > 7 || + len(bytes) == 0 && paddingBits != 0 || + len(bytes) > 0 && bytes[len(bytes)-1]&(1<<paddingBits-1) != 0 { + return false + } + + out.BitLength = len(bytes)*8 - int(paddingBits) + out.Bytes = bytes + return true +} + +// ReadASN1 reads the contents of a DER-encoded ASN.1 element (not including +// tag and length bytes) into out, and advances. The element must match the +// given tag. It reports whether the read was successful. +func (s *String) ReadASN1(out *String, tag asn1.Tag) bool { + var t asn1.Tag + if !s.ReadAnyASN1(out, &t) || t != tag { + return false + } + return true +} + +// ReadAnyASN1 reads the contents of a DER-encoded ASN.1 element (not including +// tag and length bytes) into out, sets outTag to its tag, and advances. +// It reports whether the read was successful. +func (s *String) ReadAnyASN1(out *String, outTag *asn1.Tag) bool { + return s.readASN1(out, outTag, true /* skip header */) +} + +func (s *String) readASN1(out *String, outTag *asn1.Tag, skipHeader bool) bool { + if len(*s) < 2 { + return false + } + tag, lenByte := (*s)[0], (*s)[1] + + if tag&0x1f == 0x1f { + // ITU-T X.690 section 8.1.2 + // + // An identifier octet with a tag part of 0x1f indicates a high-tag-number + // form identifier with two or more octets. We only support tags less than + // 31 (i.e. low-tag-number form, single octet identifier). + return false + } + + if outTag != nil { + *outTag = asn1.Tag(tag) + } + + // ITU-T X.690 section 8.1.3 + // + // Bit 8 of the first length byte indicates whether the length is short- or + // long-form. + var length, headerLen uint32 // length includes headerLen + if lenByte&0x80 == 0 { + // Short-form length (section 8.1.3.4), encoded in bits 1-7. + length = uint32(lenByte) + 2 + headerLen = 2 + } else { + // Long-form length (section 8.1.3.5). Bits 1-7 encode the number of + // octets used to encode the length. + lenLen := lenByte & 0x7f + var len32 uint32 + + if lenLen == 0 || lenLen > 4 || len(*s) < int(2+lenLen) { + return false + } + + lenBytes := String((*s)[2 : 2+lenLen]) + if !lenBytes.readUnsigned(&len32, int(lenLen)) { + return false + } + + // ITU-T X.690 section 10.1 (DER length forms) requires encoding the length + // with the minimum number of octets. + if len32 < 128 { + // Length should have used short-form encoding. + return false + } + if len32>>((lenLen-1)*8) == 0 { + // Leading octet is 0. Length should have been at least one byte shorter. + return false + } + + headerLen = 2 + uint32(lenLen) + if headerLen+len32 < len32 { + // Overflow. + return false + } + length = headerLen + len32 + } + + if int(length) < 0 || !s.ReadBytes((*[]byte)(out), int(length)) { + return false + } + if skipHeader && !out.Skip(int(headerLen)) { + panic("cryptobyte: internal error") + } + + return true +} diff --git a/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go b/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go new file mode 100644 index 000000000..cda8e3edf --- /dev/null +++ b/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go @@ -0,0 +1,46 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package asn1 contains supporting types for parsing and building ASN.1 +// messages with the cryptobyte package. +package asn1 // import "golang.org/x/crypto/cryptobyte/asn1" + +// Tag represents an ASN.1 identifier octet, consisting of a tag number +// (indicating a type) and class (such as context-specific or constructed). +// +// Methods in the cryptobyte package only support the low-tag-number form, i.e. +// a single identifier octet with bits 7-8 encoding the class and bits 1-6 +// encoding the tag number. +type Tag uint8 + +const ( + classConstructed = 0x20 + classContextSpecific = 0x80 +) + +// Constructed returns t with the constructed class bit set. +func (t Tag) Constructed() Tag { return t | classConstructed } + +// ContextSpecific returns t with the context-specific class bit set. 
+func (t Tag) ContextSpecific() Tag { return t | classContextSpecific } + +// The following is a list of standard tag and class combinations. +const ( + BOOLEAN = Tag(1) + INTEGER = Tag(2) + BIT_STRING = Tag(3) + OCTET_STRING = Tag(4) + NULL = Tag(5) + OBJECT_IDENTIFIER = Tag(6) + ENUM = Tag(10) + UTF8String = Tag(12) + SEQUENCE = Tag(16 | classConstructed) + SET = Tag(17 | classConstructed) + PrintableString = Tag(19) + T61String = Tag(20) + IA5String = Tag(22) + UTCTime = Tag(23) + GeneralizedTime = Tag(24) + GeneralString = Tag(27) +) diff --git a/vendor/golang.org/x/crypto/cryptobyte/builder.go b/vendor/golang.org/x/crypto/cryptobyte/builder.go new file mode 100644 index 000000000..ca7b1db5c --- /dev/null +++ b/vendor/golang.org/x/crypto/cryptobyte/builder.go @@ -0,0 +1,337 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cryptobyte + +import ( + "errors" + "fmt" +) + +// A Builder builds byte strings from fixed-length and length-prefixed values. +// Builders either allocate space as needed, or are ‘fixed’, which means that +// they write into a given buffer and produce an error if it's exhausted. +// +// The zero value is a usable Builder that allocates space as needed. +// +// Simple values are marshaled and appended to a Builder using methods on the +// Builder. Length-prefixed values are marshaled by providing a +// BuilderContinuation, which is a function that writes the inner contents of +// the value to a given Builder. See the documentation for BuilderContinuation +// for details. +type Builder struct { + err error + result []byte + fixedSize bool + child *Builder + offset int + pendingLenLen int + pendingIsASN1 bool + inContinuation *bool +} + +// NewBuilder creates a Builder that appends its output to the given buffer. +// Like append(), the slice will be reallocated if its capacity is exceeded. +// Use Bytes to get the final buffer. +func NewBuilder(buffer []byte) *Builder { + return &Builder{ + result: buffer, + } +} + +// NewFixedBuilder creates a Builder that appends its output into the given +// buffer. This builder does not reallocate the output buffer. Writes that +// would exceed the buffer's capacity are treated as an error. +func NewFixedBuilder(buffer []byte) *Builder { + return &Builder{ + result: buffer, + fixedSize: true, + } +} + +// SetError sets the value to be returned as the error from Bytes. Writes +// performed after calling SetError are ignored. +func (b *Builder) SetError(err error) { + b.err = err +} + +// Bytes returns the bytes written by the builder or an error if one has +// occurred during building. +func (b *Builder) Bytes() ([]byte, error) { + if b.err != nil { + return nil, b.err + } + return b.result[b.offset:], nil +} + +// BytesOrPanic returns the bytes written by the builder or panics if an error +// has occurred during building. +func (b *Builder) BytesOrPanic() []byte { + if b.err != nil { + panic(b.err) + } + return b.result[b.offset:] +} + +// AddUint8 appends an 8-bit value to the byte string. +func (b *Builder) AddUint8(v uint8) { + b.add(byte(v)) +} + +// AddUint16 appends a big-endian, 16-bit value to the byte string. +func (b *Builder) AddUint16(v uint16) { + b.add(byte(v>>8), byte(v)) +} + +// AddUint24 appends a big-endian, 24-bit value to the byte string. The highest +// byte of the 32-bit input value is silently truncated. 
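+// For example, AddUint24(0x01020304) appends the bytes 0x02, 0x03 and 0x04, +// dropping the high byte 0x01.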
+func (b *Builder) AddUint24(v uint32) { + b.add(byte(v>>16), byte(v>>8), byte(v)) +} + +// AddUint32 appends a big-endian, 32-bit value to the byte string. +func (b *Builder) AddUint32(v uint32) { + b.add(byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) +} + +// AddBytes appends a sequence of bytes to the byte string. +func (b *Builder) AddBytes(v []byte) { + b.add(v...) +} + +// BuilderContinuation is a continuation-passing interface for building +// length-prefixed byte sequences. Builder methods for length-prefixed +// sequences (AddUint8LengthPrefixed etc) will invoke the BuilderContinuation +// supplied to them. The child builder passed to the continuation can be used +// to build the content of the length-prefixed sequence. For example: +// +// parent := cryptobyte.NewBuilder() +// parent.AddUint8LengthPrefixed(func (child *Builder) { +// child.AddUint8(42) +// child.AddUint8LengthPrefixed(func (grandchild *Builder) { +// grandchild.AddUint8(5) +// }) +// }) +// +// It is an error to write more bytes to the child than allowed by the reserved +// length prefix. After the continuation returns, the child must be considered +// invalid, i.e. users must not store any copies or references of the child +// that outlive the continuation. +// +// If the continuation panics with a value of type BuildError then the inner +// error will be returned as the error from Bytes. If the child panics +// otherwise then Bytes will repanic with the same value. +type BuilderContinuation func(child *Builder) + +// BuildError wraps an error. If a BuilderContinuation panics with this value, +// the panic will be recovered and the inner error will be returned from +// Builder.Bytes. +type BuildError struct { + Err error +} + +// AddUint8LengthPrefixed adds an 8-bit length-prefixed byte sequence. +func (b *Builder) AddUint8LengthPrefixed(f BuilderContinuation) { + b.addLengthPrefixed(1, false, f) +} + +// AddUint16LengthPrefixed adds a big-endian, 16-bit length-prefixed byte sequence. +func (b *Builder) AddUint16LengthPrefixed(f BuilderContinuation) { + b.addLengthPrefixed(2, false, f) +} + +// AddUint24LengthPrefixed adds a big-endian, 24-bit length-prefixed byte sequence. +func (b *Builder) AddUint24LengthPrefixed(f BuilderContinuation) { + b.addLengthPrefixed(3, false, f) +} + +// AddUint32LengthPrefixed adds a big-endian, 32-bit length-prefixed byte sequence. +func (b *Builder) AddUint32LengthPrefixed(f BuilderContinuation) { + b.addLengthPrefixed(4, false, f) +} + +func (b *Builder) callContinuation(f BuilderContinuation, arg *Builder) { + if !*b.inContinuation { + *b.inContinuation = true + + defer func() { + *b.inContinuation = false + + r := recover() + if r == nil { + return + } + + if buildError, ok := r.(BuildError); ok { + b.err = buildError.Err + } else { + panic(r) + } + }() + } + + f(arg) +} + +func (b *Builder) addLengthPrefixed(lenLen int, isASN1 bool, f BuilderContinuation) { + // Subsequent writes can be ignored if the builder has encountered an error. + if b.err != nil { + return + } + + offset := len(b.result) + b.add(make([]byte, lenLen)...) 
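+	// The zero bytes appended above are placeholders for the length prefix; + 	// the real length is not yet known and is patched in by flushChild once + 	// the continuation has finished writing the child's content.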
+ + if b.inContinuation == nil { + b.inContinuation = new(bool) + } + + b.child = &Builder{ + result: b.result, + fixedSize: b.fixedSize, + offset: offset, + pendingLenLen: lenLen, + pendingIsASN1: isASN1, + inContinuation: b.inContinuation, + } + + b.callContinuation(f, b.child) + b.flushChild() + if b.child != nil { + panic("cryptobyte: internal error") + } +} + +func (b *Builder) flushChild() { + if b.child == nil { + return + } + b.child.flushChild() + child := b.child + b.child = nil + + if child.err != nil { + b.err = child.err + return + } + + length := len(child.result) - child.pendingLenLen - child.offset + + if length < 0 { + panic("cryptobyte: internal error") // result unexpectedly shrunk + } + + if child.pendingIsASN1 { + // For ASN.1, we reserved a single byte for the length. If that turned out + // to be incorrect, we have to move the contents along in order to make + // space. + if child.pendingLenLen != 1 { + panic("cryptobyte: internal error") + } + var lenLen, lenByte uint8 + if int64(length) > 0xfffffffe { + b.err = errors.New("pending ASN.1 child too long") + return + } else if length > 0xffffff { + lenLen = 5 + lenByte = 0x80 | 4 + } else if length > 0xffff { + lenLen = 4 + lenByte = 0x80 | 3 + } else if length > 0xff { + lenLen = 3 + lenByte = 0x80 | 2 + } else if length > 0x7f { + lenLen = 2 + lenByte = 0x80 | 1 + } else { + lenLen = 1 + lenByte = uint8(length) + length = 0 + } + + // Insert the initial length byte, make space for successive length bytes, + // and adjust the offset. + child.result[child.offset] = lenByte + extraBytes := int(lenLen - 1) + if extraBytes != 0 { + child.add(make([]byte, extraBytes)...) + childStart := child.offset + child.pendingLenLen + copy(child.result[childStart+extraBytes:], child.result[childStart:]) + } + child.offset++ + child.pendingLenLen = extraBytes + } + + l := length + for i := child.pendingLenLen - 1; i >= 0; i-- { + child.result[child.offset+i] = uint8(l) + l >>= 8 + } + if l != 0 { + b.err = fmt.Errorf("cryptobyte: pending child length %d exceeds %d-byte length prefix", length, child.pendingLenLen) + return + } + + if b.fixedSize && &b.result[0] != &child.result[0] { + panic("cryptobyte: BuilderContinuation reallocated a fixed-size buffer") + } + + b.result = child.result +} + +func (b *Builder) add(bytes ...byte) { + if b.err != nil { + return + } + if b.child != nil { + panic("cryptobyte: attempted write while child is pending") + } + if len(b.result)+len(bytes) < len(bytes) { + b.err = errors.New("cryptobyte: length overflow") + } + if b.fixedSize && len(b.result)+len(bytes) > cap(b.result) { + b.err = errors.New("cryptobyte: Builder is exceeding its fixed-size buffer") + return + } + b.result = append(b.result, bytes...) +} + +// Unwrite rolls back n bytes written directly to the Builder. An attempt by a +// child builder passed to a continuation to unwrite bytes from its parent will +// panic. +func (b *Builder) Unwrite(n int) { + if b.err != nil { + return + } + if b.child != nil { + panic("cryptobyte: attempted unwrite while child is pending") + } + length := len(b.result) - b.pendingLenLen - b.offset + if length < 0 { + panic("cryptobyte: internal error") + } + if n > length { + panic("cryptobyte: attempted to unwrite more than was written") + } + b.result = b.result[:len(b.result)-n] +} + +// A MarshalingValue marshals itself into a Builder. +type MarshalingValue interface { + // Marshal is called by Builder.AddValue. It receives a pointer to a builder + // to marshal itself into. 
It may return an error that occurred during +// marshaling, such as unset or invalid values. + Marshal(b *Builder) error +} + +// AddValue calls Marshal on v, passing a pointer to the builder to append to. +// If Marshal returns an error, it is set on the Builder so that subsequent +// appends don't have an effect. +func (b *Builder) AddValue(v MarshalingValue) { + err := v.Marshal(b) + if err != nil { + b.err = err + } +} diff --git a/vendor/golang.org/x/crypto/cryptobyte/string.go b/vendor/golang.org/x/crypto/cryptobyte/string.go new file mode 100644 index 000000000..589d297e6 --- /dev/null +++ b/vendor/golang.org/x/crypto/cryptobyte/string.go @@ -0,0 +1,161 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cryptobyte contains types that help with parsing and constructing +// length-prefixed, binary messages, including ASN.1 DER. (The asn1 subpackage +// contains useful ASN.1 constants.) +// +// The String type is for parsing. It wraps a []byte slice and provides helper +// functions for consuming structures, value by value. +// +// The Builder type is for constructing messages. It provides helper functions +// for appending values and also for appending length-prefixed submessages – +// without having to worry about calculating the length prefix ahead of time. +// +// See the documentation and examples for the Builder and String types to get +// started. +package cryptobyte // import "golang.org/x/crypto/cryptobyte" + +// String represents a string of bytes. It provides methods for parsing +// fixed-length and length-prefixed values from it. +type String []byte + +// read advances a String by n bytes and returns them. If less than n bytes +// remain, it returns nil. +func (s *String) read(n int) []byte { + if len(*s) < n || n < 0 { + return nil + } + v := (*s)[:n] + *s = (*s)[n:] + return v +} + +// Skip advances the String by n bytes and reports whether it was successful. +func (s *String) Skip(n int) bool { + return s.read(n) != nil +} + +// ReadUint8 decodes an 8-bit value into out and advances over it. +// It reports whether the read was successful. +func (s *String) ReadUint8(out *uint8) bool { + v := s.read(1) + if v == nil { + return false + } + *out = uint8(v[0]) + return true +} + +// ReadUint16 decodes a big-endian, 16-bit value into out and advances over it. +// It reports whether the read was successful. +func (s *String) ReadUint16(out *uint16) bool { + v := s.read(2) + if v == nil { + return false + } + *out = uint16(v[0])<<8 | uint16(v[1]) + return true +} + +// ReadUint24 decodes a big-endian, 24-bit value into out and advances over it. +// It reports whether the read was successful. +func (s *String) ReadUint24(out *uint32) bool { + v := s.read(3) + if v == nil { + return false + } + *out = uint32(v[0])<<16 | uint32(v[1])<<8 | uint32(v[2]) + return true +} + +// ReadUint32 decodes a big-endian, 32-bit value into out and advances over it. +// It reports whether the read was successful. 
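+// For example, reading the bytes 0xde 0xad 0xbe 0xef yields 0xdeadbeef.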
+func (s *String) ReadUint32(out *uint32) bool { + v := s.read(4) + if v == nil { + return false + } + *out = uint32(v[0])<<24 | uint32(v[1])<<16 | uint32(v[2])<<8 | uint32(v[3]) + return true +} + +func (s *String) readUnsigned(out *uint32, length int) bool { + v := s.read(length) + if v == nil { + return false + } + var result uint32 + for i := 0; i < length; i++ { + result <<= 8 + result |= uint32(v[i]) + } + *out = result + return true +} + +func (s *String) readLengthPrefixed(lenLen int, outChild *String) bool { + lenBytes := s.read(lenLen) + if lenBytes == nil { + return false + } + var length uint32 + for _, b := range lenBytes { + length = length << 8 + length = length | uint32(b) + } + v := s.read(int(length)) + if v == nil { + return false + } + *outChild = v + return true +} + +// ReadUint8LengthPrefixed reads the content of an 8-bit length-prefixed value +// into out and advances over it. It reports whether the read was successful. +func (s *String) ReadUint8LengthPrefixed(out *String) bool { + return s.readLengthPrefixed(1, out) +} + +// ReadUint16LengthPrefixed reads the content of a big-endian, 16-bit +// length-prefixed value into out and advances over it. It reports whether the +// read was successful. +func (s *String) ReadUint16LengthPrefixed(out *String) bool { + return s.readLengthPrefixed(2, out) +} + +// ReadUint24LengthPrefixed reads the content of a big-endian, 24-bit +// length-prefixed value into out and advances over it. It reports whether +// the read was successful. +func (s *String) ReadUint24LengthPrefixed(out *String) bool { + return s.readLengthPrefixed(3, out) +} + +// ReadBytes reads n bytes into out and advances over them. It reports +// whether the read was successful. +func (s *String) ReadBytes(out *[]byte, n int) bool { + v := s.read(n) + if v == nil { + return false + } + *out = v + return true +} + +// CopyBytes copies len(out) bytes into out and advances over them. It reports +// whether the copy operation was successful +func (s *String) CopyBytes(out []byte) bool { + n := len(out) + v := s.read(n) + if v == nil { + return false + } + return copy(out, v) == n +} + +// Empty reports whether the string does not contain any bytes. +func (s String) Empty() bool { + return len(s) == 0 +} diff --git a/vendor/golang.org/x/crypto/internal/subtle/aliasing.go b/vendor/golang.org/x/crypto/internal/subtle/aliasing.go new file mode 100644 index 000000000..4fad24f8d --- /dev/null +++ b/vendor/golang.org/x/crypto/internal/subtle/aliasing.go @@ -0,0 +1,33 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !purego +// +build !purego + +// Package subtle implements functions that are often useful in cryptographic +// code but require careful thought to use correctly. +package subtle // import "golang.org/x/crypto/internal/subtle" + +import "unsafe" + +// AnyOverlap reports whether x and y share memory at any (not necessarily +// corresponding) index. The memory beyond the slice length is ignored. +func AnyOverlap(x, y []byte) bool { + return len(x) > 0 && len(y) > 0 && + uintptr(unsafe.Pointer(&x[0])) <= uintptr(unsafe.Pointer(&y[len(y)-1])) && + uintptr(unsafe.Pointer(&y[0])) <= uintptr(unsafe.Pointer(&x[len(x)-1])) +} + +// InexactOverlap reports whether x and y share memory at any non-corresponding +// index. The memory beyond the slice length is ignored. 
Note that x and y can +// have different lengths and still not have any inexact overlap. +// +// InexactOverlap can be used to implement the requirements of the crypto/cipher +// AEAD, Block, BlockMode and Stream interfaces. +func InexactOverlap(x, y []byte) bool { + if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] { + return false + } + return AnyOverlap(x, y) +} diff --git a/vendor/golang.org/x/crypto/internal/subtle/aliasing_purego.go b/vendor/golang.org/x/crypto/internal/subtle/aliasing_purego.go new file mode 100644 index 000000000..80ccbed2c --- /dev/null +++ b/vendor/golang.org/x/crypto/internal/subtle/aliasing_purego.go @@ -0,0 +1,36 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build purego +// +build purego + +// Package subtle implements functions that are often useful in cryptographic +// code but require careful thought to use correctly. +package subtle // import "golang.org/x/crypto/internal/subtle" + +// This is the Google App Engine standard variant based on reflect +// because the unsafe package and cgo are disallowed. + +import "reflect" + +// AnyOverlap reports whether x and y share memory at any (not necessarily +// corresponding) index. The memory beyond the slice length is ignored. +func AnyOverlap(x, y []byte) bool { + return len(x) > 0 && len(y) > 0 && + reflect.ValueOf(&x[0]).Pointer() <= reflect.ValueOf(&y[len(y)-1]).Pointer() && + reflect.ValueOf(&y[0]).Pointer() <= reflect.ValueOf(&x[len(x)-1]).Pointer() +} + +// InexactOverlap reports whether x and y share memory at any non-corresponding +// index. The memory beyond the slice length is ignored. Note that x and y can +// have different lengths and still not have any inexact overlap. +// +// InexactOverlap can be used to implement the requirements of the crypto/cipher +// AEAD, Block, BlockMode and Stream interfaces. +func InexactOverlap(x, y []byte) bool { + if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] { + return false + } + return AnyOverlap(x, y) +} diff --git a/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go b/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go new file mode 100644 index 000000000..a98d1bd45 --- /dev/null +++ b/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go @@ -0,0 +1,173 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package secretbox encrypts and authenticates small messages. + +Secretbox uses XSalsa20 and Poly1305 to encrypt and authenticate messages with +secret-key cryptography. The length of messages is not hidden. + +It is the caller's responsibility to ensure the uniqueness of nonces—for +example, by using nonce 1 for the first message, nonce 2 for the second +message, etc. Nonces are long enough that randomly generated nonces have +negligible risk of collision. + +Messages should be small because: + +1. The whole message needs to be held in memory to be processed. + +2. Using large messages pressures implementations on small machines to decrypt +and process plaintext before authenticating it. This is very dangerous, and +this API does not allow it, but a protocol that uses excessive message sizes +might present some implementations with no other choice. + +3. Fixed overheads will be sufficiently amortised by messages as small as 8KB. + +4. 
Performance may be improved by working with messages that fit into data caches. + +Thus large amounts of data should be chunked so that each message is small. +(Each message still needs a unique nonce.) If in doubt, 16KB is a reasonable +chunk size. + +This package is interoperable with NaCl: https://nacl.cr.yp.to/secretbox.html. +*/ +package secretbox // import "golang.org/x/crypto/nacl/secretbox" + +import ( + "golang.org/x/crypto/internal/subtle" + "golang.org/x/crypto/poly1305" + "golang.org/x/crypto/salsa20/salsa" +) + +// Overhead is the number of bytes of overhead when boxing a message. +const Overhead = poly1305.TagSize + +// setup produces a sub-key and Salsa20 counter given a nonce and key. +func setup(subKey *[32]byte, counter *[16]byte, nonce *[24]byte, key *[32]byte) { + // We use XSalsa20 for encryption so first we need to generate a + // key and nonce with HSalsa20. + var hNonce [16]byte + copy(hNonce[:], nonce[:]) + salsa.HSalsa20(subKey, &hNonce, key, &salsa.Sigma) + + // The final 8 bytes of the original nonce form the new nonce. + copy(counter[:], nonce[16:]) +} + +// sliceForAppend takes a slice and a requested number of bytes. It returns a +// slice with the contents of the given slice followed by that many bytes and a +// second slice that aliases into it and contains only the extra bytes. If the +// original slice has sufficient capacity then no allocation is performed. +func sliceForAppend(in []byte, n int) (head, tail []byte) { + if total := len(in) + n; cap(in) >= total { + head = in[:total] + } else { + head = make([]byte, total) + copy(head, in) + } + tail = head[len(in):] + return +} + +// Seal appends an encrypted and authenticated copy of message to out, which +// must not overlap message. The key and nonce pair must be unique for each +// distinct message and the output will be Overhead bytes longer than message. +func Seal(out, message []byte, nonce *[24]byte, key *[32]byte) []byte { + var subKey [32]byte + var counter [16]byte + setup(&subKey, &counter, nonce, key) + + // The Poly1305 key is generated by encrypting 32 bytes of zeros. Since + // Salsa20 works with 64-byte blocks, we also generate 32 bytes of + // keystream as a side effect. + var firstBlock [64]byte + salsa.XORKeyStream(firstBlock[:], firstBlock[:], &counter, &subKey) + + var poly1305Key [32]byte + copy(poly1305Key[:], firstBlock[:]) + + ret, out := sliceForAppend(out, len(message)+poly1305.TagSize) + if subtle.AnyOverlap(out, message) { + panic("nacl: invalid buffer overlap") + } + + // We XOR up to 32 bytes of message with the keystream generated from + // the first block. + firstMessageBlock := message + if len(firstMessageBlock) > 32 { + firstMessageBlock = firstMessageBlock[:32] + } + + tagOut := out + out = out[poly1305.TagSize:] + for i, x := range firstMessageBlock { + out[i] = firstBlock[32+i] ^ x + } + message = message[len(firstMessageBlock):] + ciphertext := out + out = out[len(firstMessageBlock):] + + // Now encrypt the rest. + counter[8] = 1 + salsa.XORKeyStream(out, message, &counter, &subKey) + + var tag [poly1305.TagSize]byte + poly1305.Sum(&tag, ciphertext, &poly1305Key) + copy(tagOut, tag[:]) + + return ret +} + +// Open authenticates and decrypts a box produced by Seal and appends the +// message to out, which must not overlap box. The output will be Overhead +// bytes smaller than box. 
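A round-trip sketch of Seal and Open together (an illustrative aside, not part of the vendored file; the random key and nonce here are placeholders for a real protocol's key management and nonce-uniqueness scheme):

	package main

	import (
		"crypto/rand"
		"fmt"

		"golang.org/x/crypto/nacl/secretbox"
	)

	func main() {
		var key [32]byte
		var nonce [24]byte
		// Placeholder setup: a random key and nonce. Real callers must
		// guarantee nonce uniqueness per key, e.g. with a message counter.
		if _, err := rand.Read(key[:]); err != nil {
			panic(err)
		}
		if _, err := rand.Read(nonce[:]); err != nil {
			panic(err)
		}

		box := secretbox.Seal(nil, []byte("hello"), &nonce, &key)
		msg, ok := secretbox.Open(nil, box, &nonce, &key)
		fmt.Println(string(msg), ok, len(box)-len(msg) == secretbox.Overhead) // hello true true
	}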
+func Open(out, box []byte, nonce *[24]byte, key *[32]byte) ([]byte, bool) {
+	if len(box) < Overhead {
+		return nil, false
+	}
+
+	var subKey [32]byte
+	var counter [16]byte
+	setup(&subKey, &counter, nonce, key)
+
+	// The Poly1305 key is generated by encrypting 32 bytes of zeros. Since
+	// Salsa20 works with 64-byte blocks, we also generate 32 bytes of
+	// keystream as a side effect.
+	var firstBlock [64]byte
+	salsa.XORKeyStream(firstBlock[:], firstBlock[:], &counter, &subKey)
+
+	var poly1305Key [32]byte
+	copy(poly1305Key[:], firstBlock[:])
+	var tag [poly1305.TagSize]byte
+	copy(tag[:], box)
+
+	if !poly1305.Verify(&tag, box[poly1305.TagSize:], &poly1305Key) {
+		return nil, false
+	}
+
+	ret, out := sliceForAppend(out, len(box)-Overhead)
+	if subtle.AnyOverlap(out, box) {
+		panic("nacl: invalid buffer overlap")
+	}
+
+	// We XOR up to 32 bytes of box with the keystream generated from
+	// the first block.
+	box = box[Overhead:]
+	firstMessageBlock := box
+	if len(firstMessageBlock) > 32 {
+		firstMessageBlock = firstMessageBlock[:32]
+	}
+	for i, x := range firstMessageBlock {
+		out[i] = firstBlock[32+i] ^ x
+	}
+
+	box = box[len(firstMessageBlock):]
+	out = out[len(firstMessageBlock):]
+
+	// Now decrypt the rest.
+	counter[8] = 1
+	salsa.XORKeyStream(out, box, &counter, &subKey)
+
+	return ret, true
+}
diff --git a/vendor/golang.org/x/crypto/poly1305/bits_compat.go b/vendor/golang.org/x/crypto/poly1305/bits_compat.go
new file mode 100644
index 000000000..45b5c966b
--- /dev/null
+++ b/vendor/golang.org/x/crypto/poly1305/bits_compat.go
@@ -0,0 +1,40 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.13
+// +build !go1.13
+
+package poly1305
+
+// Generic fallbacks for the math/bits intrinsics, copied from
+// src/math/bits/bits.go. They were added in Go 1.12, but Add64 and Sub64 had
+// variable time fallbacks until Go 1.13.
+
+func bitsAdd64(x, y, carry uint64) (sum, carryOut uint64) {
+	sum = x + y + carry
+	carryOut = ((x & y) | ((x | y) &^ sum)) >> 63
+	return
+}
+
+func bitsSub64(x, y, borrow uint64) (diff, borrowOut uint64) {
+	diff = x - y - borrow
+	borrowOut = ((^x & y) | (^(x ^ y) & diff)) >> 63
+	return
+}
+
+func bitsMul64(x, y uint64) (hi, lo uint64) {
+	const mask32 = 1<<32 - 1
+	x0 := x & mask32
+	x1 := x >> 32
+	y0 := y & mask32
+	y1 := y >> 32
+	w0 := x0 * y0
+	t := x1*y0 + w0>>32
+	w1 := t & mask32
+	w2 := t >> 32
+	w1 += x0 * y1
+	hi = x1*y1 + w2 + w1>>32
+	lo = x * y
+	return
+}
diff --git a/vendor/golang.org/x/crypto/poly1305/bits_go1.13.go b/vendor/golang.org/x/crypto/poly1305/bits_go1.13.go
new file mode 100644
index 000000000..ed52b3418
--- /dev/null
+++ b/vendor/golang.org/x/crypto/poly1305/bits_go1.13.go
@@ -0,0 +1,22 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
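Since bitsMul64 is unexported, the editorial sketch below restates its 32-bit schoolbook decomposition and cross-checks it against the math/bits intrinsic that replaces it on Go 1.13+ (the file beginning below):

	package main

	import (
		"fmt"
		"math/bits"
	)

	// mul64 reproduces the decomposition used by bitsMul64 in
	// bits_compat.go: split x and y into 32-bit halves, form the partial
	// products, and propagate the carries through t and w1.
	func mul64(x, y uint64) (hi, lo uint64) {
		const mask32 = 1<<32 - 1
		x0, x1 := x&mask32, x>>32
		y0, y1 := y&mask32, y>>32
		w0 := x0 * y0
		t := x1*y0 + w0>>32
		w1 := t&mask32 + x0*y1
		hi = x1*y1 + t>>32 + w1>>32
		lo = x * y
		return
	}

	func main() {
		x, y := uint64(0xdeadbeefcafebabe), uint64(0x0123456789abcdef)
		hi, lo := mul64(x, y)
		bhi, blo := bits.Mul64(x, y)
		fmt.Println(hi == bhi && lo == blo) // true
	}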
+
+//go:build go1.13
+// +build go1.13
+
+package poly1305
+
+import "math/bits"
+
+func bitsAdd64(x, y, carry uint64) (sum, carryOut uint64) {
+	return bits.Add64(x, y, carry)
+}
+
+func bitsSub64(x, y, borrow uint64) (diff, borrowOut uint64) {
+	return bits.Sub64(x, y, borrow)
+}
+
+func bitsMul64(x, y uint64) (hi, lo uint64) {
+	return bits.Mul64(x, y)
+}
diff --git a/vendor/golang.org/x/crypto/poly1305/mac_noasm.go b/vendor/golang.org/x/crypto/poly1305/mac_noasm.go
new file mode 100644
index 000000000..f184b67d9
--- /dev/null
+++ b/vendor/golang.org/x/crypto/poly1305/mac_noasm.go
@@ -0,0 +1,10 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build (!amd64 && !ppc64le && !s390x) || !gc || purego
+// +build !amd64,!ppc64le,!s390x !gc purego
+
+package poly1305
+
+type mac struct{ macGeneric }
diff --git a/vendor/golang.org/x/crypto/poly1305/poly1305.go b/vendor/golang.org/x/crypto/poly1305/poly1305.go
new file mode 100644
index 000000000..9d7a6af09
--- /dev/null
+++ b/vendor/golang.org/x/crypto/poly1305/poly1305.go
@@ -0,0 +1,99 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package poly1305 implements Poly1305 one-time message authentication code as
+// specified in https://cr.yp.to/mac/poly1305-20050329.pdf.
+//
+// Poly1305 is a fast, one-time authentication function. It is infeasible for an
+// attacker to generate an authenticator for a message without the key. However, a
+// key must only be used for a single message. Authenticating two different
+// messages with the same key allows an attacker to forge authenticators for other
+// messages with the same key.
+//
+// Poly1305 was originally coupled with AES in order to make Poly1305-AES. AES was
+// used with a fixed key in order to generate one-time keys from a nonce.
+// However, in this package AES isn't used and the one-time key is specified
+// directly.
+package poly1305 // import "golang.org/x/crypto/poly1305"
+
+import "crypto/subtle"
+
+// TagSize is the size, in bytes, of a poly1305 authenticator.
+const TagSize = 16
+
+// Sum generates an authenticator for msg using a one-time key and puts the
+// 16-byte result into out. Authenticating two different messages with the same
+// key allows an attacker to forge messages at will.
+func Sum(out *[16]byte, m []byte, key *[32]byte) {
+	h := New(key)
+	h.Write(m)
+	h.Sum(out[:0])
+}
+
+// Verify returns true if mac is a valid authenticator for m with the given key.
+func Verify(mac *[16]byte, m []byte, key *[32]byte) bool {
+	var tmp [16]byte
+	Sum(&tmp, m, key)
+	return subtle.ConstantTimeCompare(tmp[:], mac[:]) == 1
+}
+
+// New returns a new MAC computing an authentication
+// tag of all data written to it with the given key.
+// This allows writing the message progressively instead
+// of passing it as a single slice. Common users should use
+// the Sum function instead.
+//
+// The key must be unique for each message, as authenticating
+// two different messages with the same key allows an attacker
+// to forge messages at will.
+func New(key *[32]byte) *MAC {
+	m := &MAC{}
+	initialize(key, &m.macState)
+	return m
+}
+
+// MAC is an io.Writer computing an authentication tag
+// of the data written to it.
+//
+// MAC cannot be used like common hash.Hash implementations,
+// because using a poly1305 key twice breaks its security.
+// Therefore writing data to a running MAC after calling +// Sum or Verify causes it to panic. +type MAC struct { + mac // platform-dependent implementation + + finalized bool +} + +// Size returns the number of bytes Sum will return. +func (h *MAC) Size() int { return TagSize } + +// Write adds more data to the running message authentication code. +// It never returns an error. +// +// It must not be called after the first call of Sum or Verify. +func (h *MAC) Write(p []byte) (n int, err error) { + if h.finalized { + panic("poly1305: write to MAC after Sum or Verify") + } + return h.mac.Write(p) +} + +// Sum computes the authenticator of all data written to the +// message authentication code. +func (h *MAC) Sum(b []byte) []byte { + var mac [TagSize]byte + h.mac.Sum(&mac) + h.finalized = true + return append(b, mac[:]...) +} + +// Verify returns whether the authenticator of all data written to +// the message authentication code matches the expected value. +func (h *MAC) Verify(expected []byte) bool { + var mac [TagSize]byte + h.mac.Sum(&mac) + h.finalized = true + return subtle.ConstantTimeCompare(expected, mac[:]) == 1 +} diff --git a/vendor/golang.org/x/crypto/poly1305/sum_amd64.go b/vendor/golang.org/x/crypto/poly1305/sum_amd64.go new file mode 100644 index 000000000..6d522333f --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/sum_amd64.go @@ -0,0 +1,48 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc && !purego +// +build gc,!purego + +package poly1305 + +//go:noescape +func update(state *macState, msg []byte) + +// mac is a wrapper for macGeneric that redirects calls that would have gone to +// updateGeneric to update. +// +// Its Write and Sum methods are otherwise identical to the macGeneric ones, but +// using function pointers would carry a major performance cost. +type mac struct{ macGeneric } + +func (h *mac) Write(p []byte) (int, error) { + nn := len(p) + if h.offset > 0 { + n := copy(h.buffer[h.offset:], p) + if h.offset+n < TagSize { + h.offset += n + return nn, nil + } + p = p[n:] + h.offset = 0 + update(&h.macState, h.buffer[:]) + } + if n := len(p) - (len(p) % TagSize); n > 0 { + update(&h.macState, p[:n]) + p = p[n:] + } + if len(p) > 0 { + h.offset += copy(h.buffer[h.offset:], p) + } + return nn, nil +} + +func (h *mac) Sum(out *[16]byte) { + state := h.macState + if h.offset > 0 { + update(&state, h.buffer[:h.offset]) + } + finalize(out, &state.h, &state.s) +} diff --git a/vendor/golang.org/x/crypto/poly1305/sum_amd64.s b/vendor/golang.org/x/crypto/poly1305/sum_amd64.s new file mode 100644 index 000000000..1d74f0f88 --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/sum_amd64.s @@ -0,0 +1,109 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
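An illustrative sketch of the package-level API defined in poly1305.go above; the all-zero key is a placeholder, and a real one-time key must be secret and never reused:

	package main

	import (
		"fmt"

		"golang.org/x/crypto/poly1305"
	)

	func main() {
		var key [32]byte // placeholder; a real key must be secret and single-use
		msg := []byte("attack at dawn")

		// One-shot form.
		var tag [16]byte
		poly1305.Sum(&tag, msg, &key)
		fmt.Println(poly1305.Verify(&tag, msg, &key)) // true

		// Streaming form; equivalent, but the MAC must not be written to
		// after Sum or Verify.
		h := poly1305.New(&key)
		h.Write(msg)
		fmt.Println(h.Verify(tag[:])) // true
	}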
+
+//go:build gc && !purego
+// +build gc,!purego
+
+#include "textflag.h"
+
+#define POLY1305_ADD(msg, h0, h1, h2) \
+	ADDQ 0(msg), h0; \
+	ADCQ 8(msg), h1; \
+	ADCQ $1, h2; \
+	LEAQ 16(msg), msg
+
+#define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3) \
+	MOVQ r0, AX; \
+	MULQ h0; \
+	MOVQ AX, t0; \
+	MOVQ DX, t1; \
+	MOVQ r0, AX; \
+	MULQ h1; \
+	ADDQ AX, t1; \
+	ADCQ $0, DX; \
+	MOVQ r0, t2; \
+	IMULQ h2, t2; \
+	ADDQ DX, t2; \
+	\
+	MOVQ r1, AX; \
+	MULQ h0; \
+	ADDQ AX, t1; \
+	ADCQ $0, DX; \
+	MOVQ DX, h0; \
+	MOVQ r1, t3; \
+	IMULQ h2, t3; \
+	MOVQ r1, AX; \
+	MULQ h1; \
+	ADDQ AX, t2; \
+	ADCQ DX, t3; \
+	ADDQ h0, t2; \
+	ADCQ $0, t3; \
+	\
+	MOVQ t0, h0; \
+	MOVQ t1, h1; \
+	MOVQ t2, h2; \
+	ANDQ $3, h2; \
+	MOVQ t2, t0; \
+	ANDQ $0xFFFFFFFFFFFFFFFC, t0; \
+	ADDQ t0, h0; \
+	ADCQ t3, h1; \
+	ADCQ $0, h2; \
+	SHRQ $2, t3, t2; \
+	SHRQ $2, t3; \
+	ADDQ t2, h0; \
+	ADCQ t3, h1; \
+	ADCQ $0, h2
+
+// func update(state *[7]uint64, msg []byte)
+TEXT ·update(SB), $0-32
+	MOVQ state+0(FP), DI
+	MOVQ msg_base+8(FP), SI
+	MOVQ msg_len+16(FP), R15
+
+	MOVQ 0(DI), R8   // h0
+	MOVQ 8(DI), R9   // h1
+	MOVQ 16(DI), R10 // h2
+	MOVQ 24(DI), R11 // r0
+	MOVQ 32(DI), R12 // r1
+
+	CMPQ R15, $16
+	JB bytes_between_0_and_15
+
+loop:
+	POLY1305_ADD(SI, R8, R9, R10)
+
+multiply:
+	POLY1305_MUL(R8, R9, R10, R11, R12, BX, CX, R13, R14)
+	SUBQ $16, R15
+	CMPQ R15, $16
+	JAE loop
+
+bytes_between_0_and_15:
+	TESTQ R15, R15
+	JZ done
+	MOVQ $1, BX
+	XORQ CX, CX
+	XORQ R13, R13
+	ADDQ R15, SI
+
+flush_buffer:
+	SHLQ $8, BX, CX
+	SHLQ $8, BX
+	MOVB -1(SI), R13
+	XORQ R13, BX
+	DECQ SI
+	DECQ R15
+	JNZ flush_buffer
+
+	ADDQ BX, R8
+	ADCQ CX, R9
+	ADCQ $0, R10
+	MOVQ $16, R15
+	JMP multiply
+
+done:
+	MOVQ R8, 0(DI)
+	MOVQ R9, 8(DI)
+	MOVQ R10, 16(DI)
+	RET
diff --git a/vendor/golang.org/x/crypto/poly1305/sum_generic.go b/vendor/golang.org/x/crypto/poly1305/sum_generic.go
new file mode 100644
index 000000000..c942a6590
--- /dev/null
+++ b/vendor/golang.org/x/crypto/poly1305/sum_generic.go
@@ -0,0 +1,310 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file provides the generic implementation of Sum and MAC. Other files
+// might provide optimized assembly implementations of some of this code.
+
+package poly1305
+
+import "encoding/binary"
+
+// Poly1305 [RFC 7539] is a relatively simple algorithm: the authentication tag
+// for a 64-byte message is approximately
+//
+//     s + m[0:16] * r⁴ + m[16:32] * r³ + m[32:48] * r² + m[48:64] * r mod 2¹³⁰ - 5
+//
+// for some secret r and s. It can be computed sequentially like
+//
+//     for len(msg) > 0:
+//         h += read(msg, 16)
+//         h *= r
+//         h %= 2¹³⁰ - 5
+//     return h + s
+//
+// All the complexity is about doing performant constant-time math on numbers
+// larger than any available numeric type.
+
+func sumGeneric(out *[TagSize]byte, msg []byte, key *[32]byte) {
+	h := newMACGeneric(key)
+	h.Write(msg)
+	h.Sum(out)
+}
+
+func newMACGeneric(key *[32]byte) macGeneric {
+	m := macGeneric{}
+	initialize(key, &m.macState)
+	return m
+}
+
+// macState holds numbers in saturated 64-bit little-endian limbs. That is,
+// the value of [x0, x1, x2] is x[0] + x[1] * 2⁶⁴ + x[2] * 2¹²⁸.
+type macState struct {
+	// h is the main accumulator. It is to be interpreted modulo 2¹³⁰ - 5, but
+	// can grow larger during and after rounds. It must, however, remain below
+	// 2 * (2¹³⁰ - 5).
+	h [3]uint64
+	// r and s are the private key components.
+ r [2]uint64 + s [2]uint64 +} + +type macGeneric struct { + macState + + buffer [TagSize]byte + offset int +} + +// Write splits the incoming message into TagSize chunks, and passes them to +// update. It buffers incomplete chunks. +func (h *macGeneric) Write(p []byte) (int, error) { + nn := len(p) + if h.offset > 0 { + n := copy(h.buffer[h.offset:], p) + if h.offset+n < TagSize { + h.offset += n + return nn, nil + } + p = p[n:] + h.offset = 0 + updateGeneric(&h.macState, h.buffer[:]) + } + if n := len(p) - (len(p) % TagSize); n > 0 { + updateGeneric(&h.macState, p[:n]) + p = p[n:] + } + if len(p) > 0 { + h.offset += copy(h.buffer[h.offset:], p) + } + return nn, nil +} + +// Sum flushes the last incomplete chunk from the buffer, if any, and generates +// the MAC output. It does not modify its state, in order to allow for multiple +// calls to Sum, even if no Write is allowed after Sum. +func (h *macGeneric) Sum(out *[TagSize]byte) { + state := h.macState + if h.offset > 0 { + updateGeneric(&state, h.buffer[:h.offset]) + } + finalize(out, &state.h, &state.s) +} + +// [rMask0, rMask1] is the specified Poly1305 clamping mask in little-endian. It +// clears some bits of the secret coefficient to make it possible to implement +// multiplication more efficiently. +const ( + rMask0 = 0x0FFFFFFC0FFFFFFF + rMask1 = 0x0FFFFFFC0FFFFFFC +) + +// initialize loads the 256-bit key into the two 128-bit secret values r and s. +func initialize(key *[32]byte, m *macState) { + m.r[0] = binary.LittleEndian.Uint64(key[0:8]) & rMask0 + m.r[1] = binary.LittleEndian.Uint64(key[8:16]) & rMask1 + m.s[0] = binary.LittleEndian.Uint64(key[16:24]) + m.s[1] = binary.LittleEndian.Uint64(key[24:32]) +} + +// uint128 holds a 128-bit number as two 64-bit limbs, for use with the +// bits.Mul64 and bits.Add64 intrinsics. +type uint128 struct { + lo, hi uint64 +} + +func mul64(a, b uint64) uint128 { + hi, lo := bitsMul64(a, b) + return uint128{lo, hi} +} + +func add128(a, b uint128) uint128 { + lo, c := bitsAdd64(a.lo, b.lo, 0) + hi, c := bitsAdd64(a.hi, b.hi, c) + if c != 0 { + panic("poly1305: unexpected overflow") + } + return uint128{lo, hi} +} + +func shiftRightBy2(a uint128) uint128 { + a.lo = a.lo>>2 | (a.hi&3)<<62 + a.hi = a.hi >> 2 + return a +} + +// updateGeneric absorbs msg into the state.h accumulator. For each chunk m of +// 128 bits of message, it computes +// +// h₊ = (h + m) * r mod 2¹³⁰ - 5 +// +// If the msg length is not a multiple of TagSize, it assumes the last +// incomplete chunk is the final one. +func updateGeneric(state *macState, msg []byte) { + h0, h1, h2 := state.h[0], state.h[1], state.h[2] + r0, r1 := state.r[0], state.r[1] + + for len(msg) > 0 { + var c uint64 + + // For the first step, h + m, we use a chain of bits.Add64 intrinsics. + // The resulting value of h might exceed 2¹³⁰ - 5, but will be partially + // reduced at the end of the multiplication below. + // + // The spec requires us to set a bit just above the message size, not to + // hide leading zeroes. For full chunks, that's 1 << 128, so we can just + // add 1 to the most significant (2¹²⁸) limb, h2. 
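+		// (For instance, a full chunk of 16 zero bytes is padded to exactly
+		// 2¹²⁸, so the additions below leave h0 and h1 unchanged and h2
+		// grows only by the appended 1.)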
+ if len(msg) >= TagSize { + h0, c = bitsAdd64(h0, binary.LittleEndian.Uint64(msg[0:8]), 0) + h1, c = bitsAdd64(h1, binary.LittleEndian.Uint64(msg[8:16]), c) + h2 += c + 1 + + msg = msg[TagSize:] + } else { + var buf [TagSize]byte + copy(buf[:], msg) + buf[len(msg)] = 1 + + h0, c = bitsAdd64(h0, binary.LittleEndian.Uint64(buf[0:8]), 0) + h1, c = bitsAdd64(h1, binary.LittleEndian.Uint64(buf[8:16]), c) + h2 += c + + msg = nil + } + + // Multiplication of big number limbs is similar to elementary school + // columnar multiplication. Instead of digits, there are 64-bit limbs. + // + // We are multiplying a 3 limbs number, h, by a 2 limbs number, r. + // + // h2 h1 h0 x + // r1 r0 = + // ---------------- + // h2r0 h1r0 h0r0 <-- individual 128-bit products + // + h2r1 h1r1 h0r1 + // ------------------------ + // m3 m2 m1 m0 <-- result in 128-bit overlapping limbs + // ------------------------ + // m3.hi m2.hi m1.hi m0.hi <-- carry propagation + // + m3.lo m2.lo m1.lo m0.lo + // ------------------------------- + // t4 t3 t2 t1 t0 <-- final result in 64-bit limbs + // + // The main difference from pen-and-paper multiplication is that we do + // carry propagation in a separate step, as if we wrote two digit sums + // at first (the 128-bit limbs), and then carried the tens all at once. + + h0r0 := mul64(h0, r0) + h1r0 := mul64(h1, r0) + h2r0 := mul64(h2, r0) + h0r1 := mul64(h0, r1) + h1r1 := mul64(h1, r1) + h2r1 := mul64(h2, r1) + + // Since h2 is known to be at most 7 (5 + 1 + 1), and r0 and r1 have their + // top 4 bits cleared by rMask{0,1}, we know that their product is not going + // to overflow 64 bits, so we can ignore the high part of the products. + // + // This also means that the product doesn't have a fifth limb (t4). + if h2r0.hi != 0 { + panic("poly1305: unexpected overflow") + } + if h2r1.hi != 0 { + panic("poly1305: unexpected overflow") + } + + m0 := h0r0 + m1 := add128(h1r0, h0r1) // These two additions don't overflow thanks again + m2 := add128(h2r0, h1r1) // to the 4 masked bits at the top of r0 and r1. + m3 := h2r1 + + t0 := m0.lo + t1, c := bitsAdd64(m1.lo, m0.hi, 0) + t2, c := bitsAdd64(m2.lo, m1.hi, c) + t3, _ := bitsAdd64(m3.lo, m2.hi, c) + + // Now we have the result as 4 64-bit limbs, and we need to reduce it + // modulo 2¹³⁰ - 5. The special shape of this Crandall prime lets us do + // a cheap partial reduction according to the reduction identity + // + // c * 2¹³⁰ + n = c * 5 + n mod 2¹³⁰ - 5 + // + // because 2¹³⁰ = 5 mod 2¹³⁰ - 5. Partial reduction since the result is + // likely to be larger than 2¹³⁰ - 5, but still small enough to fit the + // assumptions we make about h in the rest of the code. + // + // See also https://speakerdeck.com/gtank/engineering-prime-numbers?slide=23 + + // We split the final result at the 2¹³⁰ mark into h and cc, the carry. + // Note that the carry bits are effectively shifted left by 2, in other + // words, cc = c * 4 for the c in the reduction identity. + h0, h1, h2 = t0, t1, t2&maskLow2Bits + cc := uint128{t2 & maskNotLow2Bits, t3} + + // To add c * 5 to h, we first add cc = c * 4, and then add (cc >> 2) = c. 
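+		// (For instance, c = 3 gives cc = 12; adding cc and then cc>>2
+		// contributes 12 + 3 = 15 = 5c, with no multiplication required.)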
+ + h0, c = bitsAdd64(h0, cc.lo, 0) + h1, c = bitsAdd64(h1, cc.hi, c) + h2 += c + + cc = shiftRightBy2(cc) + + h0, c = bitsAdd64(h0, cc.lo, 0) + h1, c = bitsAdd64(h1, cc.hi, c) + h2 += c + + // h2 is at most 3 + 1 + 1 = 5, making the whole of h at most + // + // 5 * 2¹²⁸ + (2¹²⁸ - 1) = 6 * 2¹²⁸ - 1 + } + + state.h[0], state.h[1], state.h[2] = h0, h1, h2 +} + +const ( + maskLow2Bits uint64 = 0x0000000000000003 + maskNotLow2Bits uint64 = ^maskLow2Bits +) + +// select64 returns x if v == 1 and y if v == 0, in constant time. +func select64(v, x, y uint64) uint64 { return ^(v-1)&x | (v-1)&y } + +// [p0, p1, p2] is 2¹³⁰ - 5 in little endian order. +const ( + p0 = 0xFFFFFFFFFFFFFFFB + p1 = 0xFFFFFFFFFFFFFFFF + p2 = 0x0000000000000003 +) + +// finalize completes the modular reduction of h and computes +// +// out = h + s mod 2¹²⁸ +// +func finalize(out *[TagSize]byte, h *[3]uint64, s *[2]uint64) { + h0, h1, h2 := h[0], h[1], h[2] + + // After the partial reduction in updateGeneric, h might be more than + // 2¹³⁰ - 5, but will be less than 2 * (2¹³⁰ - 5). To complete the reduction + // in constant time, we compute t = h - (2¹³⁰ - 5), and select h as the + // result if the subtraction underflows, and t otherwise. + + hMinusP0, b := bitsSub64(h0, p0, 0) + hMinusP1, b := bitsSub64(h1, p1, b) + _, b = bitsSub64(h2, p2, b) + + // h = h if h < p else h - p + h0 = select64(b, h0, hMinusP0) + h1 = select64(b, h1, hMinusP1) + + // Finally, we compute the last Poly1305 step + // + // tag = h + s mod 2¹²⁸ + // + // by just doing a wide addition with the 128 low bits of h and discarding + // the overflow. + h0, c := bitsAdd64(h0, s[0], 0) + h1, _ = bitsAdd64(h1, s[1], c) + + binary.LittleEndian.PutUint64(out[0:8], h0) + binary.LittleEndian.PutUint64(out[8:16], h1) +} diff --git a/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go b/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go new file mode 100644 index 000000000..4a069941a --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go @@ -0,0 +1,48 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc && !purego +// +build gc,!purego + +package poly1305 + +//go:noescape +func update(state *macState, msg []byte) + +// mac is a wrapper for macGeneric that redirects calls that would have gone to +// updateGeneric to update. +// +// Its Write and Sum methods are otherwise identical to the macGeneric ones, but +// using function pointers would carry a major performance cost. +type mac struct{ macGeneric } + +func (h *mac) Write(p []byte) (int, error) { + nn := len(p) + if h.offset > 0 { + n := copy(h.buffer[h.offset:], p) + if h.offset+n < TagSize { + h.offset += n + return nn, nil + } + p = p[n:] + h.offset = 0 + update(&h.macState, h.buffer[:]) + } + if n := len(p) - (len(p) % TagSize); n > 0 { + update(&h.macState, p[:n]) + p = p[n:] + } + if len(p) > 0 { + h.offset += copy(h.buffer[h.offset:], p) + } + return nn, nil +} + +func (h *mac) Sum(out *[16]byte) { + state := h.macState + if h.offset > 0 { + update(&state, h.buffer[:h.offset]) + } + finalize(out, &state.h, &state.s) +} diff --git a/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.s b/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.s new file mode 100644 index 000000000..58422aad2 --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.s @@ -0,0 +1,182 @@ +// Copyright 2019 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc && !purego +// +build gc,!purego + +#include "textflag.h" + +// This was ported from the amd64 implementation. + +#define POLY1305_ADD(msg, h0, h1, h2, t0, t1, t2) \ + MOVD (msg), t0; \ + MOVD 8(msg), t1; \ + MOVD $1, t2; \ + ADDC t0, h0, h0; \ + ADDE t1, h1, h1; \ + ADDE t2, h2; \ + ADD $16, msg + +#define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3, t4, t5) \ + MULLD r0, h0, t0; \ + MULLD r0, h1, t4; \ + MULHDU r0, h0, t1; \ + MULHDU r0, h1, t5; \ + ADDC t4, t1, t1; \ + MULLD r0, h2, t2; \ + ADDZE t5; \ + MULHDU r1, h0, t4; \ + MULLD r1, h0, h0; \ + ADD t5, t2, t2; \ + ADDC h0, t1, t1; \ + MULLD h2, r1, t3; \ + ADDZE t4, h0; \ + MULHDU r1, h1, t5; \ + MULLD r1, h1, t4; \ + ADDC t4, t2, t2; \ + ADDE t5, t3, t3; \ + ADDC h0, t2, t2; \ + MOVD $-4, t4; \ + MOVD t0, h0; \ + MOVD t1, h1; \ + ADDZE t3; \ + ANDCC $3, t2, h2; \ + AND t2, t4, t0; \ + ADDC t0, h0, h0; \ + ADDE t3, h1, h1; \ + SLD $62, t3, t4; \ + SRD $2, t2; \ + ADDZE h2; \ + OR t4, t2, t2; \ + SRD $2, t3; \ + ADDC t2, h0, h0; \ + ADDE t3, h1, h1; \ + ADDZE h2 + +DATA ·poly1305Mask<>+0x00(SB)/8, $0x0FFFFFFC0FFFFFFF +DATA ·poly1305Mask<>+0x08(SB)/8, $0x0FFFFFFC0FFFFFFC +GLOBL ·poly1305Mask<>(SB), RODATA, $16 + +// func update(state *[7]uint64, msg []byte) +TEXT ·update(SB), $0-32 + MOVD state+0(FP), R3 + MOVD msg_base+8(FP), R4 + MOVD msg_len+16(FP), R5 + + MOVD 0(R3), R8 // h0 + MOVD 8(R3), R9 // h1 + MOVD 16(R3), R10 // h2 + MOVD 24(R3), R11 // r0 + MOVD 32(R3), R12 // r1 + + CMP R5, $16 + BLT bytes_between_0_and_15 + +loop: + POLY1305_ADD(R4, R8, R9, R10, R20, R21, R22) + +multiply: + POLY1305_MUL(R8, R9, R10, R11, R12, R16, R17, R18, R14, R20, R21) + ADD $-16, R5 + CMP R5, $16 + BGE loop + +bytes_between_0_and_15: + CMP R5, $0 + BEQ done + MOVD $0, R16 // h0 + MOVD $0, R17 // h1 + +flush_buffer: + CMP R5, $8 + BLE just1 + + MOVD $8, R21 + SUB R21, R5, R21 + + // Greater than 8 -- load the rightmost remaining bytes in msg + // and put into R17 (h1) + MOVD (R4)(R21), R17 + MOVD $16, R22 + + // Find the offset to those bytes + SUB R5, R22, R22 + SLD $3, R22 + + // Shift to get only the bytes in msg + SRD R22, R17, R17 + + // Put 1 at high end + MOVD $1, R23 + SLD $3, R21 + SLD R21, R23, R23 + OR R23, R17, R17 + + // Remainder is 8 + MOVD $8, R5 + +just1: + CMP R5, $8 + BLT less8 + + // Exactly 8 + MOVD (R4), R16 + + CMP R17, $0 + + // Check if we've already set R17; if not + // set 1 to indicate end of msg. 
+ BNE carry + MOVD $1, R17 + BR carry + +less8: + MOVD $0, R16 // h0 + MOVD $0, R22 // shift count + CMP R5, $4 + BLT less4 + MOVWZ (R4), R16 + ADD $4, R4 + ADD $-4, R5 + MOVD $32, R22 + +less4: + CMP R5, $2 + BLT less2 + MOVHZ (R4), R21 + SLD R22, R21, R21 + OR R16, R21, R16 + ADD $16, R22 + ADD $-2, R5 + ADD $2, R4 + +less2: + CMP R5, $0 + BEQ insert1 + MOVBZ (R4), R21 + SLD R22, R21, R21 + OR R16, R21, R16 + ADD $8, R22 + +insert1: + // Insert 1 at end of msg + MOVD $1, R21 + SLD R22, R21, R21 + OR R16, R21, R16 + +carry: + // Add new values to h0, h1, h2 + ADDC R16, R8 + ADDE R17, R9 + ADDZE R10, R10 + MOVD $16, R5 + ADD R5, R4 + BR multiply + +done: + // Save h0, h1, h2 in state + MOVD R8, 0(R3) + MOVD R9, 8(R3) + MOVD R10, 16(R3) + RET diff --git a/vendor/golang.org/x/crypto/poly1305/sum_s390x.go b/vendor/golang.org/x/crypto/poly1305/sum_s390x.go new file mode 100644 index 000000000..62cc9f847 --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/sum_s390x.go @@ -0,0 +1,76 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc && !purego +// +build gc,!purego + +package poly1305 + +import ( + "golang.org/x/sys/cpu" +) + +// updateVX is an assembly implementation of Poly1305 that uses vector +// instructions. It must only be called if the vector facility (vx) is +// available. +//go:noescape +func updateVX(state *macState, msg []byte) + +// mac is a replacement for macGeneric that uses a larger buffer and redirects +// calls that would have gone to updateGeneric to updateVX if the vector +// facility is installed. +// +// A larger buffer is required for good performance because the vector +// implementation has a higher fixed cost per call than the generic +// implementation. +type mac struct { + macState + + buffer [16 * TagSize]byte // size must be a multiple of block size (16) + offset int +} + +func (h *mac) Write(p []byte) (int, error) { + nn := len(p) + if h.offset > 0 { + n := copy(h.buffer[h.offset:], p) + if h.offset+n < len(h.buffer) { + h.offset += n + return nn, nil + } + p = p[n:] + h.offset = 0 + if cpu.S390X.HasVX { + updateVX(&h.macState, h.buffer[:]) + } else { + updateGeneric(&h.macState, h.buffer[:]) + } + } + + tail := len(p) % len(h.buffer) // number of bytes to copy into buffer + body := len(p) - tail // number of bytes to process now + if body > 0 { + if cpu.S390X.HasVX { + updateVX(&h.macState, p[:body]) + } else { + updateGeneric(&h.macState, p[:body]) + } + } + h.offset = copy(h.buffer[:], p[body:]) // copy tail bytes - can be 0 + return nn, nil +} + +func (h *mac) Sum(out *[TagSize]byte) { + state := h.macState + remainder := h.buffer[:h.offset] + + // Use the generic implementation if we have 2 or fewer blocks left + // to sum. The vector implementation has a higher startup time. + if cpu.S390X.HasVX && len(remainder) > 2*TagSize { + updateVX(&state, remainder) + } else if len(remainder) > 0 { + updateGeneric(&state, remainder) + } + finalize(out, &state.h, &state.s) +} diff --git a/vendor/golang.org/x/crypto/poly1305/sum_s390x.s b/vendor/golang.org/x/crypto/poly1305/sum_s390x.s new file mode 100644 index 000000000..69c64f842 --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/sum_s390x.s @@ -0,0 +1,504 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
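A small editorial sketch of the body/tail arithmetic used by (*mac).Write above; bufLen stands in for len(h.buffer), which is 16*TagSize = 256 bytes here:

	package main

	import "fmt"

	// bodyTail mirrors the split in (*mac).Write: body bytes are handed to
	// updateVX or updateGeneric immediately, tail bytes wait in the buffer.
	func bodyTail(n, bufLen int) (body, tail int) {
		tail = n % bufLen
		body = n - tail
		return
	}

	func main() {
		fmt.Println(bodyTail(700, 256)) // 512 188
	}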
+
+//go:build gc && !purego
+// +build gc,!purego
+
+#include "textflag.h"
+
+// This implementation of Poly1305 uses the vector facility (vx)
+// to process up to 2 blocks (32 bytes) per iteration using an
+// algorithm based on the one described in:
+//
+//   NEON crypto, Daniel J. Bernstein & Peter Schwabe
+//   https://cryptojedi.org/papers/neoncrypto-20120320.pdf
+//
+// This algorithm uses 5 26-bit limbs to represent a 130-bit
+// value. These limbs are, for the most part, zero extended and
+// placed into 64-bit vector register elements. Each vector
+// register is 128-bits wide and so holds 2 of these elements.
+// Using 26-bit limbs allows us plenty of headroom to accommodate
+// accumulations before and after multiplication without
+// overflowing either 32-bits (before multiplication) or 64-bits
+// (after multiplication).
+//
+// In order to parallelise the operations required to calculate
+// the sum we use two separate accumulators and then sum those
+// in an extra final step. For compatibility with the generic
+// implementation we perform this summation at the end of every
+// updateVX call.
+//
+// To use two accumulators we must multiply the message blocks
+// by r² rather than r. Only the final message block should be
+// multiplied by r.
+//
+// Example:
+//
+// We want to calculate the sum (h) for a 64-byte message (m):
+//
+//   h = m[0:16]r⁴ + m[16:32]r³ + m[32:48]r² + m[48:64]r
+//
+// To do this we split the calculation into the even indices
+// and odd indices of the message. These form our SIMD 'lanes':
+//
+//   h = m[ 0:16]r⁴ + m[32:48]r² +   <- lane 0
+//       m[16:32]r³ + m[48:64]r      <- lane 1
+//
+// To calculate this iteratively we refactor so that both lanes
+// are written in terms of r² and r:
+//
+//   h = (m[ 0:16]r² + m[32:48])r² + <- lane 0
+//       (m[16:32]r² + m[48:64])r    <- lane 1
+//                   ^             ^
+//                   |             coefficients for second iteration
+//                   coefficients for first iteration
+//
+// So in this case we would have two iterations. In the first
+// both lanes are multiplied by r². In the second only the
+// first lane is multiplied by r² and the second lane is
+// instead multiplied by r. This gives us the odd and even
+// powers of r that we need from the original equation.
+//
+// Notation:
+//
+//   h - accumulator
+//   r - key
+//   m - message
+//
+//   [a, b]       - SIMD register holding two 64-bit values
+//   [a, b, c, d] - SIMD register holding four 32-bit values
+//   xᵢ[n]        - limb n of variable x with bit width i
+//
+// Limbs are expressed in little endian order, so for 26-bit
+// limbs x₂₆[4] will be the most significant limb and x₂₆[0]
+// will be the least significant limb.
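The two-lane factoring described above can be checked with plain integers. The sketch below is editorial: it uses math/big in place of 26-bit limb vectors, with arbitrary small values standing in for r and the message blocks, and confirms both forms agree modulo p = 2¹³⁰ - 5:

	package main

	import (
		"fmt"
		"math/big"
	)

	func main() {
		p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 130), big.NewInt(5))
		r := big.NewInt(1234577)
		m0, m1, m2, m3 := big.NewInt(11), big.NewInt(22), big.NewInt(33), big.NewInt(44)
		r2 := new(big.Int).Mul(r, r)

		// Direct form: m0·r⁴ + m1·r³ + m2·r² + m3·r.
		direct := new(big.Int).Mul(m0, new(big.Int).Exp(r, big.NewInt(4), nil))
		direct.Add(direct, new(big.Int).Mul(m1, new(big.Int).Exp(r, big.NewInt(3), nil)))
		direct.Add(direct, new(big.Int).Mul(m2, r2))
		direct.Add(direct, new(big.Int).Mul(m3, r))

		// Two-lane form: lane 0 = (m0·r² + m2)·r², lane 1 = (m1·r² + m3)·r.
		lane0 := new(big.Int).Mul(new(big.Int).Add(new(big.Int).Mul(m0, r2), m2), r2)
		lane1 := new(big.Int).Mul(new(big.Int).Add(new(big.Int).Mul(m1, r2), m3), r)
		sum := new(big.Int).Add(lane0, lane1)

		fmt.Println(direct.Mod(direct, p).Cmp(sum.Mod(sum, p)) == 0) // true
	}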
+ +// masking constants +#define MOD24 V0 // [0x0000000000ffffff, 0x0000000000ffffff] - mask low 24-bits +#define MOD26 V1 // [0x0000000003ffffff, 0x0000000003ffffff] - mask low 26-bits + +// expansion constants (see EXPAND macro) +#define EX0 V2 +#define EX1 V3 +#define EX2 V4 + +// key (r², r or 1 depending on context) +#define R_0 V5 +#define R_1 V6 +#define R_2 V7 +#define R_3 V8 +#define R_4 V9 + +// precalculated coefficients (5r², 5r or 0 depending on context) +#define R5_1 V10 +#define R5_2 V11 +#define R5_3 V12 +#define R5_4 V13 + +// message block (m) +#define M_0 V14 +#define M_1 V15 +#define M_2 V16 +#define M_3 V17 +#define M_4 V18 + +// accumulator (h) +#define H_0 V19 +#define H_1 V20 +#define H_2 V21 +#define H_3 V22 +#define H_4 V23 + +// temporary registers (for short-lived values) +#define T_0 V24 +#define T_1 V25 +#define T_2 V26 +#define T_3 V27 +#define T_4 V28 + +GLOBL ·constants<>(SB), RODATA, $0x30 +// EX0 +DATA ·constants<>+0x00(SB)/8, $0x0006050403020100 +DATA ·constants<>+0x08(SB)/8, $0x1016151413121110 +// EX1 +DATA ·constants<>+0x10(SB)/8, $0x060c0b0a09080706 +DATA ·constants<>+0x18(SB)/8, $0x161c1b1a19181716 +// EX2 +DATA ·constants<>+0x20(SB)/8, $0x0d0d0d0d0d0f0e0d +DATA ·constants<>+0x28(SB)/8, $0x1d1d1d1d1d1f1e1d + +// MULTIPLY multiplies each lane of f and g, partially reduced +// modulo 2¹³⁰ - 5. The result, h, consists of partial products +// in each lane that need to be reduced further to produce the +// final result. +// +// h₁₃₀ = (f₁₃₀g₁₃₀) % 2¹³⁰ + (5f₁₃₀g₁₃₀) / 2¹³⁰ +// +// Note that the multiplication by 5 of the high bits is +// achieved by precalculating the multiplication of four of the +// g coefficients by 5. These are g51-g54. +#define MULTIPLY(f0, f1, f2, f3, f4, g0, g1, g2, g3, g4, g51, g52, g53, g54, h0, h1, h2, h3, h4) \ + VMLOF f0, g0, h0 \ + VMLOF f0, g3, h3 \ + VMLOF f0, g1, h1 \ + VMLOF f0, g4, h4 \ + VMLOF f0, g2, h2 \ + VMLOF f1, g54, T_0 \ + VMLOF f1, g2, T_3 \ + VMLOF f1, g0, T_1 \ + VMLOF f1, g3, T_4 \ + VMLOF f1, g1, T_2 \ + VMALOF f2, g53, h0, h0 \ + VMALOF f2, g1, h3, h3 \ + VMALOF f2, g54, h1, h1 \ + VMALOF f2, g2, h4, h4 \ + VMALOF f2, g0, h2, h2 \ + VMALOF f3, g52, T_0, T_0 \ + VMALOF f3, g0, T_3, T_3 \ + VMALOF f3, g53, T_1, T_1 \ + VMALOF f3, g1, T_4, T_4 \ + VMALOF f3, g54, T_2, T_2 \ + VMALOF f4, g51, h0, h0 \ + VMALOF f4, g54, h3, h3 \ + VMALOF f4, g52, h1, h1 \ + VMALOF f4, g0, h4, h4 \ + VMALOF f4, g53, h2, h2 \ + VAG T_0, h0, h0 \ + VAG T_3, h3, h3 \ + VAG T_1, h1, h1 \ + VAG T_4, h4, h4 \ + VAG T_2, h2, h2 + +// REDUCE performs the following carry operations in four +// stages, as specified in Bernstein & Schwabe: +// +// 1: h₂₆[0]->h₂₆[1] h₂₆[3]->h₂₆[4] +// 2: h₂₆[1]->h₂₆[2] h₂₆[4]->h₂₆[0] +// 3: h₂₆[0]->h₂₆[1] h₂₆[2]->h₂₆[3] +// 4: h₂₆[3]->h₂₆[4] +// +// The result is that all of the limbs are limited to 26-bits +// except for h₂₆[1] and h₂₆[4] which are limited to 27-bits. +// +// Note that although each limb is aligned at 26-bit intervals +// they may contain values that exceed 2²⁶ - 1, hence the need +// to carry the excess bits in each limb. 
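The wrap-around carry in stage 2 can be restated in scalar Go. This editorial sketch mirrors the REDUCE macro that follows: the overflow above limb 4 re-enters at limb 0 multiplied by 5, computed as c + 4c, because 2¹³⁰ ≡ 5 (mod 2¹³⁰ - 5):

	package main

	import "fmt"

	const mask26 = 1<<26 - 1

	// carry26 performs the h4 -> h0 carry step from REDUCE.
	func carry26(h0, h4 uint64) (uint64, uint64) {
		c := h4 >> 26
		h4 &= mask26
		h0 += c + c<<2 // c*5, computed as c + 4c exactly as in the macro
		return h0, h4
	}

	func main() {
		fmt.Println(carry26(0, 3<<26|7)) // 15 7
	}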
+#define REDUCE(h0, h1, h2, h3, h4) \
+	VESRLG $26, h0, T_0 \
+	VESRLG $26, h3, T_1 \
+	VN MOD26, h0, h0 \
+	VN MOD26, h3, h3 \
+	VAG T_0, h1, h1 \
+	VAG T_1, h4, h4 \
+	VESRLG $26, h1, T_2 \
+	VESRLG $26, h4, T_3 \
+	VN MOD26, h1, h1 \
+	VN MOD26, h4, h4 \
+	VESLG $2, T_3, T_4 \
+	VAG T_3, T_4, T_4 \
+	VAG T_2, h2, h2 \
+	VAG T_4, h0, h0 \
+	VESRLG $26, h2, T_0 \
+	VESRLG $26, h0, T_1 \
+	VN MOD26, h2, h2 \
+	VN MOD26, h0, h0 \
+	VAG T_0, h3, h3 \
+	VAG T_1, h1, h1 \
+	VESRLG $26, h3, T_2 \
+	VN MOD26, h3, h3 \
+	VAG T_2, h4, h4
+
+// EXPAND splits the 128-bit little-endian values in0 and in1
+// into 26-bit big-endian limbs and places the results into
+// the first and second lane of d₂₆[0:4] respectively.
+//
+// The EX0, EX1 and EX2 constants are arrays of byte indices
+// for permutation. The permutation both reverses the bytes
+// in the input and ensures the bytes are copied into the
+// destination limb ready to be shifted into their final
+// position.
+#define EXPAND(in0, in1, d0, d1, d2, d3, d4) \
+	VPERM in0, in1, EX0, d0 \
+	VPERM in0, in1, EX1, d2 \
+	VPERM in0, in1, EX2, d4 \
+	VESRLG $26, d0, d1 \
+	VESRLG $30, d2, d3 \
+	VESRLG $4, d2, d2 \
+	VN MOD26, d0, d0 \ // [in0₂₆[0], in1₂₆[0]]
+	VN MOD26, d3, d3 \ // [in0₂₆[3], in1₂₆[3]]
+	VN MOD26, d1, d1 \ // [in0₂₆[1], in1₂₆[1]]
+	VN MOD24, d4, d4 \ // [in0₂₆[4], in1₂₆[4]]
+	VN MOD26, d2, d2 // [in0₂₆[2], in1₂₆[2]]
+
+// func updateVX(state *macState, msg []byte)
+TEXT ·updateVX(SB), NOSPLIT, $0
+	MOVD state+0(FP), R1
+	LMG msg+8(FP), R2, R3 // R2=msg_base, R3=msg_len
+
+	// load EX0, EX1 and EX2
+	MOVD $·constants<>(SB), R5
+	VLM (R5), EX0, EX2
+
+	// generate masks
+	VGMG $(64-24), $63, MOD24 // [0x00ffffff, 0x00ffffff]
+	VGMG $(64-26), $63, MOD26 // [0x03ffffff, 0x03ffffff]
+
+	// load h (accumulator) and r (key) from state
+	VZERO T_1 // [0, 0]
+	VL 0(R1), T_0 // [h₆₄[0], h₆₄[1]]
+	VLEG $0, 16(R1), T_1 // [h₆₄[2], 0]
+	VL 24(R1), T_2 // [r₆₄[0], r₆₄[1]]
+	VPDI $0, T_0, T_2, T_3 // [h₆₄[0], r₆₄[0]]
+	VPDI $5, T_0, T_2, T_4 // [h₆₄[1], r₆₄[1]]
+
+	// unpack h and r into 26-bit limbs
+	// note: h₆₄[2] may have the low 3 bits set, so h₂₆[4] is a 27-bit value
+	VN MOD26, T_3, H_0 // [h₂₆[0], r₂₆[0]]
+	VZERO H_1 // [0, 0]
+	VZERO H_3 // [0, 0]
+	VGMG $(64-12-14), $(63-12), T_0 // [0x03fff000, 0x03fff000] - 26-bit mask with low 12 bits masked out
+	VESLG $24, T_1, T_1 // [h₆₄[2]<<24, 0]
+	VERIMG $-26&63, T_3, MOD26, H_1 // [h₂₆[1], r₂₆[1]]
+	VESRLG $+52&63, T_3, H_2 // [h₂₆[2], r₂₆[2]] - low 12 bits only
+	VERIMG $-14&63, T_4, MOD26, H_3 // [h₂₆[3], r₂₆[3]]
+	VESRLG $40, T_4, H_4 // [h₂₆[4], r₂₆[4]] - low 24 bits only
+	VERIMG $+12&63, T_4, T_0, H_2 // [h₂₆[2], r₂₆[2]] - complete
+	VO T_1, H_4, H_4 // [h₂₆[4], r₂₆[4]] - complete
+
+	// replicate r across all 4 vector elements
+	VREPF $3, H_0, R_0 // [r₂₆[0], r₂₆[0], r₂₆[0], r₂₆[0]]
+	VREPF $3, H_1, R_1 // [r₂₆[1], r₂₆[1], r₂₆[1], r₂₆[1]]
+	VREPF $3, H_2, R_2 // [r₂₆[2], r₂₆[2], r₂₆[2], r₂₆[2]]
+	VREPF $3, H_3, R_3 // [r₂₆[3], r₂₆[3], r₂₆[3], r₂₆[3]]
+	VREPF $3, H_4, R_4 // [r₂₆[4], r₂₆[4], r₂₆[4], r₂₆[4]]
+
+	// zero out lane 1 of h
+	VLEIG $1, $0, H_0 // [h₂₆[0], 0]
+	VLEIG $1, $0, H_1 // [h₂₆[1], 0]
+	VLEIG $1, $0, H_2 // [h₂₆[2], 0]
+	VLEIG $1, $0, H_3 // [h₂₆[3], 0]
+	VLEIG $1, $0, H_4 // [h₂₆[4], 0]
+
+	// calculate 5r (ignore least significant limb)
+	VREPIF $5, T_0
+	VMLF T_0, R_1, R5_1 // [5r₂₆[1], 5r₂₆[1], 5r₂₆[1], 5r₂₆[1]]
+	VMLF T_0, R_2, R5_2 // [5r₂₆[2], 5r₂₆[2], 5r₂₆[2], 5r₂₆[2]]
+	VMLF T_0, R_3, R5_3 // [5r₂₆[3], 5r₂₆[3], 5r₂₆[3], 5r₂₆[3]]
+	VMLF T_0, R_4, R5_4 //
[5r₂₆[4], 5r₂₆[4], 5r₂₆[4], 5r₂₆[4]] + + // skip r² calculation if we are only calculating one block + CMPBLE R3, $16, skip + + // calculate r² + MULTIPLY(R_0, R_1, R_2, R_3, R_4, R_0, R_1, R_2, R_3, R_4, R5_1, R5_2, R5_3, R5_4, M_0, M_1, M_2, M_3, M_4) + REDUCE(M_0, M_1, M_2, M_3, M_4) + VGBM $0x0f0f, T_0 + VERIMG $0, M_0, T_0, R_0 // [r₂₆[0], r²₂₆[0], r₂₆[0], r²₂₆[0]] + VERIMG $0, M_1, T_0, R_1 // [r₂₆[1], r²₂₆[1], r₂₆[1], r²₂₆[1]] + VERIMG $0, M_2, T_0, R_2 // [r₂₆[2], r²₂₆[2], r₂₆[2], r²₂₆[2]] + VERIMG $0, M_3, T_0, R_3 // [r₂₆[3], r²₂₆[3], r₂₆[3], r²₂₆[3]] + VERIMG $0, M_4, T_0, R_4 // [r₂₆[4], r²₂₆[4], r₂₆[4], r²₂₆[4]] + + // calculate 5r² (ignore least significant limb) + VREPIF $5, T_0 + VMLF T_0, R_1, R5_1 // [5r₂₆[1], 5r²₂₆[1], 5r₂₆[1], 5r²₂₆[1]] + VMLF T_0, R_2, R5_2 // [5r₂₆[2], 5r²₂₆[2], 5r₂₆[2], 5r²₂₆[2]] + VMLF T_0, R_3, R5_3 // [5r₂₆[3], 5r²₂₆[3], 5r₂₆[3], 5r²₂₆[3]] + VMLF T_0, R_4, R5_4 // [5r₂₆[4], 5r²₂₆[4], 5r₂₆[4], 5r²₂₆[4]] + +loop: + CMPBLE R3, $32, b2 // 2 or fewer blocks remaining, need to change key coefficients + + // load next 2 blocks from message + VLM (R2), T_0, T_1 + + // update message slice + SUB $32, R3 + MOVD $32(R2), R2 + + // unpack message blocks into 26-bit big-endian limbs + EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4) + + // add 2¹²⁸ to each message block value + VLEIB $4, $1, M_4 + VLEIB $12, $1, M_4 + +multiply: + // accumulate the incoming message + VAG H_0, M_0, M_0 + VAG H_3, M_3, M_3 + VAG H_1, M_1, M_1 + VAG H_4, M_4, M_4 + VAG H_2, M_2, M_2 + + // multiply the accumulator by the key coefficient + MULTIPLY(M_0, M_1, M_2, M_3, M_4, R_0, R_1, R_2, R_3, R_4, R5_1, R5_2, R5_3, R5_4, H_0, H_1, H_2, H_3, H_4) + + // carry and partially reduce the partial products + REDUCE(H_0, H_1, H_2, H_3, H_4) + + CMPBNE R3, $0, loop + +finish: + // sum lane 0 and lane 1 and put the result in lane 1 + VZERO T_0 + VSUMQG H_0, T_0, H_0 + VSUMQG H_3, T_0, H_3 + VSUMQG H_1, T_0, H_1 + VSUMQG H_4, T_0, H_4 + VSUMQG H_2, T_0, H_2 + + // reduce again after summation + // TODO(mundaym): there might be a more efficient way to do this + // now that we only have 1 active lane. For example, we could + // simultaneously pack the values as we reduce them. + REDUCE(H_0, H_1, H_2, H_3, H_4) + + // carry h[1] through to h[4] so that only h[4] can exceed 2²⁶ - 1 + // TODO(mundaym): in testing this final carry was unnecessary. + // Needs a proof before it can be removed though. + VESRLG $26, H_1, T_1 + VN MOD26, H_1, H_1 + VAQ T_1, H_2, H_2 + VESRLG $26, H_2, T_2 + VN MOD26, H_2, H_2 + VAQ T_2, H_3, H_3 + VESRLG $26, H_3, T_3 + VN MOD26, H_3, H_3 + VAQ T_3, H_4, H_4 + + // h is now < 2(2¹³⁰ - 5) + // Pack each lane in h₂₆[0:4] into h₁₂₈[0:1]. + VESLG $26, H_1, H_1 + VESLG $26, H_3, H_3 + VO H_0, H_1, H_0 + VO H_2, H_3, H_2 + VESLG $4, H_2, H_2 + VLEIB $7, $48, H_1 + VSLB H_1, H_2, H_2 + VO H_0, H_2, H_0 + VLEIB $7, $104, H_1 + VSLB H_1, H_4, H_3 + VO H_3, H_0, H_0 + VLEIB $7, $24, H_1 + VSRLB H_1, H_4, H_1 + + // update state + VSTEG $1, H_0, 0(R1) + VSTEG $0, H_0, 8(R1) + VSTEG $1, H_1, 16(R1) + RET + +b2: // 2 or fewer blocks remaining + CMPBLE R3, $16, b1 + + // Load the 2 remaining blocks (17-32 bytes remaining). + MOVD $-17(R3), R0 // index of final byte to load modulo 16 + VL (R2), T_0 // load full 16 byte block + VLL R0, 16(R2), T_1 // load final (possibly partial) block and pad with zeros to 16 bytes + + // The Poly1305 algorithm requires that a 1 bit be appended to + // each message block. 
If the final block is less than 16 bytes
+	// long then it is easiest to insert the 1 before the message
+	// block is split into 26-bit limbs. If, on the other hand, the
+	// final message block is 16 bytes long then we append the 1 bit
+	// after expansion as normal.
+	MOVBZ $1, R0
+	MOVD $-16(R3), R3 // index of byte in last block to insert 1 at (could be 16)
+	CMPBEQ R3, $16, 2(PC) // skip the insertion if the final block is 16 bytes long
+	VLVGB R3, R0, T_1 // insert 1 into the byte at index R3
+
+	// Split both blocks into 26-bit limbs in the appropriate lanes.
+	EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4)
+
+	// Append a 1 byte to the end of the second to last block.
+	VLEIB $4, $1, M_4
+
+	// Append a 1 byte to the end of the last block only if it is a
+	// full 16 byte block.
+	CMPBNE R3, $16, 2(PC)
+	VLEIB $12, $1, M_4
+
+	// Finally, set up the coefficients for the final multiplication.
+	// We have previously saved r and 5r in the 32-bit even indexes
+	// of the R_[0-4] and R5_[1-4] coefficient registers.
+	//
+	// We want lane 0 to be multiplied by r² so that it can be kept the
+	// same. We want lane 1 to be multiplied by r so we need to move
+	// the saved r value into the 32-bit odd index in lane 1 by
+	// rotating the 64-bit lane by 32.
+	VGBM $0x00ff, T_0 // [0, 0xffffffffffffffff] - mask lane 1 only
+	VERIMG $32, R_0, T_0, R_0 // [_, r²₂₆[0], _, r₂₆[0]]
+	VERIMG $32, R_1, T_0, R_1 // [_, r²₂₆[1], _, r₂₆[1]]
+	VERIMG $32, R_2, T_0, R_2 // [_, r²₂₆[2], _, r₂₆[2]]
+	VERIMG $32, R_3, T_0, R_3 // [_, r²₂₆[3], _, r₂₆[3]]
+	VERIMG $32, R_4, T_0, R_4 // [_, r²₂₆[4], _, r₂₆[4]]
+	VERIMG $32, R5_1, T_0, R5_1 // [_, 5r²₂₆[1], _, 5r₂₆[1]]
+	VERIMG $32, R5_2, T_0, R5_2 // [_, 5r²₂₆[2], _, 5r₂₆[2]]
+	VERIMG $32, R5_3, T_0, R5_3 // [_, 5r²₂₆[3], _, 5r₂₆[3]]
+	VERIMG $32, R5_4, T_0, R5_4 // [_, 5r²₂₆[4], _, 5r₂₆[4]]
+
+	MOVD $0, R3
+	BR multiply
+
+skip:
+	CMPBEQ R3, $0, finish
+
+b1: // 1 block remaining
+
+	// Load the final block (1-16 bytes). This will be placed into
+	// lane 0.
+	MOVD $-1(R3), R0
+	VLL R0, (R2), T_0 // pad to 16 bytes with zeros
+
+	// The Poly1305 algorithm requires that a 1 bit be appended to
+	// each message block. If the final block is less than 16 bytes
+	// long then it is easiest to insert the 1 before the message
+	// block is split into 26-bit limbs. If, on the other hand, the
+	// final message block is 16 bytes long then we append the 1 bit
+	// after expansion as normal.
+	MOVBZ $1, R0
+	CMPBEQ R3, $16, 2(PC)
+	VLVGB R3, R0, T_0
+
+	// Set the message block in lane 1 to the value 0 so that it
+	// can be accumulated without affecting the final result.
+	VZERO T_1
+
+	// Split the final message block into 26-bit limbs in lane 0.
+	// Lane 1 will contain 0.
+	EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4)
+
+	// Append a 1 byte to the end of the last block only if it is a
+	// full 16 byte block.
+	CMPBNE R3, $16, 2(PC)
+	VLEIB $4, $1, M_4
+
+	// We have previously saved r and 5r in the 32-bit even indexes
+	// of the R_[0-4] and R5_[1-4] coefficient registers.
+	//
+	// We want lane 0 to be multiplied by r so we need to move the
+	// saved r value into the 32-bit odd index in lane 0. We want
+	// lane 1 to be set to the value 1. This makes multiplication
+	// a no-op. We do this by setting lane 1 in every register to 0
+	// and then just setting the 32-bit index 3 in R_0 to 1.
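+	// The net effect is that lane 1 computes (h + 0)·1 = h, leaving its
+	// accumulator untouched, so only lane 0 is advanced by r.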
+ VZERO T_0 + MOVD $0, R0 + MOVD $0x10111213, R12 + VLVGP R12, R0, T_1 // [_, 0x10111213, _, 0x00000000] + VPERM T_0, R_0, T_1, R_0 // [_, r₂₆[0], _, 0] + VPERM T_0, R_1, T_1, R_1 // [_, r₂₆[1], _, 0] + VPERM T_0, R_2, T_1, R_2 // [_, r₂₆[2], _, 0] + VPERM T_0, R_3, T_1, R_3 // [_, r₂₆[3], _, 0] + VPERM T_0, R_4, T_1, R_4 // [_, r₂₆[4], _, 0] + VPERM T_0, R5_1, T_1, R5_1 // [_, 5r₂₆[1], _, 0] + VPERM T_0, R5_2, T_1, R5_2 // [_, 5r₂₆[2], _, 0] + VPERM T_0, R5_3, T_1, R5_3 // [_, 5r₂₆[3], _, 0] + VPERM T_0, R5_4, T_1, R5_4 // [_, 5r₂₆[4], _, 0] + + // Set the value of lane 1 to be 1. + VLEIF $3, $1, R_0 // [_, r₂₆[0], _, 1] + + MOVD $0, R3 + BR multiply diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go b/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go new file mode 100644 index 000000000..4c96147c8 --- /dev/null +++ b/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go @@ -0,0 +1,144 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package salsa provides low-level access to functions in the Salsa family. +package salsa // import "golang.org/x/crypto/salsa20/salsa" + +// Sigma is the Salsa20 constant for 256-bit keys. +var Sigma = [16]byte{'e', 'x', 'p', 'a', 'n', 'd', ' ', '3', '2', '-', 'b', 'y', 't', 'e', ' ', 'k'} + +// HSalsa20 applies the HSalsa20 core function to a 16-byte input in, 32-byte +// key k, and 16-byte constant c, and puts the result into the 32-byte array +// out. +func HSalsa20(out *[32]byte, in *[16]byte, k *[32]byte, c *[16]byte) { + x0 := uint32(c[0]) | uint32(c[1])<<8 | uint32(c[2])<<16 | uint32(c[3])<<24 + x1 := uint32(k[0]) | uint32(k[1])<<8 | uint32(k[2])<<16 | uint32(k[3])<<24 + x2 := uint32(k[4]) | uint32(k[5])<<8 | uint32(k[6])<<16 | uint32(k[7])<<24 + x3 := uint32(k[8]) | uint32(k[9])<<8 | uint32(k[10])<<16 | uint32(k[11])<<24 + x4 := uint32(k[12]) | uint32(k[13])<<8 | uint32(k[14])<<16 | uint32(k[15])<<24 + x5 := uint32(c[4]) | uint32(c[5])<<8 | uint32(c[6])<<16 | uint32(c[7])<<24 + x6 := uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24 + x7 := uint32(in[4]) | uint32(in[5])<<8 | uint32(in[6])<<16 | uint32(in[7])<<24 + x8 := uint32(in[8]) | uint32(in[9])<<8 | uint32(in[10])<<16 | uint32(in[11])<<24 + x9 := uint32(in[12]) | uint32(in[13])<<8 | uint32(in[14])<<16 | uint32(in[15])<<24 + x10 := uint32(c[8]) | uint32(c[9])<<8 | uint32(c[10])<<16 | uint32(c[11])<<24 + x11 := uint32(k[16]) | uint32(k[17])<<8 | uint32(k[18])<<16 | uint32(k[19])<<24 + x12 := uint32(k[20]) | uint32(k[21])<<8 | uint32(k[22])<<16 | uint32(k[23])<<24 + x13 := uint32(k[24]) | uint32(k[25])<<8 | uint32(k[26])<<16 | uint32(k[27])<<24 + x14 := uint32(k[28]) | uint32(k[29])<<8 | uint32(k[30])<<16 | uint32(k[31])<<24 + x15 := uint32(c[12]) | uint32(c[13])<<8 | uint32(c[14])<<16 | uint32(c[15])<<24 + + for i := 0; i < 20; i += 2 { + u := x0 + x12 + x4 ^= u<<7 | u>>(32-7) + u = x4 + x0 + x8 ^= u<<9 | u>>(32-9) + u = x8 + x4 + x12 ^= u<<13 | u>>(32-13) + u = x12 + x8 + x0 ^= u<<18 | u>>(32-18) + + u = x5 + x1 + x9 ^= u<<7 | u>>(32-7) + u = x9 + x5 + x13 ^= u<<9 | u>>(32-9) + u = x13 + x9 + x1 ^= u<<13 | u>>(32-13) + u = x1 + x13 + x5 ^= u<<18 | u>>(32-18) + + u = x10 + x6 + x14 ^= u<<7 | u>>(32-7) + u = x14 + x10 + x2 ^= u<<9 | u>>(32-9) + u = x2 + x14 + x6 ^= u<<13 | u>>(32-13) + u = x6 + x2 + x10 ^= u<<18 | u>>(32-18) + + u = x15 + x11 + x3 ^= u<<7 | u>>(32-7) + u = x3 + x15 + x7 ^= u<<9 | u>>(32-9) + u = x7 + x3 + x11 ^= u<<13 | 
u>>(32-13) + u = x11 + x7 + x15 ^= u<<18 | u>>(32-18) + + u = x0 + x3 + x1 ^= u<<7 | u>>(32-7) + u = x1 + x0 + x2 ^= u<<9 | u>>(32-9) + u = x2 + x1 + x3 ^= u<<13 | u>>(32-13) + u = x3 + x2 + x0 ^= u<<18 | u>>(32-18) + + u = x5 + x4 + x6 ^= u<<7 | u>>(32-7) + u = x6 + x5 + x7 ^= u<<9 | u>>(32-9) + u = x7 + x6 + x4 ^= u<<13 | u>>(32-13) + u = x4 + x7 + x5 ^= u<<18 | u>>(32-18) + + u = x10 + x9 + x11 ^= u<<7 | u>>(32-7) + u = x11 + x10 + x8 ^= u<<9 | u>>(32-9) + u = x8 + x11 + x9 ^= u<<13 | u>>(32-13) + u = x9 + x8 + x10 ^= u<<18 | u>>(32-18) + + u = x15 + x14 + x12 ^= u<<7 | u>>(32-7) + u = x12 + x15 + x13 ^= u<<9 | u>>(32-9) + u = x13 + x12 + x14 ^= u<<13 | u>>(32-13) + u = x14 + x13 + x15 ^= u<<18 | u>>(32-18) + } + out[0] = byte(x0) + out[1] = byte(x0 >> 8) + out[2] = byte(x0 >> 16) + out[3] = byte(x0 >> 24) + + out[4] = byte(x5) + out[5] = byte(x5 >> 8) + out[6] = byte(x5 >> 16) + out[7] = byte(x5 >> 24) + + out[8] = byte(x10) + out[9] = byte(x10 >> 8) + out[10] = byte(x10 >> 16) + out[11] = byte(x10 >> 24) + + out[12] = byte(x15) + out[13] = byte(x15 >> 8) + out[14] = byte(x15 >> 16) + out[15] = byte(x15 >> 24) + + out[16] = byte(x6) + out[17] = byte(x6 >> 8) + out[18] = byte(x6 >> 16) + out[19] = byte(x6 >> 24) + + out[20] = byte(x7) + out[21] = byte(x7 >> 8) + out[22] = byte(x7 >> 16) + out[23] = byte(x7 >> 24) + + out[24] = byte(x8) + out[25] = byte(x8 >> 8) + out[26] = byte(x8 >> 16) + out[27] = byte(x8 >> 24) + + out[28] = byte(x9) + out[29] = byte(x9 >> 8) + out[30] = byte(x9 >> 16) + out[31] = byte(x9 >> 24) +} diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go b/vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go new file mode 100644 index 000000000..9bfc0927c --- /dev/null +++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go @@ -0,0 +1,199 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package salsa + +// Core208 applies the Salsa20/8 core function to the 64-byte array in and puts +// the result into the 64-byte array out. The input and output may be the same array. 
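An illustrative call, placed here before the implementation (editorial; Salsa20/8 is the reduced-round core that the scrypt construction is defined over). As the doc comment above notes, in-place use is allowed:

	package main

	import (
		"fmt"

		"golang.org/x/crypto/salsa20/salsa"
	)

	func main() {
		var block [64]byte
		block[0] = 1
		// Permute the block in place; in and out may be the same array.
		salsa.Core208(&block, &block)
		fmt.Printf("%x\n", block[:8])
	}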
+func Core208(out *[64]byte, in *[64]byte) { + j0 := uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24 + j1 := uint32(in[4]) | uint32(in[5])<<8 | uint32(in[6])<<16 | uint32(in[7])<<24 + j2 := uint32(in[8]) | uint32(in[9])<<8 | uint32(in[10])<<16 | uint32(in[11])<<24 + j3 := uint32(in[12]) | uint32(in[13])<<8 | uint32(in[14])<<16 | uint32(in[15])<<24 + j4 := uint32(in[16]) | uint32(in[17])<<8 | uint32(in[18])<<16 | uint32(in[19])<<24 + j5 := uint32(in[20]) | uint32(in[21])<<8 | uint32(in[22])<<16 | uint32(in[23])<<24 + j6 := uint32(in[24]) | uint32(in[25])<<8 | uint32(in[26])<<16 | uint32(in[27])<<24 + j7 := uint32(in[28]) | uint32(in[29])<<8 | uint32(in[30])<<16 | uint32(in[31])<<24 + j8 := uint32(in[32]) | uint32(in[33])<<8 | uint32(in[34])<<16 | uint32(in[35])<<24 + j9 := uint32(in[36]) | uint32(in[37])<<8 | uint32(in[38])<<16 | uint32(in[39])<<24 + j10 := uint32(in[40]) | uint32(in[41])<<8 | uint32(in[42])<<16 | uint32(in[43])<<24 + j11 := uint32(in[44]) | uint32(in[45])<<8 | uint32(in[46])<<16 | uint32(in[47])<<24 + j12 := uint32(in[48]) | uint32(in[49])<<8 | uint32(in[50])<<16 | uint32(in[51])<<24 + j13 := uint32(in[52]) | uint32(in[53])<<8 | uint32(in[54])<<16 | uint32(in[55])<<24 + j14 := uint32(in[56]) | uint32(in[57])<<8 | uint32(in[58])<<16 | uint32(in[59])<<24 + j15 := uint32(in[60]) | uint32(in[61])<<8 | uint32(in[62])<<16 | uint32(in[63])<<24 + + x0, x1, x2, x3, x4, x5, x6, x7, x8 := j0, j1, j2, j3, j4, j5, j6, j7, j8 + x9, x10, x11, x12, x13, x14, x15 := j9, j10, j11, j12, j13, j14, j15 + + for i := 0; i < 8; i += 2 { + u := x0 + x12 + x4 ^= u<<7 | u>>(32-7) + u = x4 + x0 + x8 ^= u<<9 | u>>(32-9) + u = x8 + x4 + x12 ^= u<<13 | u>>(32-13) + u = x12 + x8 + x0 ^= u<<18 | u>>(32-18) + + u = x5 + x1 + x9 ^= u<<7 | u>>(32-7) + u = x9 + x5 + x13 ^= u<<9 | u>>(32-9) + u = x13 + x9 + x1 ^= u<<13 | u>>(32-13) + u = x1 + x13 + x5 ^= u<<18 | u>>(32-18) + + u = x10 + x6 + x14 ^= u<<7 | u>>(32-7) + u = x14 + x10 + x2 ^= u<<9 | u>>(32-9) + u = x2 + x14 + x6 ^= u<<13 | u>>(32-13) + u = x6 + x2 + x10 ^= u<<18 | u>>(32-18) + + u = x15 + x11 + x3 ^= u<<7 | u>>(32-7) + u = x3 + x15 + x7 ^= u<<9 | u>>(32-9) + u = x7 + x3 + x11 ^= u<<13 | u>>(32-13) + u = x11 + x7 + x15 ^= u<<18 | u>>(32-18) + + u = x0 + x3 + x1 ^= u<<7 | u>>(32-7) + u = x1 + x0 + x2 ^= u<<9 | u>>(32-9) + u = x2 + x1 + x3 ^= u<<13 | u>>(32-13) + u = x3 + x2 + x0 ^= u<<18 | u>>(32-18) + + u = x5 + x4 + x6 ^= u<<7 | u>>(32-7) + u = x6 + x5 + x7 ^= u<<9 | u>>(32-9) + u = x7 + x6 + x4 ^= u<<13 | u>>(32-13) + u = x4 + x7 + x5 ^= u<<18 | u>>(32-18) + + u = x10 + x9 + x11 ^= u<<7 | u>>(32-7) + u = x11 + x10 + x8 ^= u<<9 | u>>(32-9) + u = x8 + x11 + x9 ^= u<<13 | u>>(32-13) + u = x9 + x8 + x10 ^= u<<18 | u>>(32-18) + + u = x15 + x14 + x12 ^= u<<7 | u>>(32-7) + u = x12 + x15 + x13 ^= u<<9 | u>>(32-9) + u = x13 + x12 + x14 ^= u<<13 | u>>(32-13) + u = x14 + x13 + x15 ^= u<<18 | u>>(32-18) + } + x0 += j0 + x1 += j1 + x2 += j2 + x3 += j3 + x4 += j4 + x5 += j5 + x6 += j6 + x7 += j7 + x8 += j8 + x9 += j9 + x10 += j10 + x11 += j11 + x12 += j12 + x13 += j13 + x14 += j14 + x15 += j15 + + out[0] = byte(x0) + out[1] = byte(x0 >> 8) + out[2] = byte(x0 >> 16) + out[3] = byte(x0 >> 24) + + out[4] = byte(x1) + out[5] = byte(x1 >> 8) + out[6] = byte(x1 >> 16) + out[7] = byte(x1 >> 24) + + out[8] = byte(x2) + out[9] = byte(x2 >> 8) + out[10] = byte(x2 >> 16) + out[11] = byte(x2 >> 24) + + out[12] = byte(x3) + out[13] = byte(x3 >> 8) + out[14] = byte(x3 >> 16) + out[15] = byte(x3 >> 24) + + out[16] = byte(x4) + out[17] = byte(x4 >> 8) 
+ out[18] = byte(x4 >> 16) + out[19] = byte(x4 >> 24) + + out[20] = byte(x5) + out[21] = byte(x5 >> 8) + out[22] = byte(x5 >> 16) + out[23] = byte(x5 >> 24) + + out[24] = byte(x6) + out[25] = byte(x6 >> 8) + out[26] = byte(x6 >> 16) + out[27] = byte(x6 >> 24) + + out[28] = byte(x7) + out[29] = byte(x7 >> 8) + out[30] = byte(x7 >> 16) + out[31] = byte(x7 >> 24) + + out[32] = byte(x8) + out[33] = byte(x8 >> 8) + out[34] = byte(x8 >> 16) + out[35] = byte(x8 >> 24) + + out[36] = byte(x9) + out[37] = byte(x9 >> 8) + out[38] = byte(x9 >> 16) + out[39] = byte(x9 >> 24) + + out[40] = byte(x10) + out[41] = byte(x10 >> 8) + out[42] = byte(x10 >> 16) + out[43] = byte(x10 >> 24) + + out[44] = byte(x11) + out[45] = byte(x11 >> 8) + out[46] = byte(x11 >> 16) + out[47] = byte(x11 >> 24) + + out[48] = byte(x12) + out[49] = byte(x12 >> 8) + out[50] = byte(x12 >> 16) + out[51] = byte(x12 >> 24) + + out[52] = byte(x13) + out[53] = byte(x13 >> 8) + out[54] = byte(x13 >> 16) + out[55] = byte(x13 >> 24) + + out[56] = byte(x14) + out[57] = byte(x14 >> 8) + out[58] = byte(x14 >> 16) + out[59] = byte(x14 >> 24) + + out[60] = byte(x15) + out[61] = byte(x15 >> 8) + out[62] = byte(x15 >> 16) + out[63] = byte(x15 >> 24) +} diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go new file mode 100644 index 000000000..c400dfcf7 --- /dev/null +++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go @@ -0,0 +1,24 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build amd64 && !purego && gc +// +build amd64,!purego,gc + +package salsa + +//go:noescape + +// salsa2020XORKeyStream is implemented in salsa20_amd64.s. +func salsa2020XORKeyStream(out, in *byte, n uint64, nonce, key *byte) + +// XORKeyStream crypts bytes from in to out using the given key and counters. +// In and out must overlap entirely or not at all. Counter +// contains the raw salsa20 counter bytes (both nonce and block counter). +func XORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) { + if len(in) == 0 { + return + } + _ = out[len(in)-1] + salsa2020XORKeyStream(&out[0], &in[0], uint64(len(in)), &counter[0], &key[0]) +} diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s new file mode 100644 index 000000000..c08927720 --- /dev/null +++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s @@ -0,0 +1,881 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build amd64 && !purego && gc +// +build amd64,!purego,gc + +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html + +// func salsa2020XORKeyStream(out, in *byte, n uint64, nonce, key *byte) +// This needs up to 64 bytes at 360(R12); hence the non-obvious frame size. 
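+//
+// For orientation: the wrapper in salsa20_amd64.go above reaches this routine
+// roughly as sketched below (a hedged usage sketch, not part of the vendored
+// source); the 16-byte counter argument is the 8-byte nonce followed by the
+// 8-byte little-endian block counter.
+//
+//	var key [32]byte
+//	var counter [16]byte // counter[0:8] = nonce, counter[8:16] = block number
+//	src := []byte("example plaintext")
+//	dst := make([]byte, len(src))
+//	XORKeyStream(dst, src, &counter, &key)
+//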
+TEXT ·salsa2020XORKeyStream(SB),0,$456-40 // frame = 424 + 32 byte alignment + MOVQ out+0(FP),DI + MOVQ in+8(FP),SI + MOVQ n+16(FP),DX + MOVQ nonce+24(FP),CX + MOVQ key+32(FP),R8 + + MOVQ SP,R12 + ADDQ $31, R12 + ANDQ $~31, R12 + + MOVQ DX,R9 + MOVQ CX,DX + MOVQ R8,R10 + CMPQ R9,$0 + JBE DONE + START: + MOVL 20(R10),CX + MOVL 0(R10),R8 + MOVL 0(DX),AX + MOVL 16(R10),R11 + MOVL CX,0(R12) + MOVL R8, 4 (R12) + MOVL AX, 8 (R12) + MOVL R11, 12 (R12) + MOVL 8(DX),CX + MOVL 24(R10),R8 + MOVL 4(R10),AX + MOVL 4(DX),R11 + MOVL CX,16(R12) + MOVL R8, 20 (R12) + MOVL AX, 24 (R12) + MOVL R11, 28 (R12) + MOVL 12(DX),CX + MOVL 12(R10),DX + MOVL 28(R10),R8 + MOVL 8(R10),AX + MOVL DX,32(R12) + MOVL CX, 36 (R12) + MOVL R8, 40 (R12) + MOVL AX, 44 (R12) + MOVQ $1634760805,DX + MOVQ $857760878,CX + MOVQ $2036477234,R8 + MOVQ $1797285236,AX + MOVL DX,48(R12) + MOVL CX, 52 (R12) + MOVL R8, 56 (R12) + MOVL AX, 60 (R12) + CMPQ R9,$256 + JB BYTESBETWEEN1AND255 + MOVOA 48(R12),X0 + PSHUFL $0X55,X0,X1 + PSHUFL $0XAA,X0,X2 + PSHUFL $0XFF,X0,X3 + PSHUFL $0X00,X0,X0 + MOVOA X1,64(R12) + MOVOA X2,80(R12) + MOVOA X3,96(R12) + MOVOA X0,112(R12) + MOVOA 0(R12),X0 + PSHUFL $0XAA,X0,X1 + PSHUFL $0XFF,X0,X2 + PSHUFL $0X00,X0,X3 + PSHUFL $0X55,X0,X0 + MOVOA X1,128(R12) + MOVOA X2,144(R12) + MOVOA X3,160(R12) + MOVOA X0,176(R12) + MOVOA 16(R12),X0 + PSHUFL $0XFF,X0,X1 + PSHUFL $0X55,X0,X2 + PSHUFL $0XAA,X0,X0 + MOVOA X1,192(R12) + MOVOA X2,208(R12) + MOVOA X0,224(R12) + MOVOA 32(R12),X0 + PSHUFL $0X00,X0,X1 + PSHUFL $0XAA,X0,X2 + PSHUFL $0XFF,X0,X0 + MOVOA X1,240(R12) + MOVOA X2,256(R12) + MOVOA X0,272(R12) + BYTESATLEAST256: + MOVL 16(R12),DX + MOVL 36 (R12),CX + MOVL DX,288(R12) + MOVL CX,304(R12) + SHLQ $32,CX + ADDQ CX,DX + ADDQ $1,DX + MOVQ DX,CX + SHRQ $32,CX + MOVL DX, 292 (R12) + MOVL CX, 308 (R12) + ADDQ $1,DX + MOVQ DX,CX + SHRQ $32,CX + MOVL DX, 296 (R12) + MOVL CX, 312 (R12) + ADDQ $1,DX + MOVQ DX,CX + SHRQ $32,CX + MOVL DX, 300 (R12) + MOVL CX, 316 (R12) + ADDQ $1,DX + MOVQ DX,CX + SHRQ $32,CX + MOVL DX,16(R12) + MOVL CX, 36 (R12) + MOVQ R9,352(R12) + MOVQ $20,DX + MOVOA 64(R12),X0 + MOVOA 80(R12),X1 + MOVOA 96(R12),X2 + MOVOA 256(R12),X3 + MOVOA 272(R12),X4 + MOVOA 128(R12),X5 + MOVOA 144(R12),X6 + MOVOA 176(R12),X7 + MOVOA 192(R12),X8 + MOVOA 208(R12),X9 + MOVOA 224(R12),X10 + MOVOA 304(R12),X11 + MOVOA 112(R12),X12 + MOVOA 160(R12),X13 + MOVOA 240(R12),X14 + MOVOA 288(R12),X15 + MAINLOOP1: + MOVOA X1,320(R12) + MOVOA X2,336(R12) + MOVOA X13,X1 + PADDL X12,X1 + MOVOA X1,X2 + PSLLL $7,X1 + PXOR X1,X14 + PSRLL $25,X2 + PXOR X2,X14 + MOVOA X7,X1 + PADDL X0,X1 + MOVOA X1,X2 + PSLLL $7,X1 + PXOR X1,X11 + PSRLL $25,X2 + PXOR X2,X11 + MOVOA X12,X1 + PADDL X14,X1 + MOVOA X1,X2 + PSLLL $9,X1 + PXOR X1,X15 + PSRLL $23,X2 + PXOR X2,X15 + MOVOA X0,X1 + PADDL X11,X1 + MOVOA X1,X2 + PSLLL $9,X1 + PXOR X1,X9 + PSRLL $23,X2 + PXOR X2,X9 + MOVOA X14,X1 + PADDL X15,X1 + MOVOA X1,X2 + PSLLL $13,X1 + PXOR X1,X13 + PSRLL $19,X2 + PXOR X2,X13 + MOVOA X11,X1 + PADDL X9,X1 + MOVOA X1,X2 + PSLLL $13,X1 + PXOR X1,X7 + PSRLL $19,X2 + PXOR X2,X7 + MOVOA X15,X1 + PADDL X13,X1 + MOVOA X1,X2 + PSLLL $18,X1 + PXOR X1,X12 + PSRLL $14,X2 + PXOR X2,X12 + MOVOA 320(R12),X1 + MOVOA X12,320(R12) + MOVOA X9,X2 + PADDL X7,X2 + MOVOA X2,X12 + PSLLL $18,X2 + PXOR X2,X0 + PSRLL $14,X12 + PXOR X12,X0 + MOVOA X5,X2 + PADDL X1,X2 + MOVOA X2,X12 + PSLLL $7,X2 + PXOR X2,X3 + PSRLL $25,X12 + PXOR X12,X3 + MOVOA 336(R12),X2 + MOVOA X0,336(R12) + MOVOA X6,X0 + PADDL X2,X0 + MOVOA X0,X12 + PSLLL $7,X0 + PXOR X0,X4 + PSRLL $25,X12 + PXOR X12,X4 + MOVOA X1,X0 + 
PADDL X3,X0 + MOVOA X0,X12 + PSLLL $9,X0 + PXOR X0,X10 + PSRLL $23,X12 + PXOR X12,X10 + MOVOA X2,X0 + PADDL X4,X0 + MOVOA X0,X12 + PSLLL $9,X0 + PXOR X0,X8 + PSRLL $23,X12 + PXOR X12,X8 + MOVOA X3,X0 + PADDL X10,X0 + MOVOA X0,X12 + PSLLL $13,X0 + PXOR X0,X5 + PSRLL $19,X12 + PXOR X12,X5 + MOVOA X4,X0 + PADDL X8,X0 + MOVOA X0,X12 + PSLLL $13,X0 + PXOR X0,X6 + PSRLL $19,X12 + PXOR X12,X6 + MOVOA X10,X0 + PADDL X5,X0 + MOVOA X0,X12 + PSLLL $18,X0 + PXOR X0,X1 + PSRLL $14,X12 + PXOR X12,X1 + MOVOA 320(R12),X0 + MOVOA X1,320(R12) + MOVOA X4,X1 + PADDL X0,X1 + MOVOA X1,X12 + PSLLL $7,X1 + PXOR X1,X7 + PSRLL $25,X12 + PXOR X12,X7 + MOVOA X8,X1 + PADDL X6,X1 + MOVOA X1,X12 + PSLLL $18,X1 + PXOR X1,X2 + PSRLL $14,X12 + PXOR X12,X2 + MOVOA 336(R12),X12 + MOVOA X2,336(R12) + MOVOA X14,X1 + PADDL X12,X1 + MOVOA X1,X2 + PSLLL $7,X1 + PXOR X1,X5 + PSRLL $25,X2 + PXOR X2,X5 + MOVOA X0,X1 + PADDL X7,X1 + MOVOA X1,X2 + PSLLL $9,X1 + PXOR X1,X10 + PSRLL $23,X2 + PXOR X2,X10 + MOVOA X12,X1 + PADDL X5,X1 + MOVOA X1,X2 + PSLLL $9,X1 + PXOR X1,X8 + PSRLL $23,X2 + PXOR X2,X8 + MOVOA X7,X1 + PADDL X10,X1 + MOVOA X1,X2 + PSLLL $13,X1 + PXOR X1,X4 + PSRLL $19,X2 + PXOR X2,X4 + MOVOA X5,X1 + PADDL X8,X1 + MOVOA X1,X2 + PSLLL $13,X1 + PXOR X1,X14 + PSRLL $19,X2 + PXOR X2,X14 + MOVOA X10,X1 + PADDL X4,X1 + MOVOA X1,X2 + PSLLL $18,X1 + PXOR X1,X0 + PSRLL $14,X2 + PXOR X2,X0 + MOVOA 320(R12),X1 + MOVOA X0,320(R12) + MOVOA X8,X0 + PADDL X14,X0 + MOVOA X0,X2 + PSLLL $18,X0 + PXOR X0,X12 + PSRLL $14,X2 + PXOR X2,X12 + MOVOA X11,X0 + PADDL X1,X0 + MOVOA X0,X2 + PSLLL $7,X0 + PXOR X0,X6 + PSRLL $25,X2 + PXOR X2,X6 + MOVOA 336(R12),X2 + MOVOA X12,336(R12) + MOVOA X3,X0 + PADDL X2,X0 + MOVOA X0,X12 + PSLLL $7,X0 + PXOR X0,X13 + PSRLL $25,X12 + PXOR X12,X13 + MOVOA X1,X0 + PADDL X6,X0 + MOVOA X0,X12 + PSLLL $9,X0 + PXOR X0,X15 + PSRLL $23,X12 + PXOR X12,X15 + MOVOA X2,X0 + PADDL X13,X0 + MOVOA X0,X12 + PSLLL $9,X0 + PXOR X0,X9 + PSRLL $23,X12 + PXOR X12,X9 + MOVOA X6,X0 + PADDL X15,X0 + MOVOA X0,X12 + PSLLL $13,X0 + PXOR X0,X11 + PSRLL $19,X12 + PXOR X12,X11 + MOVOA X13,X0 + PADDL X9,X0 + MOVOA X0,X12 + PSLLL $13,X0 + PXOR X0,X3 + PSRLL $19,X12 + PXOR X12,X3 + MOVOA X15,X0 + PADDL X11,X0 + MOVOA X0,X12 + PSLLL $18,X0 + PXOR X0,X1 + PSRLL $14,X12 + PXOR X12,X1 + MOVOA X9,X0 + PADDL X3,X0 + MOVOA X0,X12 + PSLLL $18,X0 + PXOR X0,X2 + PSRLL $14,X12 + PXOR X12,X2 + MOVOA 320(R12),X12 + MOVOA 336(R12),X0 + SUBQ $2,DX + JA MAINLOOP1 + PADDL 112(R12),X12 + PADDL 176(R12),X7 + PADDL 224(R12),X10 + PADDL 272(R12),X4 + MOVD X12,DX + MOVD X7,CX + MOVD X10,R8 + MOVD X4,R9 + PSHUFL $0X39,X12,X12 + PSHUFL $0X39,X7,X7 + PSHUFL $0X39,X10,X10 + PSHUFL $0X39,X4,X4 + XORL 0(SI),DX + XORL 4(SI),CX + XORL 8(SI),R8 + XORL 12(SI),R9 + MOVL DX,0(DI) + MOVL CX,4(DI) + MOVL R8,8(DI) + MOVL R9,12(DI) + MOVD X12,DX + MOVD X7,CX + MOVD X10,R8 + MOVD X4,R9 + PSHUFL $0X39,X12,X12 + PSHUFL $0X39,X7,X7 + PSHUFL $0X39,X10,X10 + PSHUFL $0X39,X4,X4 + XORL 64(SI),DX + XORL 68(SI),CX + XORL 72(SI),R8 + XORL 76(SI),R9 + MOVL DX,64(DI) + MOVL CX,68(DI) + MOVL R8,72(DI) + MOVL R9,76(DI) + MOVD X12,DX + MOVD X7,CX + MOVD X10,R8 + MOVD X4,R9 + PSHUFL $0X39,X12,X12 + PSHUFL $0X39,X7,X7 + PSHUFL $0X39,X10,X10 + PSHUFL $0X39,X4,X4 + XORL 128(SI),DX + XORL 132(SI),CX + XORL 136(SI),R8 + XORL 140(SI),R9 + MOVL DX,128(DI) + MOVL CX,132(DI) + MOVL R8,136(DI) + MOVL R9,140(DI) + MOVD X12,DX + MOVD X7,CX + MOVD X10,R8 + MOVD X4,R9 + XORL 192(SI),DX + XORL 196(SI),CX + XORL 200(SI),R8 + XORL 204(SI),R9 + MOVL DX,192(DI) + MOVL CX,196(DI) + MOVL R8,200(DI) + MOVL R9,204(DI) + PADDL 
240(R12),X14 + PADDL 64(R12),X0 + PADDL 128(R12),X5 + PADDL 192(R12),X8 + MOVD X14,DX + MOVD X0,CX + MOVD X5,R8 + MOVD X8,R9 + PSHUFL $0X39,X14,X14 + PSHUFL $0X39,X0,X0 + PSHUFL $0X39,X5,X5 + PSHUFL $0X39,X8,X8 + XORL 16(SI),DX + XORL 20(SI),CX + XORL 24(SI),R8 + XORL 28(SI),R9 + MOVL DX,16(DI) + MOVL CX,20(DI) + MOVL R8,24(DI) + MOVL R9,28(DI) + MOVD X14,DX + MOVD X0,CX + MOVD X5,R8 + MOVD X8,R9 + PSHUFL $0X39,X14,X14 + PSHUFL $0X39,X0,X0 + PSHUFL $0X39,X5,X5 + PSHUFL $0X39,X8,X8 + XORL 80(SI),DX + XORL 84(SI),CX + XORL 88(SI),R8 + XORL 92(SI),R9 + MOVL DX,80(DI) + MOVL CX,84(DI) + MOVL R8,88(DI) + MOVL R9,92(DI) + MOVD X14,DX + MOVD X0,CX + MOVD X5,R8 + MOVD X8,R9 + PSHUFL $0X39,X14,X14 + PSHUFL $0X39,X0,X0 + PSHUFL $0X39,X5,X5 + PSHUFL $0X39,X8,X8 + XORL 144(SI),DX + XORL 148(SI),CX + XORL 152(SI),R8 + XORL 156(SI),R9 + MOVL DX,144(DI) + MOVL CX,148(DI) + MOVL R8,152(DI) + MOVL R9,156(DI) + MOVD X14,DX + MOVD X0,CX + MOVD X5,R8 + MOVD X8,R9 + XORL 208(SI),DX + XORL 212(SI),CX + XORL 216(SI),R8 + XORL 220(SI),R9 + MOVL DX,208(DI) + MOVL CX,212(DI) + MOVL R8,216(DI) + MOVL R9,220(DI) + PADDL 288(R12),X15 + PADDL 304(R12),X11 + PADDL 80(R12),X1 + PADDL 144(R12),X6 + MOVD X15,DX + MOVD X11,CX + MOVD X1,R8 + MOVD X6,R9 + PSHUFL $0X39,X15,X15 + PSHUFL $0X39,X11,X11 + PSHUFL $0X39,X1,X1 + PSHUFL $0X39,X6,X6 + XORL 32(SI),DX + XORL 36(SI),CX + XORL 40(SI),R8 + XORL 44(SI),R9 + MOVL DX,32(DI) + MOVL CX,36(DI) + MOVL R8,40(DI) + MOVL R9,44(DI) + MOVD X15,DX + MOVD X11,CX + MOVD X1,R8 + MOVD X6,R9 + PSHUFL $0X39,X15,X15 + PSHUFL $0X39,X11,X11 + PSHUFL $0X39,X1,X1 + PSHUFL $0X39,X6,X6 + XORL 96(SI),DX + XORL 100(SI),CX + XORL 104(SI),R8 + XORL 108(SI),R9 + MOVL DX,96(DI) + MOVL CX,100(DI) + MOVL R8,104(DI) + MOVL R9,108(DI) + MOVD X15,DX + MOVD X11,CX + MOVD X1,R8 + MOVD X6,R9 + PSHUFL $0X39,X15,X15 + PSHUFL $0X39,X11,X11 + PSHUFL $0X39,X1,X1 + PSHUFL $0X39,X6,X6 + XORL 160(SI),DX + XORL 164(SI),CX + XORL 168(SI),R8 + XORL 172(SI),R9 + MOVL DX,160(DI) + MOVL CX,164(DI) + MOVL R8,168(DI) + MOVL R9,172(DI) + MOVD X15,DX + MOVD X11,CX + MOVD X1,R8 + MOVD X6,R9 + XORL 224(SI),DX + XORL 228(SI),CX + XORL 232(SI),R8 + XORL 236(SI),R9 + MOVL DX,224(DI) + MOVL CX,228(DI) + MOVL R8,232(DI) + MOVL R9,236(DI) + PADDL 160(R12),X13 + PADDL 208(R12),X9 + PADDL 256(R12),X3 + PADDL 96(R12),X2 + MOVD X13,DX + MOVD X9,CX + MOVD X3,R8 + MOVD X2,R9 + PSHUFL $0X39,X13,X13 + PSHUFL $0X39,X9,X9 + PSHUFL $0X39,X3,X3 + PSHUFL $0X39,X2,X2 + XORL 48(SI),DX + XORL 52(SI),CX + XORL 56(SI),R8 + XORL 60(SI),R9 + MOVL DX,48(DI) + MOVL CX,52(DI) + MOVL R8,56(DI) + MOVL R9,60(DI) + MOVD X13,DX + MOVD X9,CX + MOVD X3,R8 + MOVD X2,R9 + PSHUFL $0X39,X13,X13 + PSHUFL $0X39,X9,X9 + PSHUFL $0X39,X3,X3 + PSHUFL $0X39,X2,X2 + XORL 112(SI),DX + XORL 116(SI),CX + XORL 120(SI),R8 + XORL 124(SI),R9 + MOVL DX,112(DI) + MOVL CX,116(DI) + MOVL R8,120(DI) + MOVL R9,124(DI) + MOVD X13,DX + MOVD X9,CX + MOVD X3,R8 + MOVD X2,R9 + PSHUFL $0X39,X13,X13 + PSHUFL $0X39,X9,X9 + PSHUFL $0X39,X3,X3 + PSHUFL $0X39,X2,X2 + XORL 176(SI),DX + XORL 180(SI),CX + XORL 184(SI),R8 + XORL 188(SI),R9 + MOVL DX,176(DI) + MOVL CX,180(DI) + MOVL R8,184(DI) + MOVL R9,188(DI) + MOVD X13,DX + MOVD X9,CX + MOVD X3,R8 + MOVD X2,R9 + XORL 240(SI),DX + XORL 244(SI),CX + XORL 248(SI),R8 + XORL 252(SI),R9 + MOVL DX,240(DI) + MOVL CX,244(DI) + MOVL R8,248(DI) + MOVL R9,252(DI) + MOVQ 352(R12),R9 + SUBQ $256,R9 + ADDQ $256,SI + ADDQ $256,DI + CMPQ R9,$256 + JAE BYTESATLEAST256 + CMPQ R9,$0 + JBE DONE + BYTESBETWEEN1AND255: + CMPQ R9,$64 + JAE NOCOPY + MOVQ DI,DX + LEAQ 360(R12),DI 
+ MOVQ R9,CX + REP; MOVSB + LEAQ 360(R12),DI + LEAQ 360(R12),SI + NOCOPY: + MOVQ R9,352(R12) + MOVOA 48(R12),X0 + MOVOA 0(R12),X1 + MOVOA 16(R12),X2 + MOVOA 32(R12),X3 + MOVOA X1,X4 + MOVQ $20,CX + MAINLOOP2: + PADDL X0,X4 + MOVOA X0,X5 + MOVOA X4,X6 + PSLLL $7,X4 + PSRLL $25,X6 + PXOR X4,X3 + PXOR X6,X3 + PADDL X3,X5 + MOVOA X3,X4 + MOVOA X5,X6 + PSLLL $9,X5 + PSRLL $23,X6 + PXOR X5,X2 + PSHUFL $0X93,X3,X3 + PXOR X6,X2 + PADDL X2,X4 + MOVOA X2,X5 + MOVOA X4,X6 + PSLLL $13,X4 + PSRLL $19,X6 + PXOR X4,X1 + PSHUFL $0X4E,X2,X2 + PXOR X6,X1 + PADDL X1,X5 + MOVOA X3,X4 + MOVOA X5,X6 + PSLLL $18,X5 + PSRLL $14,X6 + PXOR X5,X0 + PSHUFL $0X39,X1,X1 + PXOR X6,X0 + PADDL X0,X4 + MOVOA X0,X5 + MOVOA X4,X6 + PSLLL $7,X4 + PSRLL $25,X6 + PXOR X4,X1 + PXOR X6,X1 + PADDL X1,X5 + MOVOA X1,X4 + MOVOA X5,X6 + PSLLL $9,X5 + PSRLL $23,X6 + PXOR X5,X2 + PSHUFL $0X93,X1,X1 + PXOR X6,X2 + PADDL X2,X4 + MOVOA X2,X5 + MOVOA X4,X6 + PSLLL $13,X4 + PSRLL $19,X6 + PXOR X4,X3 + PSHUFL $0X4E,X2,X2 + PXOR X6,X3 + PADDL X3,X5 + MOVOA X1,X4 + MOVOA X5,X6 + PSLLL $18,X5 + PSRLL $14,X6 + PXOR X5,X0 + PSHUFL $0X39,X3,X3 + PXOR X6,X0 + PADDL X0,X4 + MOVOA X0,X5 + MOVOA X4,X6 + PSLLL $7,X4 + PSRLL $25,X6 + PXOR X4,X3 + PXOR X6,X3 + PADDL X3,X5 + MOVOA X3,X4 + MOVOA X5,X6 + PSLLL $9,X5 + PSRLL $23,X6 + PXOR X5,X2 + PSHUFL $0X93,X3,X3 + PXOR X6,X2 + PADDL X2,X4 + MOVOA X2,X5 + MOVOA X4,X6 + PSLLL $13,X4 + PSRLL $19,X6 + PXOR X4,X1 + PSHUFL $0X4E,X2,X2 + PXOR X6,X1 + PADDL X1,X5 + MOVOA X3,X4 + MOVOA X5,X6 + PSLLL $18,X5 + PSRLL $14,X6 + PXOR X5,X0 + PSHUFL $0X39,X1,X1 + PXOR X6,X0 + PADDL X0,X4 + MOVOA X0,X5 + MOVOA X4,X6 + PSLLL $7,X4 + PSRLL $25,X6 + PXOR X4,X1 + PXOR X6,X1 + PADDL X1,X5 + MOVOA X1,X4 + MOVOA X5,X6 + PSLLL $9,X5 + PSRLL $23,X6 + PXOR X5,X2 + PSHUFL $0X93,X1,X1 + PXOR X6,X2 + PADDL X2,X4 + MOVOA X2,X5 + MOVOA X4,X6 + PSLLL $13,X4 + PSRLL $19,X6 + PXOR X4,X3 + PSHUFL $0X4E,X2,X2 + PXOR X6,X3 + SUBQ $4,CX + PADDL X3,X5 + MOVOA X1,X4 + MOVOA X5,X6 + PSLLL $18,X5 + PXOR X7,X7 + PSRLL $14,X6 + PXOR X5,X0 + PSHUFL $0X39,X3,X3 + PXOR X6,X0 + JA MAINLOOP2 + PADDL 48(R12),X0 + PADDL 0(R12),X1 + PADDL 16(R12),X2 + PADDL 32(R12),X3 + MOVD X0,CX + MOVD X1,R8 + MOVD X2,R9 + MOVD X3,AX + PSHUFL $0X39,X0,X0 + PSHUFL $0X39,X1,X1 + PSHUFL $0X39,X2,X2 + PSHUFL $0X39,X3,X3 + XORL 0(SI),CX + XORL 48(SI),R8 + XORL 32(SI),R9 + XORL 16(SI),AX + MOVL CX,0(DI) + MOVL R8,48(DI) + MOVL R9,32(DI) + MOVL AX,16(DI) + MOVD X0,CX + MOVD X1,R8 + MOVD X2,R9 + MOVD X3,AX + PSHUFL $0X39,X0,X0 + PSHUFL $0X39,X1,X1 + PSHUFL $0X39,X2,X2 + PSHUFL $0X39,X3,X3 + XORL 20(SI),CX + XORL 4(SI),R8 + XORL 52(SI),R9 + XORL 36(SI),AX + MOVL CX,20(DI) + MOVL R8,4(DI) + MOVL R9,52(DI) + MOVL AX,36(DI) + MOVD X0,CX + MOVD X1,R8 + MOVD X2,R9 + MOVD X3,AX + PSHUFL $0X39,X0,X0 + PSHUFL $0X39,X1,X1 + PSHUFL $0X39,X2,X2 + PSHUFL $0X39,X3,X3 + XORL 40(SI),CX + XORL 24(SI),R8 + XORL 8(SI),R9 + XORL 56(SI),AX + MOVL CX,40(DI) + MOVL R8,24(DI) + MOVL R9,8(DI) + MOVL AX,56(DI) + MOVD X0,CX + MOVD X1,R8 + MOVD X2,R9 + MOVD X3,AX + XORL 60(SI),CX + XORL 44(SI),R8 + XORL 28(SI),R9 + XORL 12(SI),AX + MOVL CX,60(DI) + MOVL R8,44(DI) + MOVL R9,28(DI) + MOVL AX,12(DI) + MOVQ 352(R12),R9 + MOVL 16(R12),CX + MOVL 36 (R12),R8 + ADDQ $1,CX + SHLQ $32,R8 + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $32,R8 + MOVL CX,16(R12) + MOVL R8, 36 (R12) + CMPQ R9,$64 + JA BYTESATLEAST65 + JAE BYTESATLEAST64 + MOVQ DI,SI + MOVQ DX,DI + MOVQ R9,CX + REP; MOVSB + BYTESATLEAST64: + DONE: + RET + BYTESATLEAST65: + SUBQ $64,R9 + ADDQ $64,DI + ADDQ $64,SI + JMP BYTESBETWEEN1AND255 diff --git 
a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_noasm.go b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_noasm.go new file mode 100644 index 000000000..4392cc1ac --- /dev/null +++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_noasm.go @@ -0,0 +1,15 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !amd64 || purego || !gc +// +build !amd64 purego !gc + +package salsa + +// XORKeyStream crypts bytes from in to out using the given key and counters. +// In and out must overlap entirely or not at all. Counter +// contains the raw salsa20 counter bytes (both nonce and block counter). +func XORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) { + genericXORKeyStream(out, in, counter, key) +} diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go new file mode 100644 index 000000000..68169c6d6 --- /dev/null +++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go @@ -0,0 +1,231 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package salsa + +const rounds = 20 + +// core applies the Salsa20 core function to 16-byte input in, 32-byte key k, +// and 16-byte constant c, and puts the result into 64-byte array out. +func core(out *[64]byte, in *[16]byte, k *[32]byte, c *[16]byte) { + j0 := uint32(c[0]) | uint32(c[1])<<8 | uint32(c[2])<<16 | uint32(c[3])<<24 + j1 := uint32(k[0]) | uint32(k[1])<<8 | uint32(k[2])<<16 | uint32(k[3])<<24 + j2 := uint32(k[4]) | uint32(k[5])<<8 | uint32(k[6])<<16 | uint32(k[7])<<24 + j3 := uint32(k[8]) | uint32(k[9])<<8 | uint32(k[10])<<16 | uint32(k[11])<<24 + j4 := uint32(k[12]) | uint32(k[13])<<8 | uint32(k[14])<<16 | uint32(k[15])<<24 + j5 := uint32(c[4]) | uint32(c[5])<<8 | uint32(c[6])<<16 | uint32(c[7])<<24 + j6 := uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24 + j7 := uint32(in[4]) | uint32(in[5])<<8 | uint32(in[6])<<16 | uint32(in[7])<<24 + j8 := uint32(in[8]) | uint32(in[9])<<8 | uint32(in[10])<<16 | uint32(in[11])<<24 + j9 := uint32(in[12]) | uint32(in[13])<<8 | uint32(in[14])<<16 | uint32(in[15])<<24 + j10 := uint32(c[8]) | uint32(c[9])<<8 | uint32(c[10])<<16 | uint32(c[11])<<24 + j11 := uint32(k[16]) | uint32(k[17])<<8 | uint32(k[18])<<16 | uint32(k[19])<<24 + j12 := uint32(k[20]) | uint32(k[21])<<8 | uint32(k[22])<<16 | uint32(k[23])<<24 + j13 := uint32(k[24]) | uint32(k[25])<<8 | uint32(k[26])<<16 | uint32(k[27])<<24 + j14 := uint32(k[28]) | uint32(k[29])<<8 | uint32(k[30])<<16 | uint32(k[31])<<24 + j15 := uint32(c[12]) | uint32(c[13])<<8 | uint32(c[14])<<16 | uint32(c[15])<<24 + + x0, x1, x2, x3, x4, x5, x6, x7, x8 := j0, j1, j2, j3, j4, j5, j6, j7, j8 + x9, x10, x11, x12, x13, x14, x15 := j9, j10, j11, j12, j13, j14, j15 + + for i := 0; i < rounds; i += 2 { + u := x0 + x12 + x4 ^= u<<7 | u>>(32-7) + u = x4 + x0 + x8 ^= u<<9 | u>>(32-9) + u = x8 + x4 + x12 ^= u<<13 | u>>(32-13) + u = x12 + x8 + x0 ^= u<<18 | u>>(32-18) + + u = x5 + x1 + x9 ^= u<<7 | u>>(32-7) + u = x9 + x5 + x13 ^= u<<9 | u>>(32-9) + u = x13 + x9 + x1 ^= u<<13 | u>>(32-13) + u = x1 + x13 + x5 ^= u<<18 | u>>(32-18) + + u = x10 + x6 + x14 ^= u<<7 | u>>(32-7) + u = x14 + x10 + x2 ^= u<<9 | u>>(32-9) + u = x2 + x14 + x6 ^= u<<13 | u>>(32-13) + u = x6 + x2 + x10 ^= u<<18 | u>>(32-18) + + u = x15 + x11 + x3 ^= u<<7 | u>>(32-7) + u = 
x3 + x15 + x7 ^= u<<9 | u>>(32-9) + u = x7 + x3 + x11 ^= u<<13 | u>>(32-13) + u = x11 + x7 + x15 ^= u<<18 | u>>(32-18) + + u = x0 + x3 + x1 ^= u<<7 | u>>(32-7) + u = x1 + x0 + x2 ^= u<<9 | u>>(32-9) + u = x2 + x1 + x3 ^= u<<13 | u>>(32-13) + u = x3 + x2 + x0 ^= u<<18 | u>>(32-18) + + u = x5 + x4 + x6 ^= u<<7 | u>>(32-7) + u = x6 + x5 + x7 ^= u<<9 | u>>(32-9) + u = x7 + x6 + x4 ^= u<<13 | u>>(32-13) + u = x4 + x7 + x5 ^= u<<18 | u>>(32-18) + + u = x10 + x9 + x11 ^= u<<7 | u>>(32-7) + u = x11 + x10 + x8 ^= u<<9 | u>>(32-9) + u = x8 + x11 + x9 ^= u<<13 | u>>(32-13) + u = x9 + x8 + x10 ^= u<<18 | u>>(32-18) + + u = x15 + x14 + x12 ^= u<<7 | u>>(32-7) + u = x12 + x15 + x13 ^= u<<9 | u>>(32-9) + u = x13 + x12 + x14 ^= u<<13 | u>>(32-13) + u = x14 + x13 + x15 ^= u<<18 | u>>(32-18) + } + x0 += j0 + x1 += j1 + x2 += j2 + x3 += j3 + x4 += j4 + x5 += j5 + x6 += j6 + x7 += j7 + x8 += j8 + x9 += j9 + x10 += j10 + x11 += j11 + x12 += j12 + x13 += j13 + x14 += j14 + x15 += j15 + + out[0] = byte(x0) + out[1] = byte(x0 >> 8) + out[2] = byte(x0 >> 16) + out[3] = byte(x0 >> 24) + + out[4] = byte(x1) + out[5] = byte(x1 >> 8) + out[6] = byte(x1 >> 16) + out[7] = byte(x1 >> 24) + + out[8] = byte(x2) + out[9] = byte(x2 >> 8) + out[10] = byte(x2 >> 16) + out[11] = byte(x2 >> 24) + + out[12] = byte(x3) + out[13] = byte(x3 >> 8) + out[14] = byte(x3 >> 16) + out[15] = byte(x3 >> 24) + + out[16] = byte(x4) + out[17] = byte(x4 >> 8) + out[18] = byte(x4 >> 16) + out[19] = byte(x4 >> 24) + + out[20] = byte(x5) + out[21] = byte(x5 >> 8) + out[22] = byte(x5 >> 16) + out[23] = byte(x5 >> 24) + + out[24] = byte(x6) + out[25] = byte(x6 >> 8) + out[26] = byte(x6 >> 16) + out[27] = byte(x6 >> 24) + + out[28] = byte(x7) + out[29] = byte(x7 >> 8) + out[30] = byte(x7 >> 16) + out[31] = byte(x7 >> 24) + + out[32] = byte(x8) + out[33] = byte(x8 >> 8) + out[34] = byte(x8 >> 16) + out[35] = byte(x8 >> 24) + + out[36] = byte(x9) + out[37] = byte(x9 >> 8) + out[38] = byte(x9 >> 16) + out[39] = byte(x9 >> 24) + + out[40] = byte(x10) + out[41] = byte(x10 >> 8) + out[42] = byte(x10 >> 16) + out[43] = byte(x10 >> 24) + + out[44] = byte(x11) + out[45] = byte(x11 >> 8) + out[46] = byte(x11 >> 16) + out[47] = byte(x11 >> 24) + + out[48] = byte(x12) + out[49] = byte(x12 >> 8) + out[50] = byte(x12 >> 16) + out[51] = byte(x12 >> 24) + + out[52] = byte(x13) + out[53] = byte(x13 >> 8) + out[54] = byte(x13 >> 16) + out[55] = byte(x13 >> 24) + + out[56] = byte(x14) + out[57] = byte(x14 >> 8) + out[58] = byte(x14 >> 16) + out[59] = byte(x14 >> 24) + + out[60] = byte(x15) + out[61] = byte(x15 >> 8) + out[62] = byte(x15 >> 16) + out[63] = byte(x15 >> 24) +} + +// genericXORKeyStream is the generic implementation of XORKeyStream to be used +// when no assembly implementation is available. 
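+//
+// Note that the rotations in core are spelled u<<k | u>>(32-k), the standard
+// rotate-left idiom; with math/bits the first quarter-round step would read,
+// equivalently:
+//
+//	x4 ^= bits.RotateLeft32(x0+x12, 7)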
+func genericXORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) { + var block [64]byte + var counterCopy [16]byte + copy(counterCopy[:], counter[:]) + + for len(in) >= 64 { + core(&block, &counterCopy, key, &Sigma) + for i, x := range block { + out[i] = in[i] ^ x + } + u := uint32(1) + for i := 8; i < 16; i++ { + u += uint32(counterCopy[i]) + counterCopy[i] = byte(u) + u >>= 8 + } + in = in[64:] + out = out[64:] + } + + if len(in) > 0 { + core(&block, &counterCopy, key, &Sigma) + for i, v := range in { + out[i] = v ^ block[i] + } + } +} diff --git a/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s b/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s new file mode 100644 index 000000000..db9171c2e --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s @@ -0,0 +1,18 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc +// +build gc + +#include "textflag.h" + +// +// System calls for ppc64, AIX are implemented in runtime/syscall_aix.go +// + +TEXT ·syscall6(SB),NOSPLIT,$0-88 + JMP syscall·syscall6(SB) + +TEXT ·rawSyscall6(SB),NOSPLIT,$0-88 + JMP syscall·rawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/cpu/byteorder.go b/vendor/golang.org/x/sys/cpu/byteorder.go new file mode 100644 index 000000000..dcbb14ef3 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/byteorder.go @@ -0,0 +1,65 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import ( + "runtime" +) + +// byteOrder is a subset of encoding/binary.ByteOrder. +type byteOrder interface { + Uint32([]byte) uint32 + Uint64([]byte) uint64 +} + +type littleEndian struct{} +type bigEndian struct{} + +func (littleEndian) Uint32(b []byte) uint32 { + _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func (littleEndian) Uint64(b []byte) uint64 { + _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +func (bigEndian) Uint32(b []byte) uint32 { + _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 + return uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24 +} + +func (bigEndian) Uint64(b []byte) uint64 { + _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 | + uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56 +} + +// hostByteOrder returns littleEndian on little-endian machines and +// bigEndian on big-endian machines. 
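+//
+// For example, for b := []byte{0x01, 0x02, 0x03, 0x04}:
+//
+//	littleEndian{}.Uint32(b) // 0x04030201
+//	bigEndian{}.Uint32(b)    // 0x01020304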
+func hostByteOrder() byteOrder {
+	switch runtime.GOARCH {
+	case "386", "amd64", "amd64p32",
+		"alpha",
+		"arm", "arm64",
+		"mipsle", "mips64le", "mips64p32le",
+		"nios2",
+		"ppc64le",
+		"riscv", "riscv64",
+		"sh":
+		return littleEndian{}
+	case "armbe", "arm64be",
+		"m68k",
+		"mips", "mips64", "mips64p32",
+		"ppc", "ppc64",
+		"s390", "s390x",
+		"shbe",
+		"sparc", "sparc64":
+		return bigEndian{}
+	}
+	panic("unknown architecture")
+}
diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go
new file mode 100644
index 000000000..b56886f26
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu.go
@@ -0,0 +1,287 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package cpu implements processor feature detection for
+// various CPU architectures.
+package cpu
+
+import (
+	"os"
+	"strings"
+)
+
+// Initialized reports whether the CPU features were initialized.
+//
+// For some GOOS/GOARCH combinations initialization of the CPU features depends
+// on reading an operating system specific file, e.g. /proc/self/auxv on linux/arm.
+// Initialized will report false if reading the file fails.
+var Initialized bool
+
+// CacheLinePad is used to pad structs to avoid false sharing.
+type CacheLinePad struct{ _ [cacheLineSize]byte }
+
+// X86 contains the supported CPU features of the
+// current X86/AMD64 platform. If the current platform
+// is not X86/AMD64 then all feature flags are false.
+//
+// X86 is padded to avoid false sharing. Further the HasAVX
+// and HasAVX2 are only set if the OS supports XMM and YMM
+// registers in addition to the CPUID feature bit being set.
+var X86 struct {
+	_                   CacheLinePad
+	HasAES              bool // AES hardware implementation (AES NI)
+	HasADX              bool // Multi-precision add-carry instruction extensions
+	HasAVX              bool // Advanced vector extension
+	HasAVX2             bool // Advanced vector extension 2
+	HasAVX512           bool // Advanced vector extension 512
+	HasAVX512F          bool // Advanced vector extension 512 Foundation Instructions
+	HasAVX512CD         bool // Advanced vector extension 512 Conflict Detection Instructions
+	HasAVX512ER         bool // Advanced vector extension 512 Exponential and Reciprocal Instructions
+	HasAVX512PF         bool // Advanced vector extension 512 Prefetch Instructions
+	HasAVX512VL         bool // Advanced vector extension 512 Vector Length Extensions
+	HasAVX512BW         bool // Advanced vector extension 512 Byte and Word Instructions
+	HasAVX512DQ         bool // Advanced vector extension 512 Doubleword and Quadword Instructions
+	HasAVX512IFMA       bool // Advanced vector extension 512 Integer Fused Multiply Add
+	HasAVX512VBMI       bool // Advanced vector extension 512 Vector Byte Manipulation Instructions
+	HasAVX5124VNNIW     bool // Advanced vector extension 512 Vector Neural Network Instructions Word variable precision
+	HasAVX5124FMAPS     bool // Advanced vector extension 512 Fused Multiply Accumulation Packed Single precision
+	HasAVX512VPOPCNTDQ  bool // Advanced vector extension 512 Double and quad word population count instructions
+	HasAVX512VPCLMULQDQ bool // Advanced vector extension 512 Vector carry-less multiply operations
+	HasAVX512VNNI       bool // Advanced vector extension 512 Vector Neural Network Instructions
+	HasAVX512GFNI       bool // Advanced vector extension 512 Galois field New Instructions
+	HasAVX512VAES       bool // Advanced vector extension 512 Vector AES instructions
+	HasAVX512VBMI2      bool // Advanced vector extension 512 Vector Byte Manipulation Instructions
2 + HasAVX512BITALG bool // Advanced vector extension 512 Bit Algorithms + HasAVX512BF16 bool // Advanced vector extension 512 BFloat16 Instructions + HasBMI1 bool // Bit manipulation instruction set 1 + HasBMI2 bool // Bit manipulation instruction set 2 + HasCX16 bool // Compare and exchange 16 Bytes + HasERMS bool // Enhanced REP for MOVSB and STOSB + HasFMA bool // Fused-multiply-add instructions + HasOSXSAVE bool // OS supports XSAVE/XRESTOR for saving/restoring XMM registers. + HasPCLMULQDQ bool // PCLMULQDQ instruction - most often used for AES-GCM + HasPOPCNT bool // Hamming weight instruction POPCNT. + HasRDRAND bool // RDRAND instruction (on-chip random number generator) + HasRDSEED bool // RDSEED instruction (on-chip random number generator) + HasSSE2 bool // Streaming SIMD extension 2 (always available on amd64) + HasSSE3 bool // Streaming SIMD extension 3 + HasSSSE3 bool // Supplemental streaming SIMD extension 3 + HasSSE41 bool // Streaming SIMD extension 4 and 4.1 + HasSSE42 bool // Streaming SIMD extension 4 and 4.2 + _ CacheLinePad +} + +// ARM64 contains the supported CPU features of the +// current ARMv8(aarch64) platform. If the current platform +// is not arm64 then all feature flags are false. +var ARM64 struct { + _ CacheLinePad + HasFP bool // Floating-point instruction set (always available) + HasASIMD bool // Advanced SIMD (always available) + HasEVTSTRM bool // Event stream support + HasAES bool // AES hardware implementation + HasPMULL bool // Polynomial multiplication instruction set + HasSHA1 bool // SHA1 hardware implementation + HasSHA2 bool // SHA2 hardware implementation + HasCRC32 bool // CRC32 hardware implementation + HasATOMICS bool // Atomic memory operation instruction set + HasFPHP bool // Half precision floating-point instruction set + HasASIMDHP bool // Advanced SIMD half precision instruction set + HasCPUID bool // CPUID identification scheme registers + HasASIMDRDM bool // Rounding double multiply add/subtract instruction set + HasJSCVT bool // Javascript conversion from floating-point to integer + HasFCMA bool // Floating-point multiplication and addition of complex numbers + HasLRCPC bool // Release Consistent processor consistent support + HasDCPOP bool // Persistent memory support + HasSHA3 bool // SHA3 hardware implementation + HasSM3 bool // SM3 hardware implementation + HasSM4 bool // SM4 hardware implementation + HasASIMDDP bool // Advanced SIMD double precision instruction set + HasSHA512 bool // SHA512 hardware implementation + HasSVE bool // Scalable Vector Extensions + HasASIMDFHM bool // Advanced SIMD multiplication FP16 to FP32 + _ CacheLinePad +} + +// ARM contains the supported CPU features of the current ARM (32-bit) platform. +// All feature flags are false if: +// 1. the current platform is not arm, or +// 2. the current operating system is not Linux. 
+var ARM struct { + _ CacheLinePad + HasSWP bool // SWP instruction support + HasHALF bool // Half-word load and store support + HasTHUMB bool // ARM Thumb instruction set + Has26BIT bool // Address space limited to 26-bits + HasFASTMUL bool // 32-bit operand, 64-bit result multiplication support + HasFPA bool // Floating point arithmetic support + HasVFP bool // Vector floating point support + HasEDSP bool // DSP Extensions support + HasJAVA bool // Java instruction set + HasIWMMXT bool // Intel Wireless MMX technology support + HasCRUNCH bool // MaverickCrunch context switching and handling + HasTHUMBEE bool // Thumb EE instruction set + HasNEON bool // NEON instruction set + HasVFPv3 bool // Vector floating point version 3 support + HasVFPv3D16 bool // Vector floating point version 3 D8-D15 + HasTLS bool // Thread local storage support + HasVFPv4 bool // Vector floating point version 4 support + HasIDIVA bool // Integer divide instruction support in ARM mode + HasIDIVT bool // Integer divide instruction support in Thumb mode + HasVFPD32 bool // Vector floating point version 3 D15-D31 + HasLPAE bool // Large Physical Address Extensions + HasEVTSTRM bool // Event stream support + HasAES bool // AES hardware implementation + HasPMULL bool // Polynomial multiplication instruction set + HasSHA1 bool // SHA1 hardware implementation + HasSHA2 bool // SHA2 hardware implementation + HasCRC32 bool // CRC32 hardware implementation + _ CacheLinePad +} + +// MIPS64X contains the supported CPU features of the current mips64/mips64le +// platforms. If the current platform is not mips64/mips64le or the current +// operating system is not Linux then all feature flags are false. +var MIPS64X struct { + _ CacheLinePad + HasMSA bool // MIPS SIMD architecture + _ CacheLinePad +} + +// PPC64 contains the supported CPU features of the current ppc64/ppc64le platforms. +// If the current platform is not ppc64/ppc64le then all feature flags are false. +// +// For ppc64/ppc64le, it is safe to check only for ISA level starting on ISA v3.00, +// since there are no optional categories. There are some exceptions that also +// require kernel support to work (DARN, SCV), so there are feature bits for +// those as well. The struct is padded to avoid false sharing. +var PPC64 struct { + _ CacheLinePad + HasDARN bool // Hardware random number generator (requires kernel enablement) + HasSCV bool // Syscall vectored (requires kernel enablement) + IsPOWER8 bool // ISA v2.07 (POWER8) + IsPOWER9 bool // ISA v3.00 (POWER9), implies IsPOWER8 + _ CacheLinePad +} + +// S390X contains the supported CPU features of the current IBM Z +// (s390x) platform. If the current platform is not IBM Z then all +// feature flags are false. +// +// S390X is padded to avoid false sharing. Further HasVX is only set +// if the OS supports vector registers in addition to the STFLE +// feature bit being set. 
+var S390X struct { + _ CacheLinePad + HasZARCH bool // z/Architecture mode is active [mandatory] + HasSTFLE bool // store facility list extended + HasLDISP bool // long (20-bit) displacements + HasEIMM bool // 32-bit immediates + HasDFP bool // decimal floating point + HasETF3EH bool // ETF-3 enhanced + HasMSA bool // message security assist (CPACF) + HasAES bool // KM-AES{128,192,256} functions + HasAESCBC bool // KMC-AES{128,192,256} functions + HasAESCTR bool // KMCTR-AES{128,192,256} functions + HasAESGCM bool // KMA-GCM-AES{128,192,256} functions + HasGHASH bool // KIMD-GHASH function + HasSHA1 bool // K{I,L}MD-SHA-1 functions + HasSHA256 bool // K{I,L}MD-SHA-256 functions + HasSHA512 bool // K{I,L}MD-SHA-512 functions + HasSHA3 bool // K{I,L}MD-SHA3-{224,256,384,512} and K{I,L}MD-SHAKE-{128,256} functions + HasVX bool // vector facility + HasVXE bool // vector-enhancements facility 1 + _ CacheLinePad +} + +func init() { + archInit() + initOptions() + processOptions() +} + +// options contains the cpu debug options that can be used in GODEBUG. +// Options are arch dependent and are added by the arch specific initOptions functions. +// Features that are mandatory for the specific GOARCH should have the Required field set +// (e.g. SSE2 on amd64). +var options []option + +// Option names should be lower case. e.g. avx instead of AVX. +type option struct { + Name string + Feature *bool + Specified bool // whether feature value was specified in GODEBUG + Enable bool // whether feature should be enabled + Required bool // whether feature is mandatory and can not be disabled +} + +func processOptions() { + env := os.Getenv("GODEBUG") +field: + for env != "" { + field := "" + i := strings.IndexByte(env, ',') + if i < 0 { + field, env = env, "" + } else { + field, env = env[:i], env[i+1:] + } + if len(field) < 4 || field[:4] != "cpu." { + continue + } + i = strings.IndexByte(field, '=') + if i < 0 { + print("GODEBUG sys/cpu: no value specified for \"", field, "\"\n") + continue + } + key, value := field[4:i], field[i+1:] // e.g. "SSE2", "on" + + var enable bool + switch value { + case "on": + enable = true + case "off": + enable = false + default: + print("GODEBUG sys/cpu: value \"", value, "\" not supported for cpu option \"", key, "\"\n") + continue field + } + + if key == "all" { + for i := range options { + options[i].Specified = true + options[i].Enable = enable || options[i].Required + } + continue field + } + + for i := range options { + if options[i].Name == key { + options[i].Specified = true + options[i].Enable = enable + continue field + } + } + + print("GODEBUG sys/cpu: unknown cpu feature \"", key, "\"\n") + } + + for _, o := range options { + if !o.Specified { + continue + } + + if o.Enable && !*o.Feature { + print("GODEBUG sys/cpu: can not enable \"", o.Name, "\", missing CPU support\n") + continue + } + + if !o.Enable && o.Required { + print("GODEBUG sys/cpu: can not disable \"", o.Name, "\", required CPU feature\n") + continue + } + + *o.Feature = o.Enable + } +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_aix.go b/vendor/golang.org/x/sys/cpu/cpu_aix.go new file mode 100644 index 000000000..8aaeef545 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_aix.go @@ -0,0 +1,34 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
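+//
+// AIX exposes no /proc/self/auxv, so the implementation level is queried from
+// the kernel with getsystemcfg(SC_IMPL); detection then reduces to a bit test
+// (a sketch of the logic below):
+//
+//	impl := getsystemcfg(_SC_IMPL)
+//	if impl&_IMPL_POWER9 != 0 {
+//		// POWER9 (and, by implication, POWER8) features are available
+//	}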
+ +//go:build aix +// +build aix + +package cpu + +const ( + // getsystemcfg constants + _SC_IMPL = 2 + _IMPL_POWER8 = 0x10000 + _IMPL_POWER9 = 0x20000 +) + +func archInit() { + impl := getsystemcfg(_SC_IMPL) + if impl&_IMPL_POWER8 != 0 { + PPC64.IsPOWER8 = true + } + if impl&_IMPL_POWER9 != 0 { + PPC64.IsPOWER8 = true + PPC64.IsPOWER9 = true + } + + Initialized = true +} + +func getsystemcfg(label int) (n uint64) { + r0, _ := callgetsystemcfg(label) + n = uint64(r0) + return +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm.go b/vendor/golang.org/x/sys/cpu/cpu_arm.go new file mode 100644 index 000000000..301b752e9 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_arm.go @@ -0,0 +1,73 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +const cacheLineSize = 32 + +// HWCAP/HWCAP2 bits. +// These are specific to Linux. +const ( + hwcap_SWP = 1 << 0 + hwcap_HALF = 1 << 1 + hwcap_THUMB = 1 << 2 + hwcap_26BIT = 1 << 3 + hwcap_FAST_MULT = 1 << 4 + hwcap_FPA = 1 << 5 + hwcap_VFP = 1 << 6 + hwcap_EDSP = 1 << 7 + hwcap_JAVA = 1 << 8 + hwcap_IWMMXT = 1 << 9 + hwcap_CRUNCH = 1 << 10 + hwcap_THUMBEE = 1 << 11 + hwcap_NEON = 1 << 12 + hwcap_VFPv3 = 1 << 13 + hwcap_VFPv3D16 = 1 << 14 + hwcap_TLS = 1 << 15 + hwcap_VFPv4 = 1 << 16 + hwcap_IDIVA = 1 << 17 + hwcap_IDIVT = 1 << 18 + hwcap_VFPD32 = 1 << 19 + hwcap_LPAE = 1 << 20 + hwcap_EVTSTRM = 1 << 21 + + hwcap2_AES = 1 << 0 + hwcap2_PMULL = 1 << 1 + hwcap2_SHA1 = 1 << 2 + hwcap2_SHA2 = 1 << 3 + hwcap2_CRC32 = 1 << 4 +) + +func initOptions() { + options = []option{ + {Name: "pmull", Feature: &ARM.HasPMULL}, + {Name: "sha1", Feature: &ARM.HasSHA1}, + {Name: "sha2", Feature: &ARM.HasSHA2}, + {Name: "swp", Feature: &ARM.HasSWP}, + {Name: "thumb", Feature: &ARM.HasTHUMB}, + {Name: "thumbee", Feature: &ARM.HasTHUMBEE}, + {Name: "tls", Feature: &ARM.HasTLS}, + {Name: "vfp", Feature: &ARM.HasVFP}, + {Name: "vfpd32", Feature: &ARM.HasVFPD32}, + {Name: "vfpv3", Feature: &ARM.HasVFPv3}, + {Name: "vfpv3d16", Feature: &ARM.HasVFPv3D16}, + {Name: "vfpv4", Feature: &ARM.HasVFPv4}, + {Name: "half", Feature: &ARM.HasHALF}, + {Name: "26bit", Feature: &ARM.Has26BIT}, + {Name: "fastmul", Feature: &ARM.HasFASTMUL}, + {Name: "fpa", Feature: &ARM.HasFPA}, + {Name: "edsp", Feature: &ARM.HasEDSP}, + {Name: "java", Feature: &ARM.HasJAVA}, + {Name: "iwmmxt", Feature: &ARM.HasIWMMXT}, + {Name: "crunch", Feature: &ARM.HasCRUNCH}, + {Name: "neon", Feature: &ARM.HasNEON}, + {Name: "idivt", Feature: &ARM.HasIDIVT}, + {Name: "idiva", Feature: &ARM.HasIDIVA}, + {Name: "lpae", Feature: &ARM.HasLPAE}, + {Name: "evtstrm", Feature: &ARM.HasEVTSTRM}, + {Name: "aes", Feature: &ARM.HasAES}, + {Name: "crc32", Feature: &ARM.HasCRC32}, + } + +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_arm64.go new file mode 100644 index 000000000..87dd5e302 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.go @@ -0,0 +1,172 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
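+//
+// On arm64 the feature bits can come from the OS (Linux hwcap, NetBSD sysctl),
+// from direct MRS reads of the ID registers (FreeBSD), or from a fixed minimal
+// fallback. The ID registers are decoded field by field with extractBits; for
+// example the AES field of ID_AA64ISAR0_EL1 occupies bits 4-7, roughly:
+//
+//	switch extractBits(isar0, 4, 7) {
+//	case 1:
+//		// AES only
+//	case 2:
+//		// AES and PMULL
+//	}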
+ +package cpu + +import "runtime" + +const cacheLineSize = 64 + +func initOptions() { + options = []option{ + {Name: "fp", Feature: &ARM64.HasFP}, + {Name: "asimd", Feature: &ARM64.HasASIMD}, + {Name: "evstrm", Feature: &ARM64.HasEVTSTRM}, + {Name: "aes", Feature: &ARM64.HasAES}, + {Name: "fphp", Feature: &ARM64.HasFPHP}, + {Name: "jscvt", Feature: &ARM64.HasJSCVT}, + {Name: "lrcpc", Feature: &ARM64.HasLRCPC}, + {Name: "pmull", Feature: &ARM64.HasPMULL}, + {Name: "sha1", Feature: &ARM64.HasSHA1}, + {Name: "sha2", Feature: &ARM64.HasSHA2}, + {Name: "sha3", Feature: &ARM64.HasSHA3}, + {Name: "sha512", Feature: &ARM64.HasSHA512}, + {Name: "sm3", Feature: &ARM64.HasSM3}, + {Name: "sm4", Feature: &ARM64.HasSM4}, + {Name: "sve", Feature: &ARM64.HasSVE}, + {Name: "crc32", Feature: &ARM64.HasCRC32}, + {Name: "atomics", Feature: &ARM64.HasATOMICS}, + {Name: "asimdhp", Feature: &ARM64.HasASIMDHP}, + {Name: "cpuid", Feature: &ARM64.HasCPUID}, + {Name: "asimrdm", Feature: &ARM64.HasASIMDRDM}, + {Name: "fcma", Feature: &ARM64.HasFCMA}, + {Name: "dcpop", Feature: &ARM64.HasDCPOP}, + {Name: "asimddp", Feature: &ARM64.HasASIMDDP}, + {Name: "asimdfhm", Feature: &ARM64.HasASIMDFHM}, + } +} + +func archInit() { + switch runtime.GOOS { + case "freebsd": + readARM64Registers() + case "linux", "netbsd": + doinit() + default: + // Most platforms don't seem to allow reading these registers. + // + // OpenBSD: + // See https://golang.org/issue/31746 + setMinimalFeatures() + } +} + +// setMinimalFeatures fakes the minimal ARM64 features expected by +// TestARM64minimalFeatures. +func setMinimalFeatures() { + ARM64.HasASIMD = true + ARM64.HasFP = true +} + +func readARM64Registers() { + Initialized = true + + parseARM64SystemRegisters(getisar0(), getisar1(), getpfr0()) +} + +func parseARM64SystemRegisters(isar0, isar1, pfr0 uint64) { + // ID_AA64ISAR0_EL1 + switch extractBits(isar0, 4, 7) { + case 1: + ARM64.HasAES = true + case 2: + ARM64.HasAES = true + ARM64.HasPMULL = true + } + + switch extractBits(isar0, 8, 11) { + case 1: + ARM64.HasSHA1 = true + } + + switch extractBits(isar0, 12, 15) { + case 1: + ARM64.HasSHA2 = true + case 2: + ARM64.HasSHA2 = true + ARM64.HasSHA512 = true + } + + switch extractBits(isar0, 16, 19) { + case 1: + ARM64.HasCRC32 = true + } + + switch extractBits(isar0, 20, 23) { + case 2: + ARM64.HasATOMICS = true + } + + switch extractBits(isar0, 28, 31) { + case 1: + ARM64.HasASIMDRDM = true + } + + switch extractBits(isar0, 32, 35) { + case 1: + ARM64.HasSHA3 = true + } + + switch extractBits(isar0, 36, 39) { + case 1: + ARM64.HasSM3 = true + } + + switch extractBits(isar0, 40, 43) { + case 1: + ARM64.HasSM4 = true + } + + switch extractBits(isar0, 44, 47) { + case 1: + ARM64.HasASIMDDP = true + } + + // ID_AA64ISAR1_EL1 + switch extractBits(isar1, 0, 3) { + case 1: + ARM64.HasDCPOP = true + } + + switch extractBits(isar1, 12, 15) { + case 1: + ARM64.HasJSCVT = true + } + + switch extractBits(isar1, 16, 19) { + case 1: + ARM64.HasFCMA = true + } + + switch extractBits(isar1, 20, 23) { + case 1: + ARM64.HasLRCPC = true + } + + // ID_AA64PFR0_EL1 + switch extractBits(pfr0, 16, 19) { + case 0: + ARM64.HasFP = true + case 1: + ARM64.HasFP = true + ARM64.HasFPHP = true + } + + switch extractBits(pfr0, 20, 23) { + case 0: + ARM64.HasASIMD = true + case 1: + ARM64.HasASIMD = true + ARM64.HasASIMDHP = true + } + + switch extractBits(pfr0, 32, 35) { + case 1: + ARM64.HasSVE = true + } +} + +func extractBits(data uint64, start, end uint) uint { + return (uint)(data>>start) & ((1 << (end - start + 
1)) - 1) +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.s b/vendor/golang.org/x/sys/cpu/cpu_arm64.s new file mode 100644 index 000000000..c61f95a05 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.s @@ -0,0 +1,32 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc +// +build gc + +#include "textflag.h" + +// func getisar0() uint64 +TEXT ·getisar0(SB),NOSPLIT,$0-8 + // get Instruction Set Attributes 0 into x0 + // mrs x0, ID_AA64ISAR0_EL1 = d5380600 + WORD $0xd5380600 + MOVD R0, ret+0(FP) + RET + +// func getisar1() uint64 +TEXT ·getisar1(SB),NOSPLIT,$0-8 + // get Instruction Set Attributes 1 into x0 + // mrs x0, ID_AA64ISAR1_EL1 = d5380620 + WORD $0xd5380620 + MOVD R0, ret+0(FP) + RET + +// func getpfr0() uint64 +TEXT ·getpfr0(SB),NOSPLIT,$0-8 + // get Processor Feature Register 0 into x0 + // mrs x0, ID_AA64PFR0_EL1 = d5380400 + WORD $0xd5380400 + MOVD R0, ret+0(FP) + RET diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go new file mode 100644 index 000000000..ccf542a73 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go @@ -0,0 +1,12 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc +// +build gc + +package cpu + +func getisar0() uint64 +func getisar1() uint64 +func getpfr0() uint64 diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go new file mode 100644 index 000000000..0af2f2484 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go @@ -0,0 +1,22 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc +// +build gc + +package cpu + +// haveAsmFunctions reports whether the other functions in this file can +// be safely called. +func haveAsmFunctions() bool { return true } + +// The following feature detection functions are defined in cpu_s390x.s. +// They are likely to be expensive to call so the results should be cached. +func stfle() facilityList +func kmQuery() queryResult +func kmcQuery() queryResult +func kmctrQuery() queryResult +func kmaQuery() queryResult +func kimdQuery() queryResult +func klmdQuery() queryResult diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go new file mode 100644 index 000000000..3298a87e9 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go @@ -0,0 +1,21 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (386 || amd64 || amd64p32) && gc +// +build 386 amd64 amd64p32 +// +build gc + +package cpu + +// cpuid is implemented in cpu_x86.s for gc compiler +// and in cpu_gccgo.c for gccgo. +func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) + +// xgetbv with ecx = 0 is implemented in cpu_x86.s for gc compiler +// and in cpu_gccgo.c for gccgo. +func xgetbv() (eax, edx uint32) + +// darwinSupportsAVX512 is implemented in cpu_x86.s for gc compiler +// and in cpu_gccgo_x86.go for gccgo. 
+func darwinSupportsAVX512() bool
diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go
new file mode 100644
index 000000000..2aff31891
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go
@@ -0,0 +1,12 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gccgo
+// +build gccgo
+
+package cpu
+
+func getisar0() uint64 { return 0 }
+func getisar1() uint64 { return 0 }
+func getpfr0() uint64 { return 0 }
diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go
new file mode 100644
index 000000000..4bfbda619
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go
@@ -0,0 +1,23 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gccgo
+// +build gccgo
+
+package cpu
+
+// haveAsmFunctions reports whether the other functions in this file can
+// be safely called.
+func haveAsmFunctions() bool { return false }
+
+// TODO(mundaym): the following feature detection functions are currently
+// stubs. See https://golang.org/cl/162887 for how to fix this.
+// They are likely to be expensive to call so the results should be cached.
+func stfle() facilityList { panic("not implemented for gccgo") }
+func kmQuery() queryResult { panic("not implemented for gccgo") }
+func kmcQuery() queryResult { panic("not implemented for gccgo") }
+func kmctrQuery() queryResult { panic("not implemented for gccgo") }
+func kmaQuery() queryResult { panic("not implemented for gccgo") }
+func kimdQuery() queryResult { panic("not implemented for gccgo") }
+func klmdQuery() queryResult { panic("not implemented for gccgo") }
diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c
new file mode 100644
index 000000000..e363c7d13
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c
@@ -0,0 +1,43 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build 386 amd64 amd64p32
+// +build gccgo
+
+#include <cpuid.h>
+#include <stdint.h>
+
+// Need to wrap __get_cpuid_count because it's declared as static.
+int
+gccgoGetCpuidCount(uint32_t leaf, uint32_t subleaf,
+                   uint32_t *eax, uint32_t *ebx,
+                   uint32_t *ecx, uint32_t *edx)
+{
+	return __get_cpuid_count(leaf, subleaf, eax, ebx, ecx, edx);
+}
+
+// xgetbv reads the contents of an XCR (Extended Control Register)
+// specified in the ECX register into registers EDX:EAX.
+// Currently, the only supported value for XCR is 0.
+//
+// TODO: Replace with a better alternative:
+//
+// #include <immintrin.h>
+//
+// #pragma GCC target("xsave")
+//
+// void gccgoXgetbv(uint32_t *eax, uint32_t *edx) {
+//   unsigned long long x = _xgetbv(0);
+//   *eax = x & 0xffffffff;
+//   *edx = (x >> 32) & 0xffffffff;
+// }
+//
+// Note that _xgetbv is defined starting with GCC 8.
+void
+gccgoXgetbv(uint32_t *eax, uint32_t *edx)
+{
+	__asm("  xorl %%ecx, %%ecx\n"
+	      "  xgetbv"
+	    : "=a"(*eax), "=d"(*edx));
+}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go
new file mode 100644
index 000000000..863d415ab
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go
@@ -0,0 +1,33 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (386 || amd64 || amd64p32) && gccgo +// +build 386 amd64 amd64p32 +// +build gccgo + +package cpu + +//extern gccgoGetCpuidCount +func gccgoGetCpuidCount(eaxArg, ecxArg uint32, eax, ebx, ecx, edx *uint32) + +func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) { + var a, b, c, d uint32 + gccgoGetCpuidCount(eaxArg, ecxArg, &a, &b, &c, &d) + return a, b, c, d +} + +//extern gccgoXgetbv +func gccgoXgetbv(eax, edx *uint32) + +func xgetbv() (eax, edx uint32) { + var a, d uint32 + gccgoXgetbv(&a, &d) + return a, d +} + +// gccgo doesn't build on Darwin, per: +// https://github.com/Homebrew/homebrew-core/blob/HEAD/Formula/gcc.rb#L76 +func darwinSupportsAVX512() bool { + return false +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux.go b/vendor/golang.org/x/sys/cpu/cpu_linux.go new file mode 100644 index 000000000..159a686f6 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux.go @@ -0,0 +1,16 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !386 && !amd64 && !amd64p32 && !arm64 +// +build !386,!amd64,!amd64p32,!arm64 + +package cpu + +func archInit() { + if err := readHWCAP(); err != nil { + return + } + doinit() + Initialized = true +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go b/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go new file mode 100644 index 000000000..2057006dc --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go @@ -0,0 +1,39 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +func doinit() { + ARM.HasSWP = isSet(hwCap, hwcap_SWP) + ARM.HasHALF = isSet(hwCap, hwcap_HALF) + ARM.HasTHUMB = isSet(hwCap, hwcap_THUMB) + ARM.Has26BIT = isSet(hwCap, hwcap_26BIT) + ARM.HasFASTMUL = isSet(hwCap, hwcap_FAST_MULT) + ARM.HasFPA = isSet(hwCap, hwcap_FPA) + ARM.HasVFP = isSet(hwCap, hwcap_VFP) + ARM.HasEDSP = isSet(hwCap, hwcap_EDSP) + ARM.HasJAVA = isSet(hwCap, hwcap_JAVA) + ARM.HasIWMMXT = isSet(hwCap, hwcap_IWMMXT) + ARM.HasCRUNCH = isSet(hwCap, hwcap_CRUNCH) + ARM.HasTHUMBEE = isSet(hwCap, hwcap_THUMBEE) + ARM.HasNEON = isSet(hwCap, hwcap_NEON) + ARM.HasVFPv3 = isSet(hwCap, hwcap_VFPv3) + ARM.HasVFPv3D16 = isSet(hwCap, hwcap_VFPv3D16) + ARM.HasTLS = isSet(hwCap, hwcap_TLS) + ARM.HasVFPv4 = isSet(hwCap, hwcap_VFPv4) + ARM.HasIDIVA = isSet(hwCap, hwcap_IDIVA) + ARM.HasIDIVT = isSet(hwCap, hwcap_IDIVT) + ARM.HasVFPD32 = isSet(hwCap, hwcap_VFPD32) + ARM.HasLPAE = isSet(hwCap, hwcap_LPAE) + ARM.HasEVTSTRM = isSet(hwCap, hwcap_EVTSTRM) + ARM.HasAES = isSet(hwCap2, hwcap2_AES) + ARM.HasPMULL = isSet(hwCap2, hwcap2_PMULL) + ARM.HasSHA1 = isSet(hwCap2, hwcap2_SHA1) + ARM.HasSHA2 = isSet(hwCap2, hwcap2_SHA2) + ARM.HasCRC32 = isSet(hwCap2, hwcap2_CRC32) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go new file mode 100644 index 000000000..79a38a0b9 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go @@ -0,0 +1,71 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +// HWCAP/HWCAP2 bits. These are exposed by Linux. 
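+// The bit values below mirror the Linux kernel's arm64 uapi header
+// (arch/arm64/include/uapi/asm/hwcap.h).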
+const ( + hwcap_FP = 1 << 0 + hwcap_ASIMD = 1 << 1 + hwcap_EVTSTRM = 1 << 2 + hwcap_AES = 1 << 3 + hwcap_PMULL = 1 << 4 + hwcap_SHA1 = 1 << 5 + hwcap_SHA2 = 1 << 6 + hwcap_CRC32 = 1 << 7 + hwcap_ATOMICS = 1 << 8 + hwcap_FPHP = 1 << 9 + hwcap_ASIMDHP = 1 << 10 + hwcap_CPUID = 1 << 11 + hwcap_ASIMDRDM = 1 << 12 + hwcap_JSCVT = 1 << 13 + hwcap_FCMA = 1 << 14 + hwcap_LRCPC = 1 << 15 + hwcap_DCPOP = 1 << 16 + hwcap_SHA3 = 1 << 17 + hwcap_SM3 = 1 << 18 + hwcap_SM4 = 1 << 19 + hwcap_ASIMDDP = 1 << 20 + hwcap_SHA512 = 1 << 21 + hwcap_SVE = 1 << 22 + hwcap_ASIMDFHM = 1 << 23 +) + +func doinit() { + if err := readHWCAP(); err != nil { + // failed to read /proc/self/auxv, try reading registers directly + readARM64Registers() + return + } + + // HWCAP feature bits + ARM64.HasFP = isSet(hwCap, hwcap_FP) + ARM64.HasASIMD = isSet(hwCap, hwcap_ASIMD) + ARM64.HasEVTSTRM = isSet(hwCap, hwcap_EVTSTRM) + ARM64.HasAES = isSet(hwCap, hwcap_AES) + ARM64.HasPMULL = isSet(hwCap, hwcap_PMULL) + ARM64.HasSHA1 = isSet(hwCap, hwcap_SHA1) + ARM64.HasSHA2 = isSet(hwCap, hwcap_SHA2) + ARM64.HasCRC32 = isSet(hwCap, hwcap_CRC32) + ARM64.HasATOMICS = isSet(hwCap, hwcap_ATOMICS) + ARM64.HasFPHP = isSet(hwCap, hwcap_FPHP) + ARM64.HasASIMDHP = isSet(hwCap, hwcap_ASIMDHP) + ARM64.HasCPUID = isSet(hwCap, hwcap_CPUID) + ARM64.HasASIMDRDM = isSet(hwCap, hwcap_ASIMDRDM) + ARM64.HasJSCVT = isSet(hwCap, hwcap_JSCVT) + ARM64.HasFCMA = isSet(hwCap, hwcap_FCMA) + ARM64.HasLRCPC = isSet(hwCap, hwcap_LRCPC) + ARM64.HasDCPOP = isSet(hwCap, hwcap_DCPOP) + ARM64.HasSHA3 = isSet(hwCap, hwcap_SHA3) + ARM64.HasSM3 = isSet(hwCap, hwcap_SM3) + ARM64.HasSM4 = isSet(hwCap, hwcap_SM4) + ARM64.HasASIMDDP = isSet(hwCap, hwcap_ASIMDDP) + ARM64.HasSHA512 = isSet(hwCap, hwcap_SHA512) + ARM64.HasSVE = isSet(hwCap, hwcap_SVE) + ARM64.HasASIMDFHM = isSet(hwCap, hwcap_ASIMDFHM) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go new file mode 100644 index 000000000..6000db4cd --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go @@ -0,0 +1,24 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux && (mips64 || mips64le) +// +build linux +// +build mips64 mips64le + +package cpu + +// HWCAP bits. These are exposed by the Linux kernel 5.4. +const ( + // CPU features + hwcap_MIPS_MSA = 1 << 1 +) + +func doinit() { + // HWCAP feature bits + MIPS64X.HasMSA = isSet(hwCap, hwcap_MIPS_MSA) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go b/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go new file mode 100644 index 000000000..f4992b1a5 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go @@ -0,0 +1,10 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
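+//
+// doinit is a no-op here: for these Linux targets there are no optional
+// feature bits to map. On the architectures handled above, hwCap and hwCap2
+// are filled in by readHWCAP from the AT_HWCAP/AT_HWCAP2 entries of the ELF
+// auxiliary vector (typically /proc/self/auxv) before doinit runs.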
+ +//go:build linux && !arm && !arm64 && !mips64 && !mips64le && !ppc64 && !ppc64le && !s390x +// +build linux,!arm,!arm64,!mips64,!mips64le,!ppc64,!ppc64le,!s390x + +package cpu + +func doinit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go new file mode 100644 index 000000000..021356d6d --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go @@ -0,0 +1,32 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux && (ppc64 || ppc64le) +// +build linux +// +build ppc64 ppc64le + +package cpu + +// HWCAP/HWCAP2 bits. These are exposed by the kernel. +const ( + // ISA Level + _PPC_FEATURE2_ARCH_2_07 = 0x80000000 + _PPC_FEATURE2_ARCH_3_00 = 0x00800000 + + // CPU features + _PPC_FEATURE2_DARN = 0x00200000 + _PPC_FEATURE2_SCV = 0x00100000 +) + +func doinit() { + // HWCAP2 feature bits + PPC64.IsPOWER8 = isSet(hwCap2, _PPC_FEATURE2_ARCH_2_07) + PPC64.IsPOWER9 = isSet(hwCap2, _PPC_FEATURE2_ARCH_3_00) + PPC64.HasDARN = isSet(hwCap2, _PPC_FEATURE2_DARN) + PPC64.HasSCV = isSet(hwCap2, _PPC_FEATURE2_SCV) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go new file mode 100644 index 000000000..1517ac61d --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go @@ -0,0 +1,40 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +const ( + // bit mask values from /usr/include/bits/hwcap.h + hwcap_ZARCH = 2 + hwcap_STFLE = 4 + hwcap_MSA = 8 + hwcap_LDISP = 16 + hwcap_EIMM = 32 + hwcap_DFP = 64 + hwcap_ETF3EH = 256 + hwcap_VX = 2048 + hwcap_VXE = 8192 +) + +func initS390Xbase() { + // test HWCAP bit vector + has := func(featureMask uint) bool { + return hwCap&featureMask == featureMask + } + + // mandatory + S390X.HasZARCH = has(hwcap_ZARCH) + + // optional + S390X.HasSTFLE = has(hwcap_STFLE) + S390X.HasLDISP = has(hwcap_LDISP) + S390X.HasEIMM = has(hwcap_EIMM) + S390X.HasETF3EH = has(hwcap_ETF3EH) + S390X.HasDFP = has(hwcap_DFP) + S390X.HasMSA = has(hwcap_MSA) + S390X.HasVX = has(hwcap_VX) + if S390X.HasVX { + S390X.HasVXE = has(hwcap_VXE) + } +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_mips64x.go new file mode 100644 index 000000000..f4063c664 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_mips64x.go @@ -0,0 +1,16 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build mips64 || mips64le +// +build mips64 mips64le + +package cpu + +const cacheLineSize = 32 + +func initOptions() { + options = []option{ + {Name: "msa", Feature: &MIPS64X.HasMSA}, + } +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_mipsx.go b/vendor/golang.org/x/sys/cpu/cpu_mipsx.go new file mode 100644 index 000000000..07c4e36d8 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_mipsx.go @@ -0,0 +1,12 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build mips || mipsle +// +build mips mipsle + +package cpu + +const cacheLineSize = 32 + +func initOptions() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go new file mode 100644 index 000000000..ebfb3fc8e --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go @@ -0,0 +1,173 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import ( + "syscall" + "unsafe" +) + +// Minimal copy of functionality from x/sys/unix so the cpu package can call +// sysctl without depending on x/sys/unix. + +const ( + _CTL_QUERY = -2 + + _SYSCTL_VERS_1 = 0x1000000 +) + +var _zero uintptr + +func sysctl(mib []int32, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, errno := syscall.Syscall6( + syscall.SYS___SYSCTL, + uintptr(_p0), + uintptr(len(mib)), + uintptr(unsafe.Pointer(old)), + uintptr(unsafe.Pointer(oldlen)), + uintptr(unsafe.Pointer(new)), + uintptr(newlen)) + if errno != 0 { + return errno + } + return nil +} + +type sysctlNode struct { + Flags uint32 + Num int32 + Name [32]int8 + Ver uint32 + __rsvd uint32 + Un [16]byte + _sysctl_size [8]byte + _sysctl_func [8]byte + _sysctl_parent [8]byte + _sysctl_desc [8]byte +} + +func sysctlNodes(mib []int32) ([]sysctlNode, error) { + var olen uintptr + + // Get a list of all sysctl nodes below the given MIB by performing + // a sysctl for the given MIB with CTL_QUERY appended. + mib = append(mib, _CTL_QUERY) + qnode := sysctlNode{Flags: _SYSCTL_VERS_1} + qp := (*byte)(unsafe.Pointer(&qnode)) + sz := unsafe.Sizeof(qnode) + if err := sysctl(mib, nil, &olen, qp, sz); err != nil { + return nil, err + } + + // Now that we know the size, get the actual nodes. + nodes := make([]sysctlNode, olen/sz) + np := (*byte)(unsafe.Pointer(&nodes[0])) + if err := sysctl(mib, np, &olen, qp, sz); err != nil { + return nil, err + } + + return nodes, nil +} + +func nametomib(name string) ([]int32, error) { + // Split name into components. + var parts []string + last := 0 + for i := 0; i < len(name); i++ { + if name[i] == '.' { + parts = append(parts, name[last:i]) + last = i + 1 + } + } + parts = append(parts, name[last:]) + + mib := []int32{} + // Discover the nodes and construct the MIB OID. 
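+	// For example, "machdep.cpu0.cpu_id" is resolved one component at a
+	// time: list the children of the MIB assembled so far, pick the child
+	// whose name matches the component, and append its node number.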
+	for partno, part := range parts {
+		nodes, err := sysctlNodes(mib)
+		if err != nil {
+			return nil, err
+		}
+		for _, node := range nodes {
+			n := make([]byte, 0)
+			for i := range node.Name {
+				if node.Name[i] != 0 {
+					n = append(n, byte(node.Name[i]))
+				}
+			}
+			if string(n) == part {
+				mib = append(mib, int32(node.Num))
+				break
+			}
+		}
+		if len(mib) != partno+1 {
+			// No child node matched this name component; err is nil at
+			// this point, so return a real error rather than (nil, nil).
+			return nil, syscall.ENOENT
+		}
+	}
+
+	return mib, nil
+}
+
+// aarch64SysctlCPUID is struct aarch64_sysctl_cpu_id from NetBSD's <aarch64/armreg.h>.
+type aarch64SysctlCPUID struct {
+	midr      uint64 /* Main ID Register */
+	revidr    uint64 /* Revision ID Register */
+	mpidr     uint64 /* Multiprocessor Affinity Register */
+	aa64dfr0  uint64 /* A64 Debug Feature Register 0 */
+	aa64dfr1  uint64 /* A64 Debug Feature Register 1 */
+	aa64isar0 uint64 /* A64 Instruction Set Attribute Register 0 */
+	aa64isar1 uint64 /* A64 Instruction Set Attribute Register 1 */
+	aa64mmfr0 uint64 /* A64 Memory Model Feature Register 0 */
+	aa64mmfr1 uint64 /* A64 Memory Model Feature Register 1 */
+	aa64mmfr2 uint64 /* A64 Memory Model Feature Register 2 */
+	aa64pfr0  uint64 /* A64 Processor Feature Register 0 */
+	aa64pfr1  uint64 /* A64 Processor Feature Register 1 */
+	aa64zfr0  uint64 /* A64 SVE Feature ID Register 0 */
+	mvfr0     uint32 /* Media and VFP Feature Register 0 */
+	mvfr1     uint32 /* Media and VFP Feature Register 1 */
+	mvfr2     uint32 /* Media and VFP Feature Register 2 */
+	pad       uint32
+	clidr     uint64 /* Cache Level ID Register */
+	ctr       uint64 /* Cache Type Register */
+}
+
+func sysctlCPUID(name string) (*aarch64SysctlCPUID, error) {
+	mib, err := nametomib(name)
+	if err != nil {
+		return nil, err
+	}
+
+	out := aarch64SysctlCPUID{}
+	n := unsafe.Sizeof(out)
+	_, _, errno := syscall.Syscall6(
+		syscall.SYS___SYSCTL,
+		uintptr(unsafe.Pointer(&mib[0])),
+		uintptr(len(mib)),
+		uintptr(unsafe.Pointer(&out)),
+		uintptr(unsafe.Pointer(&n)),
+		uintptr(0),
+		uintptr(0))
+	if errno != 0 {
+		return nil, errno
+	}
+	return &out, nil
+}
+
+func doinit() {
+	cpuid, err := sysctlCPUID("machdep.cpu0.cpu_id")
+	if err != nil {
+		setMinimalFeatures()
+		return
+	}
+	parseARM64SystemRegisters(cpuid.aa64isar0, cpuid.aa64isar1, cpuid.aa64pfr0)
+
+	Initialized = true
+}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_arm.go b/vendor/golang.org/x/sys/cpu/cpu_other_arm.go
new file mode 100644
index 000000000..d7b4fb4cc
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_other_arm.go
@@ -0,0 +1,10 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !linux && arm
+// +build !linux,arm
+
+package cpu
+
+func archInit() {}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go
new file mode 100644
index 000000000..f8c484f58
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go
@@ -0,0 +1,10 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !linux && !netbsd && arm64
+// +build !linux,!netbsd,arm64
+
+package cpu
+
+func doinit() {}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go
new file mode 100644
index 000000000..0dafe9644
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go
@@ -0,0 +1,13 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !linux && (mips64 || mips64le) +// +build !linux +// +build mips64 mips64le + +package cpu + +func archInit() { + Initialized = true +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go new file mode 100644 index 000000000..4e8acd165 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go @@ -0,0 +1,17 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ppc64 || ppc64le +// +build ppc64 ppc64le + +package cpu + +const cacheLineSize = 128 + +func initOptions() { + options = []option{ + {Name: "darn", Feature: &PPC64.HasDARN}, + {Name: "scv", Feature: &PPC64.HasSCV}, + } +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_riscv64.go b/vendor/golang.org/x/sys/cpu/cpu_riscv64.go new file mode 100644 index 000000000..bd6c128af --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_riscv64.go @@ -0,0 +1,12 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build riscv64 +// +build riscv64 + +package cpu + +const cacheLineSize = 32 + +func initOptions() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_s390x.go new file mode 100644 index 000000000..5881b8833 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_s390x.go @@ -0,0 +1,172 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +const cacheLineSize = 256 + +func initOptions() { + options = []option{ + {Name: "zarch", Feature: &S390X.HasZARCH, Required: true}, + {Name: "stfle", Feature: &S390X.HasSTFLE, Required: true}, + {Name: "ldisp", Feature: &S390X.HasLDISP, Required: true}, + {Name: "eimm", Feature: &S390X.HasEIMM, Required: true}, + {Name: "dfp", Feature: &S390X.HasDFP}, + {Name: "etf3eh", Feature: &S390X.HasETF3EH}, + {Name: "msa", Feature: &S390X.HasMSA}, + {Name: "aes", Feature: &S390X.HasAES}, + {Name: "aescbc", Feature: &S390X.HasAESCBC}, + {Name: "aesctr", Feature: &S390X.HasAESCTR}, + {Name: "aesgcm", Feature: &S390X.HasAESGCM}, + {Name: "ghash", Feature: &S390X.HasGHASH}, + {Name: "sha1", Feature: &S390X.HasSHA1}, + {Name: "sha256", Feature: &S390X.HasSHA256}, + {Name: "sha3", Feature: &S390X.HasSHA3}, + {Name: "sha512", Feature: &S390X.HasSHA512}, + {Name: "vx", Feature: &S390X.HasVX}, + {Name: "vxe", Feature: &S390X.HasVXE}, + } +} + +// bitIsSet reports whether the bit at index is set. The bit index +// is in big endian order, so bit index 0 is the leftmost bit. +func bitIsSet(bits []uint64, index uint) bool { + return bits[index/64]&((1<<63)>>(index%64)) != 0 +} + +// facility is a bit index for the named facility. 
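+// Facility bits are obtained with the STFLE instruction (see stfle in
+// cpu_s390x.s) and tested through facilityList.Has below.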
+type facility uint8 + +const ( + // mandatory facilities + zarch facility = 1 // z architecture mode is active + stflef facility = 7 // store-facility-list-extended + ldisp facility = 18 // long-displacement + eimm facility = 21 // extended-immediate + + // miscellaneous facilities + dfp facility = 42 // decimal-floating-point + etf3eh facility = 30 // extended-translation 3 enhancement + + // cryptography facilities + msa facility = 17 // message-security-assist + msa3 facility = 76 // message-security-assist extension 3 + msa4 facility = 77 // message-security-assist extension 4 + msa5 facility = 57 // message-security-assist extension 5 + msa8 facility = 146 // message-security-assist extension 8 + msa9 facility = 155 // message-security-assist extension 9 + + // vector facilities + vx facility = 129 // vector facility + vxe facility = 135 // vector-enhancements 1 + vxe2 facility = 148 // vector-enhancements 2 +) + +// facilityList contains the result of an STFLE call. +// Bits are numbered in big endian order so the +// leftmost bit (the MSB) is at index 0. +type facilityList struct { + bits [4]uint64 +} + +// Has reports whether the given facilities are present. +func (s *facilityList) Has(fs ...facility) bool { + if len(fs) == 0 { + panic("no facility bits provided") + } + for _, f := range fs { + if !bitIsSet(s.bits[:], uint(f)) { + return false + } + } + return true +} + +// function is the code for the named cryptographic function. +type function uint8 + +const ( + // KM{,A,C,CTR} function codes + aes128 function = 18 // AES-128 + aes192 function = 19 // AES-192 + aes256 function = 20 // AES-256 + + // K{I,L}MD function codes + sha1 function = 1 // SHA-1 + sha256 function = 2 // SHA-256 + sha512 function = 3 // SHA-512 + sha3_224 function = 32 // SHA3-224 + sha3_256 function = 33 // SHA3-256 + sha3_384 function = 34 // SHA3-384 + sha3_512 function = 35 // SHA3-512 + shake128 function = 36 // SHAKE-128 + shake256 function = 37 // SHAKE-256 + + // KLMD function codes + ghash function = 65 // GHASH +) + +// queryResult contains the result of a Query function +// call. Bits are numbered in big endian order so the +// leftmost bit (the MSB) is at index 0. +type queryResult struct { + bits [2]uint64 +} + +// Has reports whether the given functions are present. +func (q *queryResult) Has(fns ...function) bool { + if len(fns) == 0 { + panic("no function codes provided") + } + for _, f := range fns { + if !bitIsSet(q.bits[:], uint(f)) { + return false + } + } + return true +} + +func doinit() { + initS390Xbase() + + // We need implementations of stfle, km and so on + // to detect cryptographic features. + if !haveAsmFunctions() { + return + } + + // optional cryptographic functions + if S390X.HasMSA { + aes := []function{aes128, aes192, aes256} + + // cipher message + km, kmc := kmQuery(), kmcQuery() + S390X.HasAES = km.Has(aes...) + S390X.HasAESCBC = kmc.Has(aes...) + if S390X.HasSTFLE { + facilities := stfle() + if facilities.Has(msa4) { + kmctr := kmctrQuery() + S390X.HasAESCTR = kmctr.Has(aes...) + } + if facilities.Has(msa8) { + kma := kmaQuery() + S390X.HasAESGCM = kma.Has(aes...) 
+ } + } + + // compute message digest + kimd := kimdQuery() // intermediate (no padding) + klmd := klmdQuery() // last (padding) + S390X.HasSHA1 = kimd.Has(sha1) && klmd.Has(sha1) + S390X.HasSHA256 = kimd.Has(sha256) && klmd.Has(sha256) + S390X.HasSHA512 = kimd.Has(sha512) && klmd.Has(sha512) + S390X.HasGHASH = kimd.Has(ghash) // KLMD-GHASH does not exist + sha3 := []function{ + sha3_224, sha3_256, sha3_384, sha3_512, + shake128, shake256, + } + S390X.HasSHA3 = kimd.Has(sha3...) && klmd.Has(sha3...) + } +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_s390x.s b/vendor/golang.org/x/sys/cpu/cpu_s390x.s new file mode 100644 index 000000000..96f81e209 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_s390x.s @@ -0,0 +1,58 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc +// +build gc + +#include "textflag.h" + +// func stfle() facilityList +TEXT ·stfle(SB), NOSPLIT|NOFRAME, $0-32 + MOVD $ret+0(FP), R1 + MOVD $3, R0 // last doubleword index to store + XC $32, (R1), (R1) // clear 4 doublewords (32 bytes) + WORD $0xb2b01000 // store facility list extended (STFLE) + RET + +// func kmQuery() queryResult +TEXT ·kmQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KM-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB92E0024 // cipher message (KM) + RET + +// func kmcQuery() queryResult +TEXT ·kmcQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KMC-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB92F0024 // cipher message with chaining (KMC) + RET + +// func kmctrQuery() queryResult +TEXT ·kmctrQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KMCTR-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB92D4024 // cipher message with counter (KMCTR) + RET + +// func kmaQuery() queryResult +TEXT ·kmaQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KMA-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xb9296024 // cipher message with authentication (KMA) + RET + +// func kimdQuery() queryResult +TEXT ·kimdQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KIMD-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB93E0024 // compute intermediate message digest (KIMD) + RET + +// func klmdQuery() queryResult +TEXT ·klmdQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KLMD-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB93F0024 // compute last message digest (KLMD) + RET diff --git a/vendor/golang.org/x/sys/cpu/cpu_wasm.go b/vendor/golang.org/x/sys/cpu/cpu_wasm.go new file mode 100644 index 000000000..7747d888a --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_wasm.go @@ -0,0 +1,18 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build wasm +// +build wasm + +package cpu + +// We're compiling the cpu package for an unknown (software-abstracted) CPU. +// Make CacheLinePad an empty struct and hope that the usual struct alignment +// rules are good enough. 
+ +const cacheLineSize = 0 + +func initOptions() {} + +func archInit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.go b/vendor/golang.org/x/sys/cpu/cpu_x86.go new file mode 100644 index 000000000..5ea287b7e --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_x86.go @@ -0,0 +1,144 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build 386 || amd64 || amd64p32 +// +build 386 amd64 amd64p32 + +package cpu + +import "runtime" + +const cacheLineSize = 64 + +func initOptions() { + options = []option{ + {Name: "adx", Feature: &X86.HasADX}, + {Name: "aes", Feature: &X86.HasAES}, + {Name: "avx", Feature: &X86.HasAVX}, + {Name: "avx2", Feature: &X86.HasAVX2}, + {Name: "avx512", Feature: &X86.HasAVX512}, + {Name: "avx512f", Feature: &X86.HasAVX512F}, + {Name: "avx512cd", Feature: &X86.HasAVX512CD}, + {Name: "avx512er", Feature: &X86.HasAVX512ER}, + {Name: "avx512pf", Feature: &X86.HasAVX512PF}, + {Name: "avx512vl", Feature: &X86.HasAVX512VL}, + {Name: "avx512bw", Feature: &X86.HasAVX512BW}, + {Name: "avx512dq", Feature: &X86.HasAVX512DQ}, + {Name: "avx512ifma", Feature: &X86.HasAVX512IFMA}, + {Name: "avx512vbmi", Feature: &X86.HasAVX512VBMI}, + {Name: "avx512vnniw", Feature: &X86.HasAVX5124VNNIW}, + {Name: "avx5124fmaps", Feature: &X86.HasAVX5124FMAPS}, + {Name: "avx512vpopcntdq", Feature: &X86.HasAVX512VPOPCNTDQ}, + {Name: "avx512vpclmulqdq", Feature: &X86.HasAVX512VPCLMULQDQ}, + {Name: "avx512vnni", Feature: &X86.HasAVX512VNNI}, + {Name: "avx512gfni", Feature: &X86.HasAVX512GFNI}, + {Name: "avx512vaes", Feature: &X86.HasAVX512VAES}, + {Name: "avx512vbmi2", Feature: &X86.HasAVX512VBMI2}, + {Name: "avx512bitalg", Feature: &X86.HasAVX512BITALG}, + {Name: "avx512bf16", Feature: &X86.HasAVX512BF16}, + {Name: "bmi1", Feature: &X86.HasBMI1}, + {Name: "bmi2", Feature: &X86.HasBMI2}, + {Name: "cx16", Feature: &X86.HasCX16}, + {Name: "erms", Feature: &X86.HasERMS}, + {Name: "fma", Feature: &X86.HasFMA}, + {Name: "osxsave", Feature: &X86.HasOSXSAVE}, + {Name: "pclmulqdq", Feature: &X86.HasPCLMULQDQ}, + {Name: "popcnt", Feature: &X86.HasPOPCNT}, + {Name: "rdrand", Feature: &X86.HasRDRAND}, + {Name: "rdseed", Feature: &X86.HasRDSEED}, + {Name: "sse3", Feature: &X86.HasSSE3}, + {Name: "sse41", Feature: &X86.HasSSE41}, + {Name: "sse42", Feature: &X86.HasSSE42}, + {Name: "ssse3", Feature: &X86.HasSSSE3}, + + // These capabilities should always be enabled on amd64: + {Name: "sse2", Feature: &X86.HasSSE2, Required: runtime.GOARCH == "amd64"}, + } +} + +func archInit() { + + Initialized = true + + maxID, _, _, _ := cpuid(0, 0) + + if maxID < 1 { + return + } + + _, _, ecx1, edx1 := cpuid(1, 0) + X86.HasSSE2 = isSet(26, edx1) + + X86.HasSSE3 = isSet(0, ecx1) + X86.HasPCLMULQDQ = isSet(1, ecx1) + X86.HasSSSE3 = isSet(9, ecx1) + X86.HasFMA = isSet(12, ecx1) + X86.HasCX16 = isSet(13, ecx1) + X86.HasSSE41 = isSet(19, ecx1) + X86.HasSSE42 = isSet(20, ecx1) + X86.HasPOPCNT = isSet(23, ecx1) + X86.HasAES = isSet(25, ecx1) + X86.HasOSXSAVE = isSet(27, ecx1) + X86.HasRDRAND = isSet(30, ecx1) + + var osSupportsAVX, osSupportsAVX512 bool + // For XGETBV, OSXSAVE bit is required and sufficient. + if X86.HasOSXSAVE { + eax, _ := xgetbv() + // Check if XMM and YMM registers have OS support. + osSupportsAVX = isSet(1, eax) && isSet(2, eax) + + if runtime.GOOS == "darwin" { + // Check darwin commpage for AVX512 support. 
Necessary because:
+			// https://github.com/apple/darwin-xnu/blob/0a798f6738bc1db01281fc08ae024145e84df927/osfmk/i386/fpu.c#L175-L201
+			osSupportsAVX512 = osSupportsAVX && darwinSupportsAVX512()
+		} else {
+			// Check if OPMASK and ZMM registers have OS support.
+			osSupportsAVX512 = osSupportsAVX && isSet(5, eax) && isSet(6, eax) && isSet(7, eax)
+		}
+	}
+
+	X86.HasAVX = isSet(28, ecx1) && osSupportsAVX
+
+	if maxID < 7 {
+		return
+	}
+
+	_, ebx7, ecx7, edx7 := cpuid(7, 0)
+	X86.HasBMI1 = isSet(3, ebx7)
+	X86.HasAVX2 = isSet(5, ebx7) && osSupportsAVX
+	X86.HasBMI2 = isSet(8, ebx7)
+	X86.HasERMS = isSet(9, ebx7)
+	X86.HasRDSEED = isSet(18, ebx7)
+	X86.HasADX = isSet(19, ebx7)
+
+	X86.HasAVX512 = isSet(16, ebx7) && osSupportsAVX512 // Because the AVX-512 foundation is the core required extension
+	if X86.HasAVX512 {
+		X86.HasAVX512F = true
+		X86.HasAVX512CD = isSet(28, ebx7)
+		X86.HasAVX512ER = isSet(27, ebx7)
+		X86.HasAVX512PF = isSet(26, ebx7)
+		X86.HasAVX512VL = isSet(31, ebx7)
+		X86.HasAVX512BW = isSet(30, ebx7)
+		X86.HasAVX512DQ = isSet(17, ebx7)
+		X86.HasAVX512IFMA = isSet(21, ebx7)
+		X86.HasAVX512VBMI = isSet(1, ecx7)
+		X86.HasAVX5124VNNIW = isSet(2, edx7)
+		X86.HasAVX5124FMAPS = isSet(3, edx7)
+		X86.HasAVX512VPOPCNTDQ = isSet(14, ecx7)
+		X86.HasAVX512VPCLMULQDQ = isSet(10, ecx7)
+		X86.HasAVX512VNNI = isSet(11, ecx7)
+		X86.HasAVX512GFNI = isSet(8, ecx7)
+		X86.HasAVX512VAES = isSet(9, ecx7)
+		X86.HasAVX512VBMI2 = isSet(6, ecx7)
+		X86.HasAVX512BITALG = isSet(12, ecx7)
+
+		eax71, _, _, _ := cpuid(7, 1)
+		X86.HasAVX512BF16 = isSet(5, eax71)
+	}
+}
+
+func isSet(bitpos uint, value uint32) bool {
+	return value&(1<<bitpos) != 0
+}
diff --git a/vendor/golang.org/x/sys/cpu/hwcap_linux.go b/vendor/golang.org/x/sys/cpu/hwcap_linux.go
new file mode 100644
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/hwcap_linux.go
@@ -0,0 +1,56 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cpu
+
+import (
+	"io/ioutil"
+)
+
+const (
+	_AT_HWCAP  = 16
+	_AT_HWCAP2 = 26
+
+	procAuxv = "/proc/self/auxv"
+
+	uintSize = int(32 << (^uint(0) >> 63))
+)
+
+// For those platforms that don't have a 'cpuid' equivalent we use HWCAP/HWCAP2.
+// These are initialized in cpu_$GOARCH.go
+// and should not be changed after they are initialized.
+var hwCap uint
+var hwCap2 uint
+
+func readHWCAP() error {
+	buf, err := ioutil.ReadFile(procAuxv)
+	if err != nil {
+		// e.g. on android /proc/self/auxv is not accessible, so silently
+		// ignore the error and leave Initialized = false. On some
+		// architectures (e.g. arm64) doinit() implements a fallback
+		// readout and will set Initialized = true again.
+		return err
+	}
+	bo := hostByteOrder()
+	for len(buf) >= 2*(uintSize/8) {
+		var tag, val uint
+		switch uintSize {
+		case 32:
+			tag = uint(bo.Uint32(buf[0:]))
+			val = uint(bo.Uint32(buf[4:]))
+			buf = buf[8:]
+		case 64:
+			tag = uint(bo.Uint64(buf[0:]))
+			val = uint(bo.Uint64(buf[8:]))
+			buf = buf[16:]
+		}
+		switch tag {
+		case _AT_HWCAP:
+			hwCap = val
+		case _AT_HWCAP2:
+			hwCap2 = val
+		}
+	}
+	return nil
+}
diff --git a/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go b/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go
new file mode 100644
index 000000000..a864f24d7
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go
@@ -0,0 +1,27 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Recreate a getsystemcfg syscall handler instead of
+// using the one provided by x/sys/unix to avoid having
+// the dependency between them. (See golang.org/issue/32102)
+// Moreover, this file will be used during the building of
+// gccgo's libgo and thus must not use a CGo method.
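+//
+// Sketch of the expected call path (an assumption based on this package's
+// layout; the file is not part of this hunk): cpu_aix.go wraps this as
+//
+//	impl, _ := callgetsystemcfg(_SC_IMPL) // _SC_IMPL is defined in cpu_aix.go
+//
+// and derives the POWER8/POWER9 flags from the returned bits.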
+ +//go:build aix && gccgo +// +build aix,gccgo + +package cpu + +import ( + "syscall" +) + +//extern getsystemcfg +func gccgoGetsystemcfg(label uint32) (r uint64) + +func callgetsystemcfg(label int) (r1 uintptr, e1 syscall.Errno) { + r1 = uintptr(gccgoGetsystemcfg(uint32(label))) + e1 = syscall.GetErrno() + return +} diff --git a/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go b/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go new file mode 100644 index 000000000..904be42ff --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go @@ -0,0 +1,36 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Minimal copy of x/sys/unix so the cpu package can make a +// system call on AIX without depending on x/sys/unix. +// (See golang.org/issue/32102) + +//go:build aix && ppc64 && gc +// +build aix,ppc64,gc + +package cpu + +import ( + "syscall" + "unsafe" +) + +//go:cgo_import_dynamic libc_getsystemcfg getsystemcfg "libc.a/shr_64.o" + +//go:linkname libc_getsystemcfg libc_getsystemcfg + +type syscallFunc uintptr + +var libc_getsystemcfg syscallFunc + +type errno = syscall.Errno + +// Implemented in runtime/syscall_aix.go. +func rawSyscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err errno) +func syscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err errno) + +func callgetsystemcfg(label int) (r1 uintptr, e1 errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_getsystemcfg)), 1, uintptr(label), 0, 0, 0, 0, 0) + return +} diff --git a/vendor/gopkg.in/natefinch/lumberjack.v2/.gitignore b/vendor/gopkg.in/natefinch/lumberjack.v2/.gitignore new file mode 100644 index 000000000..836562412 --- /dev/null +++ b/vendor/gopkg.in/natefinch/lumberjack.v2/.gitignore @@ -0,0 +1,23 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test diff --git a/vendor/gopkg.in/natefinch/lumberjack.v2/.travis.yml b/vendor/gopkg.in/natefinch/lumberjack.v2/.travis.yml new file mode 100644 index 000000000..65dcbc56d --- /dev/null +++ b/vendor/gopkg.in/natefinch/lumberjack.v2/.travis.yml @@ -0,0 +1,6 @@ +language: go + +go: + - 1.8 + - 1.7 + - 1.6 \ No newline at end of file diff --git a/vendor/gopkg.in/natefinch/lumberjack.v2/LICENSE b/vendor/gopkg.in/natefinch/lumberjack.v2/LICENSE new file mode 100644 index 000000000..c3d4cc307 --- /dev/null +++ b/vendor/gopkg.in/natefinch/lumberjack.v2/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Nate Finch + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/vendor/gopkg.in/natefinch/lumberjack.v2/README.md b/vendor/gopkg.in/natefinch/lumberjack.v2/README.md
new file mode 100644
index 000000000..060eae52a
--- /dev/null
+++ b/vendor/gopkg.in/natefinch/lumberjack.v2/README.md
@@ -0,0 +1,179 @@
+# lumberjack [![GoDoc](https://godoc.org/gopkg.in/natefinch/lumberjack.v2?status.png)](https://godoc.org/gopkg.in/natefinch/lumberjack.v2) [![Build Status](https://travis-ci.org/natefinch/lumberjack.svg?branch=v2.0)](https://travis-ci.org/natefinch/lumberjack) [![Build status](https://ci.appveyor.com/api/projects/status/00gchpxtg4gkrt5d)](https://ci.appveyor.com/project/natefinch/lumberjack) [![Coverage Status](https://coveralls.io/repos/natefinch/lumberjack/badge.svg?branch=v2.0)](https://coveralls.io/r/natefinch/lumberjack?branch=v2.0)
+
+### Lumberjack is a Go package for writing logs to rolling files.
+
+Package lumberjack provides a rolling logger.
+
+Note that this is v2.0 of lumberjack, and should be imported using gopkg.in
+thusly:
+
+    import "gopkg.in/natefinch/lumberjack.v2"
+
+The package name remains simply lumberjack, and the code resides at
+https://github.com/natefinch/lumberjack under the v2.0 branch.
+
+Lumberjack is intended to be one part of a logging infrastructure.
+It is not an all-in-one solution, but instead is a pluggable
+component at the bottom of the logging stack that simply controls the files
+to which logs are written.
+
+Lumberjack plays well with any logging package that can write to an
+io.Writer, including the standard library's log package.
+
+Lumberjack assumes that only one process is writing to the output files.
+Using the same lumberjack configuration from multiple processes on the same
+machine will result in improper behavior.
+
+
+**Example**
+
+To use lumberjack with the standard library's log package, just pass it into the SetOutput function when your application starts.
+
+Code:
+
+```go
+log.SetOutput(&lumberjack.Logger{
+	Filename:   "/var/log/myapp/foo.log",
+	MaxSize:    500, // megabytes
+	MaxBackups: 3,
+	MaxAge:     28, //days
+	Compress:   true, // disabled by default
+})
+```
+
+
+
+## type Logger
+``` go
+type Logger struct {
+	// Filename is the file to write logs to. Backup log files will be retained
+	// in the same directory. It uses <processname>-lumberjack.log in
+	// os.TempDir() if empty.
+	Filename string `json:"filename" yaml:"filename"`
+
+	// MaxSize is the maximum size in megabytes of the log file before it gets
+	// rotated. It defaults to 100 megabytes.
+	MaxSize int `json:"maxsize" yaml:"maxsize"`
+
+	// MaxAge is the maximum number of days to retain old log files based on the
+	// timestamp encoded in their filename. Note that a day is defined as 24
+	// hours and may not exactly correspond to calendar days due to daylight
+	// savings, leap seconds, etc. The default is not to remove old log files
+	// based on age.
+	MaxAge int `json:"maxage" yaml:"maxage"`
+
+	// MaxBackups is the maximum number of old log files to retain.
The default
+	// is to retain all old log files (though MaxAge may still cause them to get
+	// deleted.)
+	MaxBackups int `json:"maxbackups" yaml:"maxbackups"`
+
+	// LocalTime determines if the time used for formatting the timestamps in
+	// backup files is the computer's local time. The default is to use UTC
+	// time.
+	LocalTime bool `json:"localtime" yaml:"localtime"`
+
+	// Compress determines if the rotated log files should be compressed
+	// using gzip. The default is not to perform compression.
+	Compress bool `json:"compress" yaml:"compress"`
+	// contains filtered or unexported fields
+}
+```
+Logger is an io.WriteCloser that writes to the specified filename.
+
+Logger opens or creates the logfile on first Write. If the file exists and
+is less than MaxSize megabytes, lumberjack will open and append to that file.
+If the file exists and its size is >= MaxSize megabytes, the file is renamed
+by putting the current time in a timestamp in the name immediately before the
+file's extension (or the end of the filename if there's no extension). A new
+log file is then created using the original filename.
+
+Whenever a write would cause the current log file to exceed MaxSize megabytes,
+the current file is closed, renamed, and a new log file created with the
+original name. Thus, the filename you give Logger is always the "current" log
+file.
+
+Backups use the log file name given to Logger, in the form `name-timestamp.ext`
+where name is the filename without the extension, timestamp is the time at which
+the log was rotated formatted with the time.Time format of
+`2006-01-02T15-04-05.000` and the extension is the original extension. For
+example, if your Logger.Filename is `/var/log/foo/server.log`, a backup created
+at 6:30pm on Nov 4 2016 would use the filename
+`/var/log/foo/server-2016-11-04T18-30-00.000.log`
+
+### Cleaning Up Old Log Files
+Whenever a new logfile gets created, old log files may be deleted. The most
+recent files according to the encoded timestamp will be retained, up to a
+number equal to MaxBackups (or all of them if MaxBackups is 0). Any files
+with an encoded timestamp older than MaxAge days are deleted, regardless of
+MaxBackups. Note that the time encoded in the timestamp is the rotation
+time, which may differ from the last time that file was written to.
+
+If MaxBackups and MaxAge are both 0, no old log files will be deleted.
+
+
+
+
+
+
+
+
+
+
+
+### func (\*Logger) Close
+``` go
+func (l *Logger) Close() error
+```
+Close implements io.Closer, and closes the current logfile.
+
+
+
+### func (\*Logger) Rotate
+``` go
+func (l *Logger) Rotate() error
+```
+Rotate causes Logger to close the existing log file and immediately create a
+new one. This is a helper function for applications that want to initiate
+rotations outside of the normal rotation rules, such as in response to
+SIGHUP. After rotating, this initiates a cleanup of old log files according
+to the normal rules.
+
+**Example**
+
+Example of how to rotate in response to SIGHUP.
+
+Code:
+
+```go
+l := &lumberjack.Logger{}
+log.SetOutput(l)
+c := make(chan os.Signal, 1)
+signal.Notify(c, syscall.SIGHUP)
+
+go func() {
+	for {
+		<-c
+		l.Rotate()
+	}
+}()
+```
+
+### func (\*Logger) Write
+``` go
+func (l *Logger) Write(p []byte) (n int, err error)
+```
+Write implements io.Writer. If a write would cause the log file to be larger
+than MaxSize, the file is closed, renamed to include a timestamp of the
+current time, and a new log file is created using the original log file name.
+If the length of the write is greater than MaxSize, an error is returned.
+
+
+
+
+
+
+
+
+- - -
+Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md)
diff --git a/vendor/gopkg.in/natefinch/lumberjack.v2/chown.go b/vendor/gopkg.in/natefinch/lumberjack.v2/chown.go
new file mode 100644
index 000000000..11d066972
--- /dev/null
+++ b/vendor/gopkg.in/natefinch/lumberjack.v2/chown.go
@@ -0,0 +1,11 @@
+// +build !linux
+
+package lumberjack
+
+import (
+	"os"
+)
+
+func chown(_ string, _ os.FileInfo) error {
+	return nil
+}
diff --git a/vendor/gopkg.in/natefinch/lumberjack.v2/chown_linux.go b/vendor/gopkg.in/natefinch/lumberjack.v2/chown_linux.go
new file mode 100644
index 000000000..2758ec9ce
--- /dev/null
+++ b/vendor/gopkg.in/natefinch/lumberjack.v2/chown_linux.go
@@ -0,0 +1,19 @@
+package lumberjack
+
+import (
+	"os"
+	"syscall"
+)
+
+// os_Chown is a var so we can mock it out during tests.
+var os_Chown = os.Chown
+
+func chown(name string, info os.FileInfo) error {
+	f, err := os.OpenFile(name, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, info.Mode())
+	if err != nil {
+		return err
+	}
+	f.Close()
+	stat := info.Sys().(*syscall.Stat_t)
+	return os_Chown(name, int(stat.Uid), int(stat.Gid))
+}
diff --git a/vendor/gopkg.in/natefinch/lumberjack.v2/lumberjack.go b/vendor/gopkg.in/natefinch/lumberjack.v2/lumberjack.go
new file mode 100644
index 000000000..46d97c553
--- /dev/null
+++ b/vendor/gopkg.in/natefinch/lumberjack.v2/lumberjack.go
@@ -0,0 +1,541 @@
+// Package lumberjack provides a rolling logger.
+//
+// Note that this is v2.0 of lumberjack, and should be imported using gopkg.in
+// thusly:
+//
+//   import "gopkg.in/natefinch/lumberjack.v2"
+//
+// The package name remains simply lumberjack, and the code resides at
+// https://github.com/natefinch/lumberjack under the v2.0 branch.
+//
+// Lumberjack is intended to be one part of a logging infrastructure.
+// It is not an all-in-one solution, but instead is a pluggable
+// component at the bottom of the logging stack that simply controls the files
+// to which logs are written.
+//
+// Lumberjack plays well with any logging package that can write to an
+// io.Writer, including the standard library's log package.
+//
+// Lumberjack assumes that only one process is writing to the output files.
+// Using the same lumberjack configuration from multiple processes on the same
+// machine will result in improper behavior.
+package lumberjack
+
+import (
+	"compress/gzip"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"sort"
+	"strings"
+	"sync"
+	"time"
+)
+
+const (
+	backupTimeFormat = "2006-01-02T15-04-05.000"
+	compressSuffix   = ".gz"
+	defaultMaxSize   = 100
+)
+
+// ensure we always implement io.WriteCloser
+var _ io.WriteCloser = (*Logger)(nil)
+
+// Logger is an io.WriteCloser that writes to the specified filename.
+//
+// Logger opens or creates the logfile on first Write. If the file exists and
+// is less than MaxSize megabytes, lumberjack will open and append to that file.
+// If the file exists and its size is >= MaxSize megabytes, the file is renamed
+// by putting the current time in a timestamp in the name immediately before the
+// file's extension (or the end of the filename if there's no extension). A new
+// log file is then created using the original filename.
+//
+// Whenever a write would cause the current log file to exceed MaxSize megabytes,
+// the current file is closed, renamed, and a new log file created with the
+// original name.
Thus, the filename you give Logger is always the "current" log
+// file.
+//
+// Backups use the log file name given to Logger, in the form
+// `name-timestamp.ext` where name is the filename without the extension,
+// timestamp is the time at which the log was rotated formatted with the
+// time.Time format of `2006-01-02T15-04-05.000` and the extension is the
+// original extension. For example, if your Logger.Filename is
+// `/var/log/foo/server.log`, a backup created at 6:30pm on Nov 4 2016 would
+// use the filename `/var/log/foo/server-2016-11-04T18-30-00.000.log`
+//
+// Cleaning Up Old Log Files
+//
+// Whenever a new logfile gets created, old log files may be deleted. The most
+// recent files according to the encoded timestamp will be retained, up to a
+// number equal to MaxBackups (or all of them if MaxBackups is 0). Any files
+// with an encoded timestamp older than MaxAge days are deleted, regardless of
+// MaxBackups. Note that the time encoded in the timestamp is the rotation
+// time, which may differ from the last time that file was written to.
+//
+// If MaxBackups and MaxAge are both 0, no old log files will be deleted.
+type Logger struct {
+	// Filename is the file to write logs to. Backup log files will be retained
+	// in the same directory. It uses <processname>-lumberjack.log in
+	// os.TempDir() if empty.
+	Filename string `json:"filename" yaml:"filename"`
+
+	// MaxSize is the maximum size in megabytes of the log file before it gets
+	// rotated. It defaults to 100 megabytes.
+	MaxSize int `json:"maxsize" yaml:"maxsize"`
+
+	// MaxAge is the maximum number of days to retain old log files based on the
+	// timestamp encoded in their filename. Note that a day is defined as 24
+	// hours and may not exactly correspond to calendar days due to daylight
+	// savings, leap seconds, etc. The default is not to remove old log files
+	// based on age.
+	MaxAge int `json:"maxage" yaml:"maxage"`
+
+	// MaxBackups is the maximum number of old log files to retain. The default
+	// is to retain all old log files (though MaxAge may still cause them to get
+	// deleted.)
+	MaxBackups int `json:"maxbackups" yaml:"maxbackups"`
+
+	// LocalTime determines if the time used for formatting the timestamps in
+	// backup files is the computer's local time. The default is to use UTC
+	// time.
+	LocalTime bool `json:"localtime" yaml:"localtime"`
+
+	// Compress determines if the rotated log files should be compressed
+	// using gzip. The default is not to perform compression.
+	Compress bool `json:"compress" yaml:"compress"`
+
+	size int64
+	file *os.File
+	mu   sync.Mutex
+
+	millCh    chan bool
+	startMill sync.Once
+}
+
+var (
+	// currentTime exists so it can be mocked out by tests.
+	currentTime = time.Now
+
+	// os_Stat exists so it can be mocked out by tests.
+	os_Stat = os.Stat
+
+	// megabyte is the conversion factor between MaxSize and bytes. It is a
+	// variable so tests can mock it out and not need to write megabytes of data
+	// to disk.
+	megabyte = 1024 * 1024
+)
+
+// Write implements io.Writer. If a write would cause the log file to be larger
+// than MaxSize, the file is closed, renamed to include a timestamp of the
+// current time, and a new log file is created using the original log file name.
+// If the length of the write is greater than MaxSize, an error is returned.
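+//
+// Illustrative consequence (not part of the upstream comment): with the
+// default MaxSize of 100 megabytes, a single 200-megabyte Write fails
+// immediately; rotation only happens between writes that individually fit.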
+func (l *Logger) Write(p []byte) (n int, err error) {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+
+	writeLen := int64(len(p))
+	if writeLen > l.max() {
+		return 0, fmt.Errorf(
+			"write length %d exceeds maximum file size %d", writeLen, l.max(),
+		)
+	}
+
+	if l.file == nil {
+		if err = l.openExistingOrNew(len(p)); err != nil {
+			return 0, err
+		}
+	}
+
+	if l.size+writeLen > l.max() {
+		if err := l.rotate(); err != nil {
+			return 0, err
+		}
+	}
+
+	n, err = l.file.Write(p)
+	l.size += int64(n)
+
+	return n, err
+}
+
+// Close implements io.Closer, and closes the current logfile.
+func (l *Logger) Close() error {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	return l.close()
+}
+
+// close closes the file if it is open.
+func (l *Logger) close() error {
+	if l.file == nil {
+		return nil
+	}
+	err := l.file.Close()
+	l.file = nil
+	return err
+}
+
+// Rotate causes Logger to close the existing log file and immediately create a
+// new one. This is a helper function for applications that want to initiate
+// rotations outside of the normal rotation rules, such as in response to
+// SIGHUP. After rotating, this initiates compression and removal of old log
+// files according to the configuration.
+func (l *Logger) Rotate() error {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	return l.rotate()
+}
+
+// rotate closes the current file, moves it aside with a timestamp in the name
+// (if it exists), opens a new file with the original filename, and then runs
+// post-rotation processing and removal.
+func (l *Logger) rotate() error {
+	if err := l.close(); err != nil {
+		return err
+	}
+	if err := l.openNew(); err != nil {
+		return err
+	}
+	l.mill()
+	return nil
+}
+
+// openNew opens a new log file for writing, moving any old log file out of the
+// way. This method assumes the file has already been closed.
+func (l *Logger) openNew() error {
+	err := os.MkdirAll(l.dir(), 0744)
+	if err != nil {
+		return fmt.Errorf("can't make directories for new logfile: %s", err)
+	}
+
+	name := l.filename()
+	mode := os.FileMode(0644)
+	info, err := os_Stat(name)
+	if err == nil {
+		// Copy the mode off the old logfile.
+		mode = info.Mode()
+		// move the existing file
+		newname := backupName(name, l.LocalTime)
+		if err := os.Rename(name, newname); err != nil {
+			return fmt.Errorf("can't rename log file: %s", err)
+		}
+
+		// this is a no-op anywhere but linux
+		if err := chown(name, info); err != nil {
+			return err
+		}
+	}
+
+	// we use truncate here because this should only get called when we've moved
+	// the file ourselves. if someone else creates the file in the meantime,
+	// just wipe out the contents.
+	f, err := os.OpenFile(name, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, mode)
+	if err != nil {
+		return fmt.Errorf("can't open new logfile: %s", err)
+	}
+	l.file = f
+	l.size = 0
+	return nil
+}
+
+// backupName creates a new filename from the given name, inserting a timestamp
+// between the filename and the extension, using the local time if requested
+// (otherwise UTC).
+func backupName(name string, local bool) string {
+	dir := filepath.Dir(name)
+	filename := filepath.Base(name)
+	ext := filepath.Ext(filename)
+	prefix := filename[:len(filename)-len(ext)]
+	t := currentTime()
+	if !local {
+		t = t.UTC()
+	}
+
+	timestamp := t.Format(backupTimeFormat)
+	return filepath.Join(dir, fmt.Sprintf("%s-%s%s", prefix, timestamp, ext))
+}
+
+// openExistingOrNew opens the logfile if it exists and if the current write
+// would not put it over MaxSize.
If there is no such file or the write would
+// put it over MaxSize, a new file is created.
+func (l *Logger) openExistingOrNew(writeLen int) error {
+	l.mill()
+
+	filename := l.filename()
+	info, err := os_Stat(filename)
+	if os.IsNotExist(err) {
+		return l.openNew()
+	}
+	if err != nil {
+		return fmt.Errorf("error getting log file info: %s", err)
+	}
+
+	if info.Size()+int64(writeLen) >= l.max() {
+		return l.rotate()
+	}
+
+	file, err := os.OpenFile(filename, os.O_APPEND|os.O_WRONLY, 0644)
+	if err != nil {
+		// if we fail to open the old log file for some reason, just ignore
+		// it and open a new log file.
+		return l.openNew()
+	}
+	l.file = file
+	l.size = info.Size()
+	return nil
+}
+
+// filename generates the name of the logfile from the current time.
+func (l *Logger) filename() string {
+	if l.Filename != "" {
+		return l.Filename
+	}
+	name := filepath.Base(os.Args[0]) + "-lumberjack.log"
+	return filepath.Join(os.TempDir(), name)
+}
+
+// millRunOnce performs compression and removal of stale log files.
+// Log files are compressed if enabled via configuration and old log
+// files are removed, keeping at most l.MaxBackups files, as long as
+// none of them are older than MaxAge.
+func (l *Logger) millRunOnce() error {
+	if l.MaxBackups == 0 && l.MaxAge == 0 && !l.Compress {
+		return nil
+	}
+
+	files, err := l.oldLogFiles()
+	if err != nil {
+		return err
+	}
+
+	var compress, remove []logInfo
+
+	if l.MaxBackups > 0 && l.MaxBackups < len(files) {
+		preserved := make(map[string]bool)
+		var remaining []logInfo
+		for _, f := range files {
+			// Only count the uncompressed log file or the
+			// compressed log file, not both.
+			fn := f.Name()
+			if strings.HasSuffix(fn, compressSuffix) {
+				fn = fn[:len(fn)-len(compressSuffix)]
+			}
+			preserved[fn] = true
+
+			if len(preserved) > l.MaxBackups {
+				remove = append(remove, f)
+			} else {
+				remaining = append(remaining, f)
+			}
+		}
+		files = remaining
+	}
+	if l.MaxAge > 0 {
+		diff := time.Duration(int64(24*time.Hour) * int64(l.MaxAge))
+		cutoff := currentTime().Add(-1 * diff)
+
+		var remaining []logInfo
+		for _, f := range files {
+			if f.timestamp.Before(cutoff) {
+				remove = append(remove, f)
+			} else {
+				remaining = append(remaining, f)
+			}
+		}
+		files = remaining
+	}
+
+	if l.Compress {
+		for _, f := range files {
+			if !strings.HasSuffix(f.Name(), compressSuffix) {
+				compress = append(compress, f)
+			}
+		}
+	}
+
+	for _, f := range remove {
+		errRemove := os.Remove(filepath.Join(l.dir(), f.Name()))
+		if err == nil && errRemove != nil {
+			err = errRemove
+		}
+	}
+	for _, f := range compress {
+		fn := filepath.Join(l.dir(), f.Name())
+		errCompress := compressLogFile(fn, fn+compressSuffix)
+		if err == nil && errCompress != nil {
+			err = errCompress
+		}
+	}
+
+	return err
+}
+
+// millRun runs in a goroutine to manage post-rotation compression and removal
+// of old log files.
+func (l *Logger) millRun() {
+	for range l.millCh {
+		// what am I going to do, log this?
+		_ = l.millRunOnce()
+	}
+}
+
+// mill performs post-rotation compression and removal of stale log files,
+// starting the mill goroutine if necessary.
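+// The send onto millCh below is non-blocking (select with a default case),
+// so if a mill run is already queued the extra signal is simply dropped.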
+func (l *Logger) mill() {
+	l.startMill.Do(func() {
+		l.millCh = make(chan bool, 1)
+		go l.millRun()
+	})
+	select {
+	case l.millCh <- true:
+	default:
+	}
+}
+
+// oldLogFiles returns the list of backup log files stored in the same
+// directory as the current log file, sorted by the timestamp encoded in
+// their filenames, newest first.
+func (l *Logger) oldLogFiles() ([]logInfo, error) {
+	files, err := ioutil.ReadDir(l.dir())
+	if err != nil {
+		return nil, fmt.Errorf("can't read log file directory: %s", err)
+	}
+	logFiles := []logInfo{}
+
+	prefix, ext := l.prefixAndExt()
+
+	for _, f := range files {
+		if f.IsDir() {
+			continue
+		}
+		if t, err := l.timeFromName(f.Name(), prefix, ext); err == nil {
+			logFiles = append(logFiles, logInfo{t, f})
+			continue
+		}
+		if t, err := l.timeFromName(f.Name(), prefix, ext+compressSuffix); err == nil {
+			logFiles = append(logFiles, logInfo{t, f})
+			continue
+		}
+		// error parsing means that the suffix at the end was not generated
+		// by lumberjack, and therefore it's not a backup file.
+	}
+
+	sort.Sort(byFormatTime(logFiles))
+
+	return logFiles, nil
+}
+
+// timeFromName extracts the formatted time from the filename by stripping off
+// the filename's prefix and extension. This prevents someone's filename from
+// confusing time.Parse.
+func (l *Logger) timeFromName(filename, prefix, ext string) (time.Time, error) {
+	if !strings.HasPrefix(filename, prefix) {
+		return time.Time{}, errors.New("mismatched prefix")
+	}
+	if !strings.HasSuffix(filename, ext) {
+		return time.Time{}, errors.New("mismatched extension")
+	}
+	ts := filename[len(prefix) : len(filename)-len(ext)]
+	return time.Parse(backupTimeFormat, ts)
+}
+
+// max returns the maximum size in bytes of log files before rolling.
+func (l *Logger) max() int64 {
+	if l.MaxSize == 0 {
+		return int64(defaultMaxSize * megabyte)
+	}
+	return int64(l.MaxSize) * int64(megabyte)
+}
+
+// dir returns the directory for the current filename.
+func (l *Logger) dir() string {
+	return filepath.Dir(l.filename())
+}
+
+// prefixAndExt returns the filename part and extension part from the Logger's
+// filename.
+func (l *Logger) prefixAndExt() (prefix, ext string) {
+	filename := filepath.Base(l.filename())
+	ext = filepath.Ext(filename)
+	prefix = filename[:len(filename)-len(ext)] + "-"
+	return prefix, ext
+}
+
+// compressLogFile compresses the given log file, removing the
+// uncompressed log file if successful.
+func compressLogFile(src, dst string) (err error) {
+	f, err := os.Open(src)
+	if err != nil {
+		return fmt.Errorf("failed to open log file: %v", err)
+	}
+	defer f.Close()
+
+	fi, err := os_Stat(src)
+	if err != nil {
+		return fmt.Errorf("failed to stat log file: %v", err)
+	}
+
+	if err := chown(dst, fi); err != nil {
+		return fmt.Errorf("failed to chown compressed log file: %v", err)
+	}
+
+	// If this file already exists, we presume it was created by
+	// a previous attempt to compress the log file.
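+	// Opening dst with O_TRUNC below therefore discards any partial output
+	// left by that earlier attempt before the compression is redone.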
+ gzf, err := os.OpenFile(dst, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, fi.Mode()) + if err != nil { + return fmt.Errorf("failed to open compressed log file: %v", err) + } + defer gzf.Close() + + gz := gzip.NewWriter(gzf) + + defer func() { + if err != nil { + os.Remove(dst) + err = fmt.Errorf("failed to compress log file: %v", err) + } + }() + + if _, err := io.Copy(gz, f); err != nil { + return err + } + if err := gz.Close(); err != nil { + return err + } + if err := gzf.Close(); err != nil { + return err + } + + if err := f.Close(); err != nil { + return err + } + if err := os.Remove(src); err != nil { + return err + } + + return nil +} + +// logInfo is a convenience struct to return the filename and its embedded +// timestamp. +type logInfo struct { + timestamp time.Time + os.FileInfo +} + +// byFormatTime sorts by newest time formatted in the name. +type byFormatTime []logInfo + +func (b byFormatTime) Less(i, j int) bool { + return b[i].timestamp.After(b[j].timestamp) +} + +func (b byFormatTime) Swap(i, j int) { + b[i], b[j] = b[j], b[i] +} + +func (b byFormatTime) Len() int { + return len(b) +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/install/install.go b/vendor/k8s.io/apiserver/pkg/apis/audit/install/install.go new file mode 100644 index 000000000..6e7d5bc82 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/install/install.go @@ -0,0 +1,37 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package install installs the experimental API group, making it available as +// an option to all of the API encoding/decoding machinery. +package install + +import ( + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apiserver/pkg/apis/audit" + "k8s.io/apiserver/pkg/apis/audit/v1" + "k8s.io/apiserver/pkg/apis/audit/v1alpha1" + "k8s.io/apiserver/pkg/apis/audit/v1beta1" +) + +// Install registers the API group and adds types to a scheme +func Install(scheme *runtime.Scheme) { + utilruntime.Must(audit.AddToScheme(scheme)) + utilruntime.Must(v1.AddToScheme(scheme)) + utilruntime.Must(v1beta1.AddToScheme(scheme)) + utilruntime.Must(v1alpha1.AddToScheme(scheme)) + utilruntime.Must(scheme.SetVersionPriority(v1.SchemeGroupVersion, v1beta1.SchemeGroupVersion, v1alpha1.SchemeGroupVersion)) +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/config/doc.go b/vendor/k8s.io/apiserver/pkg/apis/config/doc.go new file mode 100644 index 000000000..338d4cebf --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/config/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+
+package config // import "k8s.io/apiserver/pkg/apis/config"
diff --git a/vendor/k8s.io/apiserver/pkg/apis/config/register.go b/vendor/k8s.io/apiserver/pkg/apis/config/register.go
new file mode 100644
index 000000000..6a0aae8e5
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/apis/config/register.go
@@ -0,0 +1,53 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package config
+
+import (
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+var (
+	// SchemeBuilder points to a list of functions added to Scheme.
+	SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+	// AddToScheme adds this group to a scheme.
+	AddToScheme = SchemeBuilder.AddToScheme
+)
+
+// GroupName is the group name used in this package.
+const GroupName = "apiserver.config.k8s.io"
+
+// SchemeGroupVersion is the group version used to register these objects.
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
+
+// Kind takes an unqualified kind and returns a Group qualified GroupKind.
+func Kind(kind string) schema.GroupKind {
+	return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource.
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+func addKnownTypes(scheme *runtime.Scheme) error {
+	// TODO this will get cleaned up when the scheme types are fixed
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&EncryptionConfiguration{},
+	)
+	return nil
+}
diff --git a/vendor/k8s.io/apiserver/pkg/apis/config/types.go b/vendor/k8s.io/apiserver/pkg/apis/config/types.go
new file mode 100644
index 000000000..5dddc97f9
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/apis/config/types.go
@@ -0,0 +1,100 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package config
+
+import (
+	"fmt"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// EncryptionConfiguration stores the complete configuration for encryption providers.
+type EncryptionConfiguration struct {
+	metav1.TypeMeta
+	// resources is a list containing resources, and their corresponding encryption providers.
+ Resources []ResourceConfiguration
+}
+
+// ResourceConfiguration stores per resource configuration.
+type ResourceConfiguration struct {
+ // resources is a list of kubernetes resources which have to be encrypted.
+ Resources []string
+ // providers is a list of transformers to be used for reading and writing the resources to disk.
+ // eg: aesgcm, aescbc, secretbox, identity.
+ Providers []ProviderConfiguration
+}
+
+// ProviderConfiguration stores the provided configuration for an encryption provider.
+type ProviderConfiguration struct {
+ // aesgcm is the configuration for the AES-GCM transformer.
+ AESGCM *AESConfiguration
+ // aescbc is the configuration for the AES-CBC transformer.
+ AESCBC *AESConfiguration
+ // secretbox is the configuration for the Secretbox based transformer.
+ Secretbox *SecretboxConfiguration
+ // identity is the (empty) configuration for the identity transformer.
+ Identity *IdentityConfiguration
+ // kms contains the name, cache size and path to configuration file for a KMS based envelope transformer.
+ KMS *KMSConfiguration
+}
+
+// AESConfiguration contains the API configuration for an AES transformer.
+type AESConfiguration struct {
+ // keys is a list of keys to be used for creating the AES transformer.
+ // Each key has to be 32 bytes long for AES-CBC and 16, 24 or 32 bytes for AES-GCM.
+ Keys []Key
+}
+
+// SecretboxConfiguration contains the API configuration for a Secretbox transformer.
+type SecretboxConfiguration struct {
+ // keys is a list of keys to be used for creating the Secretbox transformer.
+ // Each key has to be 32 bytes long.
+ Keys []Key
+}
+
+// Key contains name and secret of the provided key for a transformer.
+type Key struct {
+ // name is the name of the key to be used while storing data to disk.
+ Name string
+ // secret is the actual key, encoded in base64.
+ Secret string
+}
+
+// String implements Stringer interface in a log safe way.
+func (k Key) String() string {
+ return fmt.Sprintf("Name: %s, Secret: [REDACTED]", k.Name)
+}
+
+// IdentityConfiguration is an empty struct to allow identity transformer in provider configuration.
+type IdentityConfiguration struct{}
+
+// KMSConfiguration contains the name, cache size and path to configuration file for a KMS based envelope transformer.
+type KMSConfiguration struct {
+ // name is the name of the KMS plugin to be used.
+ Name string
+ // cachesize is the maximum number of secrets which are cached in memory. The default value is 1000.
+ // Set to a negative value to disable caching.
+ // +optional
+ CacheSize *int32
+ // endpoint is the gRPC server listening address, for example "unix:///var/run/kms-provider.sock".
+ Endpoint string
+ // timeout for gRPC calls to kms-plugin (ex. 5s). The default is 3 seconds.
+ // +optional
+ Timeout *metav1.Duration
+}
diff --git a/vendor/k8s.io/apiserver/pkg/apis/config/v1/defaults.go b/vendor/k8s.io/apiserver/pkg/apis/config/v1/defaults.go
new file mode 100644
index 000000000..2d529651a
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/apis/config/v1/defaults.go
@@ -0,0 +1,44 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "time"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+var (
+ defaultTimeout = &metav1.Duration{Duration: 3 * time.Second}
+ defaultCacheSize int32 = 1000
+)
+
+func addDefaultingFuncs(scheme *runtime.Scheme) error {
+ return RegisterDefaults(scheme)
+}
+
+// SetDefaults_KMSConfiguration applies defaults to KMSConfiguration.
+func SetDefaults_KMSConfiguration(obj *KMSConfiguration) {
+ if obj.Timeout == nil {
+ obj.Timeout = defaultTimeout
+ }
+
+ if obj.CacheSize == nil {
+ obj.CacheSize = &defaultCacheSize
+ }
+}
diff --git a/vendor/k8s.io/apiserver/pkg/apis/config/v1/doc.go b/vendor/k8s.io/apiserver/pkg/apis/config/v1/doc.go
new file mode 100644
index 000000000..b1a18ccab
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/apis/config/v1/doc.go
@@ -0,0 +1,23 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:conversion-gen=k8s.io/apiserver/pkg/apis/config
+// +k8s:deepcopy-gen=package
+// +k8s:defaulter-gen=TypeMeta
+// +groupName=apiserver.config.k8s.io
+
+// Package v1 is the v1 version of the API.
+package v1
diff --git a/vendor/k8s.io/apiserver/pkg/apis/config/v1/register.go b/vendor/k8s.io/apiserver/pkg/apis/config/v1/register.go
new file mode 100644
index 000000000..32b5634c4
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/apis/config/v1/register.go
@@ -0,0 +1,53 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name used in this package.
+const GroupName = "apiserver.config.k8s.io"
+
+// SchemeGroupVersion is group version used to register these objects.
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
+
+var (
+ // SchemeBuilder points to a list of functions added to Scheme.
+ SchemeBuilder runtime.SchemeBuilder
+ localSchemeBuilder = &SchemeBuilder
+ // AddToScheme adds this group to a scheme.
+ AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addKnownTypes) + localSchemeBuilder.Register(addDefaultingFuncs) +} + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &EncryptionConfiguration{}, + ) + // also register into the v1 group as EncryptionConfig (due to a docs bug) + scheme.AddKnownTypeWithName(schema.GroupVersionKind{Group: "", Version: "v1", Kind: "EncryptionConfig"}, &EncryptionConfiguration{}) + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/config/v1/types.go b/vendor/k8s.io/apiserver/pkg/apis/config/v1/types.go new file mode 100644 index 000000000..d7d68d258 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/config/v1/types.go @@ -0,0 +1,100 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// EncryptionConfiguration stores the complete configuration for encryption providers. +type EncryptionConfiguration struct { + metav1.TypeMeta + // resources is a list containing resources, and their corresponding encryption providers. + Resources []ResourceConfiguration `json:"resources"` +} + +// ResourceConfiguration stores per resource configuration. +type ResourceConfiguration struct { + // resources is a list of kubernetes resources which have to be encrypted. + Resources []string `json:"resources"` + // providers is a list of transformers to be used for reading and writing the resources to disk. + // eg: aesgcm, aescbc, secretbox, identity. + Providers []ProviderConfiguration `json:"providers"` +} + +// ProviderConfiguration stores the provided configuration for an encryption provider. +type ProviderConfiguration struct { + // aesgcm is the configuration for the AES-GCM transformer. + AESGCM *AESConfiguration `json:"aesgcm,omitempty"` + // aescbc is the configuration for the AES-CBC transformer. + AESCBC *AESConfiguration `json:"aescbc,omitempty"` + // secretbox is the configuration for the Secretbox based transformer. + Secretbox *SecretboxConfiguration `json:"secretbox,omitempty"` + // identity is the (empty) configuration for the identity transformer. + Identity *IdentityConfiguration `json:"identity,omitempty"` + // kms contains the name, cache size and path to configuration file for a KMS based envelope transformer. + KMS *KMSConfiguration `json:"kms,omitempty"` +} + +// AESConfiguration contains the API configuration for an AES transformer. +type AESConfiguration struct { + // keys is a list of keys to be used for creating the AES transformer. + // Each key has to be 32 bytes long for AES-CBC and 16, 24 or 32 bytes for AES-GCM. 
+ Keys []Key `json:"keys"`
+}
+
+// SecretboxConfiguration contains the API configuration for a Secretbox transformer.
+type SecretboxConfiguration struct {
+ // keys is a list of keys to be used for creating the Secretbox transformer.
+ // Each key has to be 32 bytes long.
+ Keys []Key `json:"keys"`
+}
+
+// Key contains name and secret of the provided key for a transformer.
+type Key struct {
+ // name is the name of the key to be used while storing data to disk.
+ Name string `json:"name"`
+ // secret is the actual key, encoded in base64.
+ Secret string `json:"secret"`
+}
+
+// String implements Stringer interface in a log safe way.
+func (k Key) String() string {
+ return fmt.Sprintf("Name: %s, Secret: [REDACTED]", k.Name)
+}
+
+// IdentityConfiguration is an empty struct to allow identity transformer in provider configuration.
+type IdentityConfiguration struct{}
+
+// KMSConfiguration contains the name, cache size and path to configuration file for a KMS based envelope transformer.
+type KMSConfiguration struct {
+ // name is the name of the KMS plugin to be used.
+ Name string `json:"name"`
+ // cachesize is the maximum number of secrets which are cached in memory. The default value is 1000.
+ // Set to a negative value to disable caching.
+ // +optional
+ CacheSize *int32 `json:"cachesize,omitempty"`
+ // endpoint is the gRPC server listening address, for example "unix:///var/run/kms-provider.sock".
+ Endpoint string `json:"endpoint"`
+ // timeout for gRPC calls to kms-plugin (ex. 5s). The default is 3 seconds.
+ // +optional
+ Timeout *metav1.Duration `json:"timeout,omitempty"`
+}
diff --git a/vendor/k8s.io/apiserver/pkg/apis/config/v1/zz_generated.conversion.go b/vendor/k8s.io/apiserver/pkg/apis/config/v1/zz_generated.conversion.go
new file mode 100644
index 000000000..c7de6539d
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/apis/config/v1/zz_generated.conversion.go
@@ -0,0 +1,296 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by conversion-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ unsafe "unsafe"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ conversion "k8s.io/apimachinery/pkg/conversion"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ config "k8s.io/apiserver/pkg/apis/config"
+)
+
+func init() {
+ localSchemeBuilder.Register(RegisterConversions)
+}
+
+// RegisterConversions adds conversion functions to the given scheme.
+// Public to allow building arbitrary schemes.
+func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*AESConfiguration)(nil), (*config.AESConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_AESConfiguration_To_config_AESConfiguration(a.(*AESConfiguration), b.(*config.AESConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.AESConfiguration)(nil), (*AESConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_AESConfiguration_To_v1_AESConfiguration(a.(*config.AESConfiguration), b.(*AESConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*EncryptionConfiguration)(nil), (*config.EncryptionConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_EncryptionConfiguration_To_config_EncryptionConfiguration(a.(*EncryptionConfiguration), b.(*config.EncryptionConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.EncryptionConfiguration)(nil), (*EncryptionConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_EncryptionConfiguration_To_v1_EncryptionConfiguration(a.(*config.EncryptionConfiguration), b.(*EncryptionConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*IdentityConfiguration)(nil), (*config.IdentityConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_IdentityConfiguration_To_config_IdentityConfiguration(a.(*IdentityConfiguration), b.(*config.IdentityConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.IdentityConfiguration)(nil), (*IdentityConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_IdentityConfiguration_To_v1_IdentityConfiguration(a.(*config.IdentityConfiguration), b.(*IdentityConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*KMSConfiguration)(nil), (*config.KMSConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_KMSConfiguration_To_config_KMSConfiguration(a.(*KMSConfiguration), b.(*config.KMSConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.KMSConfiguration)(nil), (*KMSConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_KMSConfiguration_To_v1_KMSConfiguration(a.(*config.KMSConfiguration), b.(*KMSConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*Key)(nil), (*config.Key)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_Key_To_config_Key(a.(*Key), b.(*config.Key), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.Key)(nil), (*Key)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_Key_To_v1_Key(a.(*config.Key), b.(*Key), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ProviderConfiguration)(nil), (*config.ProviderConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ProviderConfiguration_To_config_ProviderConfiguration(a.(*ProviderConfiguration), b.(*config.ProviderConfiguration), scope) + }); err != nil { + return err + } + if err := 
s.AddGeneratedConversionFunc((*config.ProviderConfiguration)(nil), (*ProviderConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_ProviderConfiguration_To_v1_ProviderConfiguration(a.(*config.ProviderConfiguration), b.(*ProviderConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ResourceConfiguration)(nil), (*config.ResourceConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ResourceConfiguration_To_config_ResourceConfiguration(a.(*ResourceConfiguration), b.(*config.ResourceConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.ResourceConfiguration)(nil), (*ResourceConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_ResourceConfiguration_To_v1_ResourceConfiguration(a.(*config.ResourceConfiguration), b.(*ResourceConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*SecretboxConfiguration)(nil), (*config.SecretboxConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_SecretboxConfiguration_To_config_SecretboxConfiguration(a.(*SecretboxConfiguration), b.(*config.SecretboxConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.SecretboxConfiguration)(nil), (*SecretboxConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_SecretboxConfiguration_To_v1_SecretboxConfiguration(a.(*config.SecretboxConfiguration), b.(*SecretboxConfiguration), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1_AESConfiguration_To_config_AESConfiguration(in *AESConfiguration, out *config.AESConfiguration, s conversion.Scope) error { + out.Keys = *(*[]config.Key)(unsafe.Pointer(&in.Keys)) + return nil +} + +// Convert_v1_AESConfiguration_To_config_AESConfiguration is an autogenerated conversion function. +func Convert_v1_AESConfiguration_To_config_AESConfiguration(in *AESConfiguration, out *config.AESConfiguration, s conversion.Scope) error { + return autoConvert_v1_AESConfiguration_To_config_AESConfiguration(in, out, s) +} + +func autoConvert_config_AESConfiguration_To_v1_AESConfiguration(in *config.AESConfiguration, out *AESConfiguration, s conversion.Scope) error { + out.Keys = *(*[]Key)(unsafe.Pointer(&in.Keys)) + return nil +} + +// Convert_config_AESConfiguration_To_v1_AESConfiguration is an autogenerated conversion function. +func Convert_config_AESConfiguration_To_v1_AESConfiguration(in *config.AESConfiguration, out *AESConfiguration, s conversion.Scope) error { + return autoConvert_config_AESConfiguration_To_v1_AESConfiguration(in, out, s) +} + +func autoConvert_v1_EncryptionConfiguration_To_config_EncryptionConfiguration(in *EncryptionConfiguration, out *config.EncryptionConfiguration, s conversion.Scope) error { + out.Resources = *(*[]config.ResourceConfiguration)(unsafe.Pointer(&in.Resources)) + return nil +} + +// Convert_v1_EncryptionConfiguration_To_config_EncryptionConfiguration is an autogenerated conversion function. 
+func Convert_v1_EncryptionConfiguration_To_config_EncryptionConfiguration(in *EncryptionConfiguration, out *config.EncryptionConfiguration, s conversion.Scope) error { + return autoConvert_v1_EncryptionConfiguration_To_config_EncryptionConfiguration(in, out, s) +} + +func autoConvert_config_EncryptionConfiguration_To_v1_EncryptionConfiguration(in *config.EncryptionConfiguration, out *EncryptionConfiguration, s conversion.Scope) error { + out.Resources = *(*[]ResourceConfiguration)(unsafe.Pointer(&in.Resources)) + return nil +} + +// Convert_config_EncryptionConfiguration_To_v1_EncryptionConfiguration is an autogenerated conversion function. +func Convert_config_EncryptionConfiguration_To_v1_EncryptionConfiguration(in *config.EncryptionConfiguration, out *EncryptionConfiguration, s conversion.Scope) error { + return autoConvert_config_EncryptionConfiguration_To_v1_EncryptionConfiguration(in, out, s) +} + +func autoConvert_v1_IdentityConfiguration_To_config_IdentityConfiguration(in *IdentityConfiguration, out *config.IdentityConfiguration, s conversion.Scope) error { + return nil +} + +// Convert_v1_IdentityConfiguration_To_config_IdentityConfiguration is an autogenerated conversion function. +func Convert_v1_IdentityConfiguration_To_config_IdentityConfiguration(in *IdentityConfiguration, out *config.IdentityConfiguration, s conversion.Scope) error { + return autoConvert_v1_IdentityConfiguration_To_config_IdentityConfiguration(in, out, s) +} + +func autoConvert_config_IdentityConfiguration_To_v1_IdentityConfiguration(in *config.IdentityConfiguration, out *IdentityConfiguration, s conversion.Scope) error { + return nil +} + +// Convert_config_IdentityConfiguration_To_v1_IdentityConfiguration is an autogenerated conversion function. +func Convert_config_IdentityConfiguration_To_v1_IdentityConfiguration(in *config.IdentityConfiguration, out *IdentityConfiguration, s conversion.Scope) error { + return autoConvert_config_IdentityConfiguration_To_v1_IdentityConfiguration(in, out, s) +} + +func autoConvert_v1_KMSConfiguration_To_config_KMSConfiguration(in *KMSConfiguration, out *config.KMSConfiguration, s conversion.Scope) error { + out.Name = in.Name + out.CacheSize = (*int32)(unsafe.Pointer(in.CacheSize)) + out.Endpoint = in.Endpoint + out.Timeout = (*metav1.Duration)(unsafe.Pointer(in.Timeout)) + return nil +} + +// Convert_v1_KMSConfiguration_To_config_KMSConfiguration is an autogenerated conversion function. +func Convert_v1_KMSConfiguration_To_config_KMSConfiguration(in *KMSConfiguration, out *config.KMSConfiguration, s conversion.Scope) error { + return autoConvert_v1_KMSConfiguration_To_config_KMSConfiguration(in, out, s) +} + +func autoConvert_config_KMSConfiguration_To_v1_KMSConfiguration(in *config.KMSConfiguration, out *KMSConfiguration, s conversion.Scope) error { + out.Name = in.Name + out.CacheSize = (*int32)(unsafe.Pointer(in.CacheSize)) + out.Endpoint = in.Endpoint + out.Timeout = (*metav1.Duration)(unsafe.Pointer(in.Timeout)) + return nil +} + +// Convert_config_KMSConfiguration_To_v1_KMSConfiguration is an autogenerated conversion function. 
+func Convert_config_KMSConfiguration_To_v1_KMSConfiguration(in *config.KMSConfiguration, out *KMSConfiguration, s conversion.Scope) error { + return autoConvert_config_KMSConfiguration_To_v1_KMSConfiguration(in, out, s) +} + +func autoConvert_v1_Key_To_config_Key(in *Key, out *config.Key, s conversion.Scope) error { + out.Name = in.Name + out.Secret = in.Secret + return nil +} + +// Convert_v1_Key_To_config_Key is an autogenerated conversion function. +func Convert_v1_Key_To_config_Key(in *Key, out *config.Key, s conversion.Scope) error { + return autoConvert_v1_Key_To_config_Key(in, out, s) +} + +func autoConvert_config_Key_To_v1_Key(in *config.Key, out *Key, s conversion.Scope) error { + out.Name = in.Name + out.Secret = in.Secret + return nil +} + +// Convert_config_Key_To_v1_Key is an autogenerated conversion function. +func Convert_config_Key_To_v1_Key(in *config.Key, out *Key, s conversion.Scope) error { + return autoConvert_config_Key_To_v1_Key(in, out, s) +} + +func autoConvert_v1_ProviderConfiguration_To_config_ProviderConfiguration(in *ProviderConfiguration, out *config.ProviderConfiguration, s conversion.Scope) error { + out.AESGCM = (*config.AESConfiguration)(unsafe.Pointer(in.AESGCM)) + out.AESCBC = (*config.AESConfiguration)(unsafe.Pointer(in.AESCBC)) + out.Secretbox = (*config.SecretboxConfiguration)(unsafe.Pointer(in.Secretbox)) + out.Identity = (*config.IdentityConfiguration)(unsafe.Pointer(in.Identity)) + out.KMS = (*config.KMSConfiguration)(unsafe.Pointer(in.KMS)) + return nil +} + +// Convert_v1_ProviderConfiguration_To_config_ProviderConfiguration is an autogenerated conversion function. +func Convert_v1_ProviderConfiguration_To_config_ProviderConfiguration(in *ProviderConfiguration, out *config.ProviderConfiguration, s conversion.Scope) error { + return autoConvert_v1_ProviderConfiguration_To_config_ProviderConfiguration(in, out, s) +} + +func autoConvert_config_ProviderConfiguration_To_v1_ProviderConfiguration(in *config.ProviderConfiguration, out *ProviderConfiguration, s conversion.Scope) error { + out.AESGCM = (*AESConfiguration)(unsafe.Pointer(in.AESGCM)) + out.AESCBC = (*AESConfiguration)(unsafe.Pointer(in.AESCBC)) + out.Secretbox = (*SecretboxConfiguration)(unsafe.Pointer(in.Secretbox)) + out.Identity = (*IdentityConfiguration)(unsafe.Pointer(in.Identity)) + out.KMS = (*KMSConfiguration)(unsafe.Pointer(in.KMS)) + return nil +} + +// Convert_config_ProviderConfiguration_To_v1_ProviderConfiguration is an autogenerated conversion function. +func Convert_config_ProviderConfiguration_To_v1_ProviderConfiguration(in *config.ProviderConfiguration, out *ProviderConfiguration, s conversion.Scope) error { + return autoConvert_config_ProviderConfiguration_To_v1_ProviderConfiguration(in, out, s) +} + +func autoConvert_v1_ResourceConfiguration_To_config_ResourceConfiguration(in *ResourceConfiguration, out *config.ResourceConfiguration, s conversion.Scope) error { + out.Resources = *(*[]string)(unsafe.Pointer(&in.Resources)) + out.Providers = *(*[]config.ProviderConfiguration)(unsafe.Pointer(&in.Providers)) + return nil +} + +// Convert_v1_ResourceConfiguration_To_config_ResourceConfiguration is an autogenerated conversion function. 
+func Convert_v1_ResourceConfiguration_To_config_ResourceConfiguration(in *ResourceConfiguration, out *config.ResourceConfiguration, s conversion.Scope) error { + return autoConvert_v1_ResourceConfiguration_To_config_ResourceConfiguration(in, out, s) +} + +func autoConvert_config_ResourceConfiguration_To_v1_ResourceConfiguration(in *config.ResourceConfiguration, out *ResourceConfiguration, s conversion.Scope) error { + out.Resources = *(*[]string)(unsafe.Pointer(&in.Resources)) + out.Providers = *(*[]ProviderConfiguration)(unsafe.Pointer(&in.Providers)) + return nil +} + +// Convert_config_ResourceConfiguration_To_v1_ResourceConfiguration is an autogenerated conversion function. +func Convert_config_ResourceConfiguration_To_v1_ResourceConfiguration(in *config.ResourceConfiguration, out *ResourceConfiguration, s conversion.Scope) error { + return autoConvert_config_ResourceConfiguration_To_v1_ResourceConfiguration(in, out, s) +} + +func autoConvert_v1_SecretboxConfiguration_To_config_SecretboxConfiguration(in *SecretboxConfiguration, out *config.SecretboxConfiguration, s conversion.Scope) error { + out.Keys = *(*[]config.Key)(unsafe.Pointer(&in.Keys)) + return nil +} + +// Convert_v1_SecretboxConfiguration_To_config_SecretboxConfiguration is an autogenerated conversion function. +func Convert_v1_SecretboxConfiguration_To_config_SecretboxConfiguration(in *SecretboxConfiguration, out *config.SecretboxConfiguration, s conversion.Scope) error { + return autoConvert_v1_SecretboxConfiguration_To_config_SecretboxConfiguration(in, out, s) +} + +func autoConvert_config_SecretboxConfiguration_To_v1_SecretboxConfiguration(in *config.SecretboxConfiguration, out *SecretboxConfiguration, s conversion.Scope) error { + out.Keys = *(*[]Key)(unsafe.Pointer(&in.Keys)) + return nil +} + +// Convert_config_SecretboxConfiguration_To_v1_SecretboxConfiguration is an autogenerated conversion function. +func Convert_config_SecretboxConfiguration_To_v1_SecretboxConfiguration(in *config.SecretboxConfiguration, out *SecretboxConfiguration, s conversion.Scope) error { + return autoConvert_config_SecretboxConfiguration_To_v1_SecretboxConfiguration(in, out, s) +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/config/v1/zz_generated.deepcopy.go b/vendor/k8s.io/apiserver/pkg/apis/config/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000..dcb4e8552 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/config/v1/zz_generated.deepcopy.go @@ -0,0 +1,227 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AESConfiguration) DeepCopyInto(out *AESConfiguration) { + *out = *in + if in.Keys != nil { + in, out := &in.Keys, &out.Keys + *out = make([]Key, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AESConfiguration. +func (in *AESConfiguration) DeepCopy() *AESConfiguration { + if in == nil { + return nil + } + out := new(AESConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionConfiguration) DeepCopyInto(out *EncryptionConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]ResourceConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionConfiguration. +func (in *EncryptionConfiguration) DeepCopy() *EncryptionConfiguration { + if in == nil { + return nil + } + out := new(EncryptionConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EncryptionConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityConfiguration) DeepCopyInto(out *IdentityConfiguration) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityConfiguration. +func (in *IdentityConfiguration) DeepCopy() *IdentityConfiguration { + if in == nil { + return nil + } + out := new(IdentityConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KMSConfiguration) DeepCopyInto(out *KMSConfiguration) { + *out = *in + if in.CacheSize != nil { + in, out := &in.CacheSize, &out.CacheSize + *out = new(int32) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(metav1.Duration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KMSConfiguration. +func (in *KMSConfiguration) DeepCopy() *KMSConfiguration { + if in == nil { + return nil + } + out := new(KMSConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Key) DeepCopyInto(out *Key) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Key. +func (in *Key) DeepCopy() *Key { + if in == nil { + return nil + } + out := new(Key) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProviderConfiguration) DeepCopyInto(out *ProviderConfiguration) { + *out = *in + if in.AESGCM != nil { + in, out := &in.AESGCM, &out.AESGCM + *out = new(AESConfiguration) + (*in).DeepCopyInto(*out) + } + if in.AESCBC != nil { + in, out := &in.AESCBC, &out.AESCBC + *out = new(AESConfiguration) + (*in).DeepCopyInto(*out) + } + if in.Secretbox != nil { + in, out := &in.Secretbox, &out.Secretbox + *out = new(SecretboxConfiguration) + (*in).DeepCopyInto(*out) + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityConfiguration) + **out = **in + } + if in.KMS != nil { + in, out := &in.KMS, &out.KMS + *out = new(KMSConfiguration) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderConfiguration. +func (in *ProviderConfiguration) DeepCopy() *ProviderConfiguration { + if in == nil { + return nil + } + out := new(ProviderConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceConfiguration) DeepCopyInto(out *ResourceConfiguration) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Providers != nil { + in, out := &in.Providers, &out.Providers + *out = make([]ProviderConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceConfiguration. +func (in *ResourceConfiguration) DeepCopy() *ResourceConfiguration { + if in == nil { + return nil + } + out := new(ResourceConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretboxConfiguration) DeepCopyInto(out *SecretboxConfiguration) { + *out = *in + if in.Keys != nil { + in, out := &in.Keys, &out.Keys + *out = make([]Key, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretboxConfiguration. +func (in *SecretboxConfiguration) DeepCopy() *SecretboxConfiguration { + if in == nil { + return nil + } + out := new(SecretboxConfiguration) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/config/v1/zz_generated.defaults.go b/vendor/k8s.io/apiserver/pkg/apis/config/v1/zz_generated.defaults.go new file mode 100644 index 000000000..1c8db8d04 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/config/v1/zz_generated.defaults.go @@ -0,0 +1,45 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by defaulter-gen. DO NOT EDIT. 
+ +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + scheme.AddTypeDefaultingFunc(&EncryptionConfiguration{}, func(obj interface{}) { SetObjectDefaults_EncryptionConfiguration(obj.(*EncryptionConfiguration)) }) + return nil +} + +func SetObjectDefaults_EncryptionConfiguration(in *EncryptionConfiguration) { + for i := range in.Resources { + a := &in.Resources[i] + for j := range a.Providers { + b := &a.Providers[j] + if b.KMS != nil { + SetDefaults_KMSConfiguration(b.KMS) + } + } + } +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/config/validation/validation.go b/vendor/k8s.io/apiserver/pkg/apis/config/validation/validation.go new file mode 100644 index 000000000..966ff1f0d --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/config/validation/validation.go @@ -0,0 +1,220 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package validation validates EncryptionConfiguration. +package validation + +import ( + "encoding/base64" + "fmt" + "net/url" + + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/apis/config" +) + +const ( + moreThanOneElementErr = "more than one provider specified in a single element, should split into different list elements" + keyLenErrFmt = "secret is not of the expected length, got %d, expected one of %v" + unsupportedSchemeErrFmt = "unsupported scheme %q for KMS provider, only unix is supported" + atLeastOneRequiredErrFmt = "at least one %s is required" + invalidURLErrFmt = "invalid endpoint for kms provider, error: parse %s: net/url: invalid control character in URL" + mandatoryFieldErrFmt = "%s is a mandatory field for a %s" + base64EncodingErr = "secrets must be base64 encoded" + zeroOrNegativeErrFmt = "%s should be a positive value" + nonZeroErrFmt = "%s should be a positive value, or negative to disable" + encryptionConfigNilErr = "EncryptionConfiguration can't be nil" +) + +var ( + aesKeySizes = []int{16, 24, 32} + // See https://golang.org/pkg/crypto/aes/#NewCipher for details on supported key sizes for AES. + secretBoxKeySizes = []int{32} + // See https://godoc.org/golang.org/x/crypto/nacl/secretbox#Open for details on the supported key sizes for Secretbox. + root = field.NewPath("resources") +) + +// ValidateEncryptionConfiguration validates a v1.EncryptionConfiguration. 
+func ValidateEncryptionConfiguration(c *config.EncryptionConfiguration) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ if c == nil {
+ allErrs = append(allErrs, field.Required(root, "EncryptionConfiguration can't be nil"))
+ return allErrs
+ }
+
+ if len(c.Resources) == 0 {
+ allErrs = append(allErrs, field.Required(root, fmt.Sprintf(atLeastOneRequiredErrFmt, root)))
+ return allErrs
+ }
+
+ for i, conf := range c.Resources {
+ r := root.Index(i).Child("resources")
+ p := root.Index(i).Child("providers")
+
+ if len(conf.Resources) == 0 {
+ allErrs = append(allErrs, field.Required(r, fmt.Sprintf(atLeastOneRequiredErrFmt, r)))
+ }
+
+ if len(conf.Providers) == 0 {
+ allErrs = append(allErrs, field.Required(p, fmt.Sprintf(atLeastOneRequiredErrFmt, p)))
+ }
+
+ for j, provider := range conf.Providers {
+ path := p.Index(j)
+ allErrs = append(allErrs, validateSingleProvider(provider, path)...)
+
+ switch {
+ case provider.KMS != nil:
+ allErrs = append(allErrs, validateKMSConfiguration(provider.KMS, path.Child("kms"))...)
+ case provider.AESGCM != nil:
+ allErrs = append(allErrs, validateKeys(provider.AESGCM.Keys, path.Child("aesgcm").Child("keys"), aesKeySizes)...)
+ case provider.AESCBC != nil:
+ allErrs = append(allErrs, validateKeys(provider.AESCBC.Keys, path.Child("aescbc").Child("keys"), aesKeySizes)...)
+ case provider.Secretbox != nil:
+ allErrs = append(allErrs, validateKeys(provider.Secretbox.Keys, path.Child("secretbox").Child("keys"), secretBoxKeySizes)...)
+ }
+ }
+ }
+
+ return allErrs
+}
+
+func validateSingleProvider(provider config.ProviderConfiguration, fieldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ found := 0
+
+ if provider.KMS != nil {
+ found++
+ }
+ if provider.AESGCM != nil {
+ found++
+ }
+ if provider.AESCBC != nil {
+ found++
+ }
+ if provider.Secretbox != nil {
+ found++
+ }
+ if provider.Identity != nil {
+ found++
+ }
+
+ if found == 0 {
+ return append(allErrs, field.Invalid(fieldPath, provider, "provider does not contain any of the expected providers: KMS, AESGCM, AESCBC, Secretbox, Identity"))
+ }
+
+ if found > 1 {
+ return append(allErrs, field.Invalid(fieldPath, provider, moreThanOneElementErr))
+ }
+
+ return allErrs
+}
+
+func validateKeys(keys []config.Key, fieldPath *field.Path, expectedLen []int) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ if len(keys) == 0 {
+ allErrs = append(allErrs, field.Required(fieldPath, fmt.Sprintf(atLeastOneRequiredErrFmt, "keys")))
+ return allErrs
+ }
+
+ for i, key := range keys {
+ allErrs = append(allErrs, validateKey(key, fieldPath.Index(i), expectedLen)...)
+ } + + return allErrs +} + +func validateKey(key config.Key, fieldPath *field.Path, expectedLen []int) field.ErrorList { + allErrs := field.ErrorList{} + + if key.Name == "" { + allErrs = append(allErrs, field.Required(fieldPath.Child("name"), fmt.Sprintf(mandatoryFieldErrFmt, "name", "key"))) + } + + if key.Secret == "" { + allErrs = append(allErrs, field.Required(fieldPath.Child("secret"), fmt.Sprintf(mandatoryFieldErrFmt, "secret", "key"))) + return allErrs + } + + secret, err := base64.StdEncoding.DecodeString(key.Secret) + if err != nil { + allErrs = append(allErrs, field.Invalid(fieldPath.Child("secret"), "REDACTED", base64EncodingErr)) + return allErrs + } + + lenMatched := false + for _, l := range expectedLen { + if len(secret) == l { + lenMatched = true + break + } + } + + if !lenMatched { + allErrs = append(allErrs, field.Invalid(fieldPath.Child("secret"), "REDACTED", fmt.Sprintf(keyLenErrFmt, len(secret), expectedLen))) + } + + return allErrs +} + +func validateKMSConfiguration(c *config.KMSConfiguration, fieldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if c.Name == "" { + allErrs = append(allErrs, field.Required(fieldPath.Child("name"), fmt.Sprintf(mandatoryFieldErrFmt, "name", "provider"))) + } + allErrs = append(allErrs, validateKMSTimeout(c, fieldPath.Child("timeout"))...) + allErrs = append(allErrs, validateKMSEndpoint(c, fieldPath.Child("endpoint"))...) + allErrs = append(allErrs, validateKMSCacheSize(c, fieldPath.Child("cachesize"))...) + return allErrs +} + +func validateKMSCacheSize(c *config.KMSConfiguration, fieldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if *c.CacheSize == 0 { + allErrs = append(allErrs, field.Invalid(fieldPath, *c.CacheSize, fmt.Sprintf(nonZeroErrFmt, "cachesize"))) + } + + return allErrs +} + +func validateKMSTimeout(c *config.KMSConfiguration, fieldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if c.Timeout.Duration <= 0 { + allErrs = append(allErrs, field.Invalid(fieldPath, c.Timeout, fmt.Sprintf(zeroOrNegativeErrFmt, "timeout"))) + } + + return allErrs +} + +func validateKMSEndpoint(c *config.KMSConfiguration, fieldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(c.Endpoint) == 0 { + return append(allErrs, field.Invalid(fieldPath, "", fmt.Sprintf(mandatoryFieldErrFmt, "endpoint", "kms"))) + } + + u, err := url.Parse(c.Endpoint) + if err != nil { + return append(allErrs, field.Invalid(fieldPath, c.Endpoint, fmt.Sprintf("invalid endpoint for kms provider, error: %v", err))) + } + + if u.Scheme != "unix" { + return append(allErrs, field.Invalid(fieldPath, c.Endpoint, fmt.Sprintf(unsupportedSchemeErrFmt, u.Scheme))) + } + + return allErrs +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/config/zz_generated.deepcopy.go b/vendor/k8s.io/apiserver/pkg/apis/config/zz_generated.deepcopy.go new file mode 100644 index 000000000..dd66315ee --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/config/zz_generated.deepcopy.go @@ -0,0 +1,227 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package config + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AESConfiguration) DeepCopyInto(out *AESConfiguration) { + *out = *in + if in.Keys != nil { + in, out := &in.Keys, &out.Keys + *out = make([]Key, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AESConfiguration. +func (in *AESConfiguration) DeepCopy() *AESConfiguration { + if in == nil { + return nil + } + out := new(AESConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionConfiguration) DeepCopyInto(out *EncryptionConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]ResourceConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionConfiguration. +func (in *EncryptionConfiguration) DeepCopy() *EncryptionConfiguration { + if in == nil { + return nil + } + out := new(EncryptionConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EncryptionConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityConfiguration) DeepCopyInto(out *IdentityConfiguration) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityConfiguration. +func (in *IdentityConfiguration) DeepCopy() *IdentityConfiguration { + if in == nil { + return nil + } + out := new(IdentityConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KMSConfiguration) DeepCopyInto(out *KMSConfiguration) { + *out = *in + if in.CacheSize != nil { + in, out := &in.CacheSize, &out.CacheSize + *out = new(int32) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(v1.Duration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KMSConfiguration. +func (in *KMSConfiguration) DeepCopy() *KMSConfiguration { + if in == nil { + return nil + } + out := new(KMSConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Key) DeepCopyInto(out *Key) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Key. 
+func (in *Key) DeepCopy() *Key { + if in == nil { + return nil + } + out := new(Key) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProviderConfiguration) DeepCopyInto(out *ProviderConfiguration) { + *out = *in + if in.AESGCM != nil { + in, out := &in.AESGCM, &out.AESGCM + *out = new(AESConfiguration) + (*in).DeepCopyInto(*out) + } + if in.AESCBC != nil { + in, out := &in.AESCBC, &out.AESCBC + *out = new(AESConfiguration) + (*in).DeepCopyInto(*out) + } + if in.Secretbox != nil { + in, out := &in.Secretbox, &out.Secretbox + *out = new(SecretboxConfiguration) + (*in).DeepCopyInto(*out) + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityConfiguration) + **out = **in + } + if in.KMS != nil { + in, out := &in.KMS, &out.KMS + *out = new(KMSConfiguration) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderConfiguration. +func (in *ProviderConfiguration) DeepCopy() *ProviderConfiguration { + if in == nil { + return nil + } + out := new(ProviderConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceConfiguration) DeepCopyInto(out *ResourceConfiguration) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Providers != nil { + in, out := &in.Providers, &out.Providers + *out = make([]ProviderConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceConfiguration. +func (in *ResourceConfiguration) DeepCopy() *ResourceConfiguration { + if in == nil { + return nil + } + out := new(ResourceConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretboxConfiguration) DeepCopyInto(out *SecretboxConfiguration) { + *out = *in + if in.Keys != nil { + in, out := &in.Keys, &out.Keys + *out = make([]Key, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretboxConfiguration. +func (in *SecretboxConfiguration) DeepCopy() *SecretboxConfiguration { + if in == nil { + return nil + } + out := new(SecretboxConfiguration) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/apiserver/pkg/authorization/path/doc.go b/vendor/k8s.io/apiserver/pkg/authorization/path/doc.go new file mode 100644 index 000000000..654aaeb74 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authorization/path/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+// Package path contains an authorizer that allows certain paths and path prefixes.
+package path // import "k8s.io/apiserver/pkg/authorization/path"
diff --git a/vendor/k8s.io/apiserver/pkg/authorization/path/path.go b/vendor/k8s.io/apiserver/pkg/authorization/path/path.go
new file mode 100644
index 000000000..0e1ec2338
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/authorization/path/path.go
@@ -0,0 +1,68 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package path
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ "k8s.io/apimachinery/pkg/util/sets"
+ "k8s.io/apiserver/pkg/authorization/authorizer"
+)
+
+// NewAuthorizer returns an authorizer which accepts a given set of paths.
+// Each path either matches the request path in full, or ends in * to allow a prefix match. A leading / is optional.
+func NewAuthorizer(alwaysAllowPaths []string) (authorizer.Authorizer, error) {
+ var prefixes []string
+ paths := sets.NewString()
+ for _, p := range alwaysAllowPaths {
+ p = strings.TrimPrefix(p, "/")
+ if len(p) == 0 {
+ // matches "/"
+ paths.Insert(p)
+ continue
+ }
+ if strings.ContainsRune(p[:len(p)-1], '*') {
+ return nil, fmt.Errorf("only trailing * allowed in %q", p)
+ }
+ if strings.HasSuffix(p, "*") {
+ prefixes = append(prefixes, p[:len(p)-1])
+ } else {
+ paths.Insert(p)
+ }
+ }
+
+ return authorizer.AuthorizerFunc(func(ctx context.Context, a authorizer.Attributes) (authorizer.Decision, string, error) {
+ if a.IsResourceRequest() {
+ return authorizer.DecisionNoOpinion, "", nil
+ }
+
+ pth := strings.TrimPrefix(a.GetPath(), "/")
+ if paths.Has(pth) {
+ return authorizer.DecisionAllow, "", nil
+ }
+
+ for _, prefix := range prefixes {
+ if strings.HasPrefix(pth, prefix) {
+ return authorizer.DecisionAllow, "", nil
+ }
+ }
+
+ return authorizer.DecisionNoOpinion, "", nil
+ }), nil
+}
diff --git a/vendor/k8s.io/apiserver/pkg/registry/generic/registry/decorated_watcher.go b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/decorated_watcher.go
new file mode 100644
index 000000000..e066d2a7f
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/decorated_watcher.go
@@ -0,0 +1,91 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package registry
+
+import (
+ "context"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/watch"
+)
+
+type decoratedWatcher struct {
+ w watch.Interface
+ decorator func(runtime.Object)
+ cancel context.CancelFunc
+ resultCh chan watch.Event
+}
+
+func newDecoratedWatcher(ctx context.Context, w watch.Interface, decorator func(runtime.Object)) *decoratedWatcher {
+ ctx, cancel := context.WithCancel(ctx)
+ d := &decoratedWatcher{
+ w: w,
+ decorator: decorator,
+ cancel: cancel,
+ resultCh: make(chan watch.Event),
+ }
+ go d.run(ctx)
+ return d
+}
+
+// run decorates watch events from the underlying watcher until its result channel
+// is closed or the passed in context is done.
+// When run() returns, decoratedWatcher#resultCh is closed.
+func (d *decoratedWatcher) run(ctx context.Context) {
+ var recv, send watch.Event
+ var ok bool
+ defer close(d.resultCh)
+ for {
+ select {
+ case recv, ok = <-d.w.ResultChan():
+ if !ok {
+ // The underlying channel was closed, cancel our context
+ d.cancel()
+ return
+ }
+ switch recv.Type {
+ case watch.Added, watch.Modified, watch.Deleted, watch.Bookmark:
+ d.decorator(recv.Object)
+ send = recv
+ case watch.Error:
+ send = recv
+ }
+ select {
+ case d.resultCh <- send:
+ // propagated event successfully
+ case <-ctx.Done():
+ // context timed out or was cancelled, stop the underlying watcher
+ d.w.Stop()
+ return
+ }
+ case <-ctx.Done():
+ // context timed out or was cancelled, stop the underlying watcher
+ d.w.Stop()
+ return
+ }
+ }
+}
+
+func (d *decoratedWatcher) Stop() {
+ // stop the underlying watcher
+ d.w.Stop()
+ // cancel our context
+ d.cancel()
+}
+
+func (d *decoratedWatcher) ResultChan() <-chan watch.Event {
+ return d.resultCh
+}
diff --git a/vendor/k8s.io/apiserver/pkg/registry/generic/registry/doc.go b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/doc.go
new file mode 100644
index 000000000..bd315ae47
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package registry has a generic implementation of a registry that
+// stores things in etcd.
+package registry // import "k8s.io/apiserver/pkg/registry/generic/registry"
diff --git a/vendor/k8s.io/apiserver/pkg/registry/generic/registry/dryrun.go b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/dryrun.go
new file mode 100644
index 000000000..e25684a8a
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/dryrun.go
@@ -0,0 +1,121 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package registry + +import ( + "context" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/apiserver/pkg/storage" +) + +type DryRunnableStorage struct { + Storage storage.Interface + Codec runtime.Codec +} + +func (s *DryRunnableStorage) Versioner() storage.Versioner { + return s.Storage.Versioner() +} + +func (s *DryRunnableStorage) Create(ctx context.Context, key string, obj, out runtime.Object, ttl uint64, dryRun bool) error { + if dryRun { + if err := s.Storage.Get(ctx, key, storage.GetOptions{}, out); err == nil { + return storage.NewKeyExistsError(key, 0) + } + return s.copyInto(obj, out) + } + return s.Storage.Create(ctx, key, obj, out, ttl) +} + +func (s *DryRunnableStorage) Delete(ctx context.Context, key string, out runtime.Object, preconditions *storage.Preconditions, deleteValidation storage.ValidateObjectFunc, dryRun bool, cachedExistingObject runtime.Object) error { + if dryRun { + if err := s.Storage.Get(ctx, key, storage.GetOptions{}, out); err != nil { + return err + } + if err := preconditions.Check(key, out); err != nil { + return err + } + return deleteValidation(ctx, out) + } + return s.Storage.Delete(ctx, key, out, preconditions, deleteValidation, cachedExistingObject) +} + +func (s *DryRunnableStorage) Watch(ctx context.Context, key string, opts storage.ListOptions) (watch.Interface, error) { + return s.Storage.Watch(ctx, key, opts) +} + +func (s *DryRunnableStorage) WatchList(ctx context.Context, key string, opts storage.ListOptions) (watch.Interface, error) { + return s.Storage.WatchList(ctx, key, opts) +} + +func (s *DryRunnableStorage) Get(ctx context.Context, key string, opts storage.GetOptions, objPtr runtime.Object) error { + return s.Storage.Get(ctx, key, opts, objPtr) +} + +func (s *DryRunnableStorage) GetToList(ctx context.Context, key string, opts storage.ListOptions, listObj runtime.Object) error { + return s.Storage.GetToList(ctx, key, opts, listObj) +} + +func (s *DryRunnableStorage) List(ctx context.Context, key string, opts storage.ListOptions, listObj runtime.Object) error { + return s.Storage.List(ctx, key, opts, listObj) +} + +func (s *DryRunnableStorage) GuaranteedUpdate( + ctx context.Context, key string, ptrToType runtime.Object, ignoreNotFound bool, + preconditions *storage.Preconditions, tryUpdate storage.UpdateFunc, dryRun bool, cachedExistingObject runtime.Object) error { + if dryRun { + err := s.Storage.Get(ctx, key, storage.GetOptions{IgnoreNotFound: ignoreNotFound}, ptrToType) + if err != nil { + return err + } + err = preconditions.Check(key, ptrToType) + if err != nil { + return err + } + rev, err := s.Versioner().ObjectResourceVersion(ptrToType) + if err != nil { + return err + } + out, _, err := tryUpdate(ptrToType, storage.ResponseMeta{ResourceVersion: rev}) + if err != nil { + return err + } + return s.copyInto(out, ptrToType) + } + return s.Storage.GuaranteedUpdate(ctx, key, ptrToType, ignoreNotFound, preconditions, tryUpdate, cachedExistingObject) +} + +func (s *DryRunnableStorage) Count(key string) (int64, error) { + return s.Storage.Count(key) +} + +func (s *DryRunnableStorage) copyInto(in, out runtime.Object) error { + var data []byte + + data, err := runtime.Encode(s.Codec, in) + if err != nil { + return err + } + _, _, err = s.Codec.Decode(data, nil, out) + if err != nil { + return err + } + return nil + +} diff --git 
a/vendor/k8s.io/apiserver/pkg/registry/generic/registry/storage_factory.go b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/storage_factory.go
new file mode 100644
index 000000000..f2fa59723
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/storage_factory.go
@@ -0,0 +1,138 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package registry
+
+import (
+	"fmt"
+	"sync"
+
+	"k8s.io/klog/v2"
+
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apiserver/pkg/registry/generic"
+	"k8s.io/apiserver/pkg/storage"
+	cacherstorage "k8s.io/apiserver/pkg/storage/cacher"
+	"k8s.io/apiserver/pkg/storage/etcd3"
+	"k8s.io/apiserver/pkg/storage/storagebackend"
+	"k8s.io/apiserver/pkg/storage/storagebackend/factory"
+	"k8s.io/client-go/tools/cache"
+)
+
+// StorageWithCacher returns a StorageDecorator that creates a cacher on top
+// of the given storageConfig.
+func StorageWithCacher() generic.StorageDecorator {
+	return func(
+		storageConfig *storagebackend.Config,
+		resourcePrefix string,
+		keyFunc func(obj runtime.Object) (string, error),
+		newFunc func() runtime.Object,
+		newListFunc func() runtime.Object,
+		getAttrsFunc storage.AttrFunc,
+		triggerFuncs storage.IndexerFuncs,
+		indexers *cache.Indexers) (storage.Interface, factory.DestroyFunc, error) {
+
+		s, d, err := generic.NewRawStorage(storageConfig, newFunc)
+		if err != nil {
+			return s, d, err
+		}
+		if klog.V(5).Enabled() {
+			klog.InfoS("Storage caching is enabled", objectTypeToArgs(newFunc())...)
+		}
+
+		cacherConfig := cacherstorage.Config{
+			Storage:        s,
+			Versioner:      etcd3.APIObjectVersioner{},
+			ResourcePrefix: resourcePrefix,
+			KeyFunc:        keyFunc,
+			NewFunc:        newFunc,
+			NewListFunc:    newListFunc,
+			GetAttrsFunc:   getAttrsFunc,
+			IndexerFuncs:   triggerFuncs,
+			Indexers:       indexers,
+			Codec:          storageConfig.Codec,
+		}
+		cacher, err := cacherstorage.NewCacherFromConfig(cacherConfig)
+		if err != nil {
+			return nil, func() {}, err
+		}
+		destroyFunc := func() {
+			cacher.Stop()
+			d()
+		}
+
+		// TODO: Remove RegisterStorageCleanup below when PR
+		// https://github.com/kubernetes/kubernetes/pull/50690
+		// merges, as that shuts down storage properly.
+		RegisterStorageCleanup(destroyFunc)
+
+		return cacher, destroyFunc, nil
+	}
+}
+
+func objectTypeToArgs(obj runtime.Object) []interface{} {
+	// special-case unstructured objects that tell us their apiVersion/kind
+	if u, isUnstructured := obj.(*unstructured.Unstructured); isUnstructured {
+		if apiVersion, kind := u.GetAPIVersion(), u.GetKind(); len(apiVersion) > 0 && len(kind) > 0 {
+			return []interface{}{"apiVersion", apiVersion, "kind", kind}
+		}
+	}
+
+	// otherwise just return the type
+	return []interface{}{"type", fmt.Sprintf("%T", obj)}
+}
+
+// TODO: Remove all the code below when PR
+// https://github.com/kubernetes/kubernetes/pull/50690
+// merges, as that shuts down storage properly.
+// HACK ALERT: Track the destroy methods to call them
+// from the test harness.
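+// Editor's sketch (not part of the upstream file): a test harness might drive
+// these hooks roughly like
+//
+//	func TestMain(m *testing.M) {
+//		TrackStorageCleanup()
+//		code := m.Run()
+//		CleanupStorage()
+//		os.Exit(code)
+//	}
+//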
TrackStorageCleanup will be called +// only from the test harness, so Register/Cleanup will be +// no-op at runtime. + +var cleanupLock sync.Mutex +var cleanup []func() = nil + +func TrackStorageCleanup() { + cleanupLock.Lock() + defer cleanupLock.Unlock() + + if cleanup != nil { + panic("Conflicting storage tracking") + } + cleanup = make([]func(), 0) +} + +func RegisterStorageCleanup(fn func()) { + cleanupLock.Lock() + defer cleanupLock.Unlock() + + if cleanup == nil { + return + } + cleanup = append(cleanup, fn) +} + +func CleanupStorage() { + cleanupLock.Lock() + old := cleanup + cleanup = nil + cleanupLock.Unlock() + + for _, d := range old { + d() + } +} diff --git a/vendor/k8s.io/apiserver/pkg/registry/generic/registry/store.go b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/store.go new file mode 100644 index 000000000..7a626855a --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/store.go @@ -0,0 +1,1478 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package registry + +import ( + "context" + "fmt" + "strings" + "sync" + "time" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/api/validation/path" + metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apimachinery/pkg/watch" + genericapirequest "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/registry/generic" + "k8s.io/apiserver/pkg/registry/rest" + "k8s.io/apiserver/pkg/storage" + storeerr "k8s.io/apiserver/pkg/storage/errors" + "k8s.io/apiserver/pkg/storage/etcd3/metrics" + "k8s.io/apiserver/pkg/util/dryrun" + "k8s.io/client-go/tools/cache" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + + "k8s.io/klog/v2" +) + +// FinishFunc is a function returned by Begin hooks to complete an operation. +type FinishFunc func(ctx context.Context, success bool) + +// AfterDeleteFunc is the type used for the Store.AfterDelete hook. +type AfterDeleteFunc func(obj runtime.Object, options *metav1.DeleteOptions) + +// BeginCreateFunc is the type used for the Store.BeginCreate hook. +type BeginCreateFunc func(ctx context.Context, obj runtime.Object, options *metav1.CreateOptions) (FinishFunc, error) + +// AfterCreateFunc is the type used for the Store.AfterCreate hook. +type AfterCreateFunc func(obj runtime.Object, options *metav1.CreateOptions) + +// BeginUpdateFunc is the type used for the Store.BeginUpdate hook. +type BeginUpdateFunc func(ctx context.Context, obj, old runtime.Object, options *metav1.UpdateOptions) (FinishFunc, error) + +// AfterUpdateFunc is the type used for the Store.AfterUpdate hook. 
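+//
+// Editorial illustration of the Begin*/Finish hook contract above (a sketch,
+// not upstream code): a BeginUpdateFunc stages work and returns a FinishFunc
+// that is told whether the operation succeeded:
+//
+//	var begin BeginUpdateFunc = func(ctx context.Context, obj, old runtime.Object, options *metav1.UpdateOptions) (FinishFunc, error) {
+//		// stage side effects here
+//		return func(ctx context.Context, success bool) {
+//			// commit when success is true, revert otherwise
+//		}, nil
+//	}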
+type AfterUpdateFunc func(obj runtime.Object, options *metav1.UpdateOptions) + +// GenericStore interface can be used for type assertions when we need to access the underlying strategies. +type GenericStore interface { + GetCreateStrategy() rest.RESTCreateStrategy + GetUpdateStrategy() rest.RESTUpdateStrategy + GetDeleteStrategy() rest.RESTDeleteStrategy +} + +// Store implements k8s.io/apiserver/pkg/registry/rest.StandardStorage. It's +// intended to be embeddable and allows the consumer to implement any +// non-generic functions that are required. This object is intended to be +// copyable so that it can be used in different ways but share the same +// underlying behavior. +// +// All fields are required unless specified. +// +// The intended use of this type is embedding within a Kind specific +// RESTStorage implementation. This type provides CRUD semantics on a Kubelike +// resource, handling details like conflict detection with ResourceVersion and +// semantics. The RESTCreateStrategy, RESTUpdateStrategy, and +// RESTDeleteStrategy are generic across all backends, and encapsulate logic +// specific to the API. +// +// TODO: make the default exposed methods exactly match a generic RESTStorage +type Store struct { + // NewFunc returns a new instance of the type this registry returns for a + // GET of a single object, e.g.: + // + // curl GET /apis/group/version/namespaces/my-ns/myresource/name-of-object + NewFunc func() runtime.Object + + // NewListFunc returns a new list of the type this registry; it is the + // type returned when the resource is listed, e.g.: + // + // curl GET /apis/group/version/namespaces/my-ns/myresource + NewListFunc func() runtime.Object + + // DefaultQualifiedResource is the pluralized name of the resource. + // This field is used if there is no request info present in the context. + // See qualifiedResourceFromContext for details. + DefaultQualifiedResource schema.GroupResource + + // KeyRootFunc returns the root etcd key for this resource; should not + // include trailing "/". This is used for operations that work on the + // entire collection (listing and watching). + // + // KeyRootFunc and KeyFunc must be supplied together or not at all. + KeyRootFunc func(ctx context.Context) string + + // KeyFunc returns the key for a specific object in the collection. + // KeyFunc is called for Create/Update/Get/Delete. Note that 'namespace' + // can be gotten from ctx. + // + // KeyFunc and KeyRootFunc must be supplied together or not at all. + KeyFunc func(ctx context.Context, name string) (string, error) + + // ObjectNameFunc returns the name of an object or an error. + ObjectNameFunc func(obj runtime.Object) (string, error) + + // TTLFunc returns the TTL (time to live) that objects should be persisted + // with. The existing parameter is the current TTL or the default for this + // operation. The update parameter indicates whether this is an operation + // against an existing object. + // + // Objects that are persisted with a TTL are evicted once the TTL expires. + TTLFunc func(obj runtime.Object, existing uint64, update bool) (uint64, error) + + // PredicateFunc returns a matcher corresponding to the provided labels + // and fields. The SelectionPredicate returned should return true if the + // object matches the given field and label selectors. + PredicateFunc func(label labels.Selector, field fields.Selector) storage.SelectionPredicate + + // EnableGarbageCollection affects the handling of Update and Delete + // requests. 
Enabling garbage collection allows finalizers to do work to + // finalize this object before the store deletes it. + // + // If any store has garbage collection enabled, it must also be enabled in + // the kube-controller-manager. + EnableGarbageCollection bool + + // DeleteCollectionWorkers is the maximum number of workers in a single + // DeleteCollection call. Delete requests for the items in a collection + // are issued in parallel. + DeleteCollectionWorkers int + + // Decorator is an optional exit hook on an object returned from the + // underlying storage. The returned object could be an individual object + // (e.g. Pod) or a list type (e.g. PodList). Decorator is intended for + // integrations that are above storage and should only be used for + // specific cases where storage of the value is not appropriate, since + // they cannot be watched. + Decorator func(runtime.Object) + + // CreateStrategy implements resource-specific behavior during creation. + CreateStrategy rest.RESTCreateStrategy + // BeginCreate is an optional hook that returns a "transaction-like" + // commit/revert function which will be called at the end of the operation, + // but before AfterCreate and Decorator, indicating via the argument + // whether the operation succeeded. If this returns an error, the function + // is not called. Almost nobody should use this hook. + BeginCreate BeginCreateFunc + // AfterCreate implements a further operation to run after a resource is + // created and before it is decorated, optional. + AfterCreate AfterCreateFunc + + // UpdateStrategy implements resource-specific behavior during updates. + UpdateStrategy rest.RESTUpdateStrategy + // BeginUpdate is an optional hook that returns a "transaction-like" + // commit/revert function which will be called at the end of the operation, + // but before AfterUpdate and Decorator, indicating via the argument + // whether the operation succeeded. If this returns an error, the function + // is not called. Almost nobody should use this hook. + BeginUpdate BeginUpdateFunc + // AfterUpdate implements a further operation to run after a resource is + // updated and before it is decorated, optional. + AfterUpdate AfterUpdateFunc + + // DeleteStrategy implements resource-specific behavior during deletion. + DeleteStrategy rest.RESTDeleteStrategy + // AfterDelete implements a further operation to run after a resource is + // deleted and before it is decorated, optional. + AfterDelete AfterDeleteFunc + // ReturnDeletedObject determines whether the Store returns the object + // that was deleted. Otherwise, return a generic success status response. + ReturnDeletedObject bool + // ShouldDeleteDuringUpdate is an optional function to determine whether + // an update from existing to obj should result in a delete. + // If specified, this is checked in addition to standard finalizer, + // deletionTimestamp, and deletionGracePeriodSeconds checks. + ShouldDeleteDuringUpdate func(ctx context.Context, key string, obj, existing runtime.Object) bool + + // TableConvertor is an optional interface for transforming items or lists + // of items into tabular output. If unset, the default will be used. + TableConvertor rest.TableConvertor + + // ResetFieldsStrategy provides the fields reset by the strategy that + // should not be modified by the user. + ResetFieldsStrategy rest.ResetFieldsStrategy + + // Storage is the interface for the underlying storage for the + // resource. 
It is wrapped into a "DryRunnableStorage" that will
+	// either pass through or simply dry-run.
+	Storage DryRunnableStorage
+	// StorageVersioner outputs the group/version an object will be
+	// converted to before being persisted in etcd, given a list of possible
+	// kinds of the object.
+	// If the StorageVersioner is nil, the apiserver will leave the
+	// storageVersionHash as empty in the discovery document.
+	StorageVersioner runtime.GroupVersioner
+	// Called to clean up clients used by the underlying Storage; optional.
+	DestroyFunc func()
+}
+
+// Note: the rest.StandardStorage interface aggregates the common REST verbs
+var _ rest.StandardStorage = &Store{}
+var _ rest.TableConvertor = &Store{}
+var _ GenericStore = &Store{}
+
+const (
+	OptimisticLockErrorMsg        = "the object has been modified; please apply your changes to the latest version and try again"
+	resourceCountPollPeriodJitter = 1.2
+)
+
+// NamespaceKeyRootFunc is the default function for constructing storage paths
+// to resource directories enforcing namespace rules.
+func NamespaceKeyRootFunc(ctx context.Context, prefix string) string {
+	key := prefix
+	ns, ok := genericapirequest.NamespaceFrom(ctx)
+	if ok && len(ns) > 0 {
+		key = key + "/" + ns
+	}
+	return key
+}
+
+// NamespaceKeyFunc is the default function for constructing storage paths to
+// a resource relative to the given prefix enforcing namespace rules. If the
+// context does not contain a namespace, it errors.
+func NamespaceKeyFunc(ctx context.Context, prefix string, name string) (string, error) {
+	key := NamespaceKeyRootFunc(ctx, prefix)
+	ns, ok := genericapirequest.NamespaceFrom(ctx)
+	if !ok || len(ns) == 0 {
+		return "", apierrors.NewBadRequest("Namespace parameter required.")
+	}
+	if len(name) == 0 {
+		return "", apierrors.NewBadRequest("Name parameter required.")
+	}
+	if msgs := path.IsValidPathSegmentName(name); len(msgs) != 0 {
+		return "", apierrors.NewBadRequest(fmt.Sprintf("Name parameter invalid: %q: %s", name, strings.Join(msgs, ";")))
+	}
+	key = key + "/" + name
+	return key, nil
+}
+
+// NoNamespaceKeyFunc is the default function for constructing storage paths
+// to a resource relative to the given prefix without a namespace.
+func NoNamespaceKeyFunc(ctx context.Context, prefix string, name string) (string, error) {
+	if len(name) == 0 {
+		return "", apierrors.NewBadRequest("Name parameter required.")
+	}
+	if msgs := path.IsValidPathSegmentName(name); len(msgs) != 0 {
+		return "", apierrors.NewBadRequest(fmt.Sprintf("Name parameter invalid: %q: %s", name, strings.Join(msgs, ";")))
+	}
+	key := prefix + "/" + name
+	return key, nil
+}
+
+// New implements RESTStorage.New.
+func (e *Store) New() runtime.Object {
+	return e.NewFunc()
+}
+
+// NewList implements rest.Lister.
+func (e *Store) NewList() runtime.Object {
+	return e.NewListFunc()
+}
+
+// NamespaceScoped indicates whether the resource is namespaced.
+func (e *Store) NamespaceScoped() bool {
+	if e.CreateStrategy != nil {
+		return e.CreateStrategy.NamespaceScoped()
+	}
+	if e.UpdateStrategy != nil {
+		return e.UpdateStrategy.NamespaceScoped()
+	}
+
+	panic("programmer error: no CRUD for resource, you're crazy, override NamespaceScoped too")
+}
+
+// GetCreateStrategy implements GenericStore.
+func (e *Store) GetCreateStrategy() rest.RESTCreateStrategy {
+	return e.CreateStrategy
+}
+
+// GetUpdateStrategy implements GenericStore.
+func (e *Store) GetUpdateStrategy() rest.RESTUpdateStrategy {
+	return e.UpdateStrategy
+}
+
+// GetDeleteStrategy implements GenericStore.
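+//
+// Worked example (editorial) for the key functions above: with prefix
+// "/registry/widgets" (a hypothetical resource) and a request in namespace
+// "ns1" for object "w1":
+//
+//	NamespaceKeyRootFunc(ctx, "/registry/widgets")     // "/registry/widgets/ns1"
+//	NamespaceKeyFunc(ctx, "/registry/widgets", "w1")   // "/registry/widgets/ns1/w1", nil
+//	NoNamespaceKeyFunc(ctx, "/registry/widgets", "w1") // "/registry/widgets/w1", nil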
+func (e *Store) GetDeleteStrategy() rest.RESTDeleteStrategy { + return e.DeleteStrategy +} + +// List returns a list of items matching labels and field according to the +// store's PredicateFunc. +func (e *Store) List(ctx context.Context, options *metainternalversion.ListOptions) (runtime.Object, error) { + label := labels.Everything() + if options != nil && options.LabelSelector != nil { + label = options.LabelSelector + } + field := fields.Everything() + if options != nil && options.FieldSelector != nil { + field = options.FieldSelector + } + out, err := e.ListPredicate(ctx, e.PredicateFunc(label, field), options) + if err != nil { + return nil, err + } + if e.Decorator != nil { + e.Decorator(out) + } + return out, nil +} + +// ListPredicate returns a list of all the items matching the given +// SelectionPredicate. +func (e *Store) ListPredicate(ctx context.Context, p storage.SelectionPredicate, options *metainternalversion.ListOptions) (runtime.Object, error) { + if options == nil { + // By default we should serve the request from etcd. + options = &metainternalversion.ListOptions{ResourceVersion: ""} + } + p.Limit = options.Limit + p.Continue = options.Continue + list := e.NewListFunc() + qualifiedResource := e.qualifiedResourceFromContext(ctx) + storageOpts := storage.ListOptions{ResourceVersion: options.ResourceVersion, ResourceVersionMatch: options.ResourceVersionMatch, Predicate: p} + if name, ok := p.MatchesSingle(); ok { + if key, err := e.KeyFunc(ctx, name); err == nil { + err := e.Storage.GetToList(ctx, key, storageOpts, list) + return list, storeerr.InterpretListError(err, qualifiedResource) + } + // if we cannot extract a key based on the current context, the optimization is skipped + } + + err := e.Storage.List(ctx, e.KeyRootFunc(ctx), storageOpts, list) + return list, storeerr.InterpretListError(err, qualifiedResource) +} + +// finishNothing is a do-nothing FinishFunc. +func finishNothing(context.Context, bool) {} + +// Create inserts a new item according to the unique key from the object. +func (e *Store) Create(ctx context.Context, obj runtime.Object, createValidation rest.ValidateObjectFunc, options *metav1.CreateOptions) (runtime.Object, error) { + var finishCreate FinishFunc = finishNothing + + if e.BeginCreate != nil { + fn, err := e.BeginCreate(ctx, obj, options) + if err != nil { + return nil, err + } + finishCreate = fn + defer func() { + finishCreate(ctx, false) + }() + } + + if err := rest.BeforeCreate(e.CreateStrategy, ctx, obj); err != nil { + return nil, err + } + // at this point we have a fully formed object. It is time to call the validators that the apiserver + // handling chain wants to enforce. 
+	if createValidation != nil {
+		if err := createValidation(ctx, obj.DeepCopyObject()); err != nil {
+			return nil, err
+		}
+	}
+
+	name, err := e.ObjectNameFunc(obj)
+	if err != nil {
+		return nil, err
+	}
+	key, err := e.KeyFunc(ctx, name)
+	if err != nil {
+		return nil, err
+	}
+	qualifiedResource := e.qualifiedResourceFromContext(ctx)
+	ttl, err := e.calculateTTL(obj, 0, false)
+	if err != nil {
+		return nil, err
+	}
+	out := e.NewFunc()
+	if err := e.Storage.Create(ctx, key, obj, out, ttl, dryrun.IsDryRun(options.DryRun)); err != nil {
+		err = storeerr.InterpretCreateError(err, qualifiedResource, name)
+		err = rest.CheckGeneratedNameError(e.CreateStrategy, err, obj)
+		if !apierrors.IsAlreadyExists(err) {
+			return nil, err
+		}
+		if errGet := e.Storage.Get(ctx, key, storage.GetOptions{}, out); errGet != nil {
+			return nil, err
+		}
+		accessor, errGetAcc := meta.Accessor(out)
+		if errGetAcc != nil {
+			return nil, err
+		}
+		if accessor.GetDeletionTimestamp() != nil {
+			msg := &err.(*apierrors.StatusError).ErrStatus.Message
+			*msg = fmt.Sprintf("object is being deleted: %s", *msg)
+		}
+		return nil, err
+	}
+	// The operation has succeeded. Call the finish function if there is one,
+	// and then make sure the defer doesn't call it again.
+	fn := finishCreate
+	finishCreate = finishNothing
+	fn(ctx, true)
+
+	if e.AfterCreate != nil {
+		e.AfterCreate(out, options)
+	}
+	if e.Decorator != nil {
+		e.Decorator(out)
+	}
+	return out, nil
+}
+
+// ShouldDeleteDuringUpdate is the default function for
+// checking whether an object should be deleted during an update.
+// It returns true when the new object has no finalizers,
+// the existing object's deletionTimestamp is set, and
+// the existing object's deletionGracePeriodSeconds is 0 or nil.
+func ShouldDeleteDuringUpdate(ctx context.Context, key string, obj, existing runtime.Object) bool {
+	newMeta, err := meta.Accessor(obj)
+	if err != nil {
+		utilruntime.HandleError(err)
+		return false
+	}
+	oldMeta, err := meta.Accessor(existing)
+	if err != nil {
+		utilruntime.HandleError(err)
+		return false
+	}
+	if len(newMeta.GetFinalizers()) > 0 {
+		// don't delete with finalizers remaining in the new object
+		return false
+	}
+	if oldMeta.GetDeletionTimestamp() == nil {
+		// don't delete if the existing object hasn't had a delete request made
+		return false
+	}
+	// delete if the existing object has no grace period or a grace period of 0
+	return oldMeta.GetDeletionGracePeriodSeconds() == nil || *oldMeta.GetDeletionGracePeriodSeconds() == 0
+}
+
+// deleteWithoutFinalizers handles deleting an object ignoring its finalizer list.
+// Used for objects that either have been finalized or have never been initialized.
+func (e *Store) deleteWithoutFinalizers(ctx context.Context, name, key string, obj runtime.Object, preconditions *storage.Preconditions, options *metav1.DeleteOptions) (runtime.Object, bool, error) {
+	out := e.NewFunc()
+	klog.V(6).InfoS("Going to delete object from registry, triggered by update", "object", klog.KRef(genericapirequest.NamespaceValue(ctx), name))
+	// Using rest.ValidateAllObjectFunc because the request is an UPDATE request and has already passed the admission for the UPDATE verb.
+	if err := e.Storage.Delete(ctx, key, out, preconditions, rest.ValidateAllObjectFunc, dryrun.IsDryRun(options.DryRun), nil); err != nil {
+		// Deletion is racy, i.e., there could be multiple update
+		// requests to remove all finalizers from the object, so we
+		// ignore the NotFound error.
+ if storage.IsNotFound(err) { + _, err := e.finalizeDelete(ctx, obj, true, options) + // clients are expecting an updated object if a PUT succeeded, + // but finalizeDelete returns a metav1.Status, so return + // the object in the request instead. + return obj, false, err + } + return nil, false, storeerr.InterpretDeleteError(err, e.qualifiedResourceFromContext(ctx), name) + } + _, err := e.finalizeDelete(ctx, out, true, options) + // clients are expecting an updated object if a PUT succeeded, but + // finalizeDelete returns a metav1.Status, so return the object in + // the request instead. + return obj, false, err +} + +// Update performs an atomic update and set of the object. Returns the result of the update +// or an error. If the registry allows create-on-update, the create flow will be executed. +// A bool is returned along with the object and any errors, to indicate object creation. +func (e *Store) Update(ctx context.Context, name string, objInfo rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc, forceAllowCreate bool, options *metav1.UpdateOptions) (runtime.Object, bool, error) { + key, err := e.KeyFunc(ctx, name) + if err != nil { + return nil, false, err + } + + var ( + creatingObj runtime.Object + creating = false + ) + + qualifiedResource := e.qualifiedResourceFromContext(ctx) + storagePreconditions := &storage.Preconditions{} + if preconditions := objInfo.Preconditions(); preconditions != nil { + storagePreconditions.UID = preconditions.UID + storagePreconditions.ResourceVersion = preconditions.ResourceVersion + } + + out := e.NewFunc() + // deleteObj is only used in case a deletion is carried out + var deleteObj runtime.Object + err = e.Storage.GuaranteedUpdate(ctx, key, out, true, storagePreconditions, func(existing runtime.Object, res storage.ResponseMeta) (runtime.Object, *uint64, error) { + existingResourceVersion, err := e.Storage.Versioner().ObjectResourceVersion(existing) + if err != nil { + return nil, nil, err + } + if existingResourceVersion == 0 { + if !e.UpdateStrategy.AllowCreateOnUpdate() && !forceAllowCreate { + return nil, nil, apierrors.NewNotFound(qualifiedResource, name) + } + } + + // Given the existing object, get the new object + obj, err := objInfo.UpdatedObject(ctx, existing) + if err != nil { + return nil, nil, err + } + + // If AllowUnconditionalUpdate() is true and the object specified by + // the user does not have a resource version, then we populate it with + // the latest version. Else, we check that the version specified by + // the user matches the version of latest storage object. + newResourceVersion, err := e.Storage.Versioner().ObjectResourceVersion(obj) + if err != nil { + return nil, nil, err + } + doUnconditionalUpdate := newResourceVersion == 0 && e.UpdateStrategy.AllowUnconditionalUpdate() + + if existingResourceVersion == 0 { + var finishCreate FinishFunc = finishNothing + + if e.BeginCreate != nil { + fn, err := e.BeginCreate(ctx, obj, newCreateOptionsFromUpdateOptions(options)) + if err != nil { + return nil, nil, err + } + finishCreate = fn + defer func() { + finishCreate(ctx, false) + }() + } + + creating = true + creatingObj = obj + if err := rest.BeforeCreate(e.CreateStrategy, ctx, obj); err != nil { + return nil, nil, err + } + // at this point we have a fully formed object. It is time to call the validators that the apiserver + // handling chain wants to enforce. 
+ if createValidation != nil { + if err := createValidation(ctx, obj.DeepCopyObject()); err != nil { + return nil, nil, err + } + } + ttl, err := e.calculateTTL(obj, 0, false) + if err != nil { + return nil, nil, err + } + + // The operation has succeeded. Call the finish function if there is one, + // and then make sure the defer doesn't call it again. + fn := finishCreate + finishCreate = finishNothing + fn(ctx, true) + + return obj, &ttl, nil + } + + creating = false + creatingObj = nil + if doUnconditionalUpdate { + // Update the object's resource version to match the latest + // storage object's resource version. + err = e.Storage.Versioner().UpdateObject(obj, res.ResourceVersion) + if err != nil { + return nil, nil, err + } + } else { + // Check if the object's resource version matches the latest + // resource version. + if newResourceVersion == 0 { + // TODO: The Invalid error should have a field for Resource. + // After that field is added, we should fill the Resource and + // leave the Kind field empty. See the discussion in #18526. + qualifiedKind := schema.GroupKind{Group: qualifiedResource.Group, Kind: qualifiedResource.Resource} + fieldErrList := field.ErrorList{field.Invalid(field.NewPath("metadata").Child("resourceVersion"), newResourceVersion, "must be specified for an update")} + return nil, nil, apierrors.NewInvalid(qualifiedKind, name, fieldErrList) + } + if newResourceVersion != existingResourceVersion { + return nil, nil, apierrors.NewConflict(qualifiedResource, name, fmt.Errorf(OptimisticLockErrorMsg)) + } + } + + var finishUpdate FinishFunc = finishNothing + + if e.BeginUpdate != nil { + fn, err := e.BeginUpdate(ctx, obj, existing, options) + if err != nil { + return nil, nil, err + } + finishUpdate = fn + defer func() { + finishUpdate(ctx, false) + }() + } + + if err := rest.BeforeUpdate(e.UpdateStrategy, ctx, obj, existing); err != nil { + return nil, nil, err + } + // at this point we have a fully formed object. It is time to call the validators that the apiserver + // handling chain wants to enforce. + if updateValidation != nil { + if err := updateValidation(ctx, obj.DeepCopyObject(), existing.DeepCopyObject()); err != nil { + return nil, nil, err + } + } + // Check the default delete-during-update conditions, and store-specific conditions if provided + if ShouldDeleteDuringUpdate(ctx, key, obj, existing) && + (e.ShouldDeleteDuringUpdate == nil || e.ShouldDeleteDuringUpdate(ctx, key, obj, existing)) { + deleteObj = obj + return nil, nil, errEmptiedFinalizers + } + ttl, err := e.calculateTTL(obj, res.TTL, true) + if err != nil { + return nil, nil, err + } + + // The operation has succeeded. Call the finish function if there is one, + // and then make sure the defer doesn't call it again. 
+ fn := finishUpdate + finishUpdate = finishNothing + fn(ctx, true) + + if int64(ttl) != res.TTL { + return obj, &ttl, nil + } + return obj, nil, nil + }, dryrun.IsDryRun(options.DryRun), nil) + + if err != nil { + // delete the object + if err == errEmptiedFinalizers { + return e.deleteWithoutFinalizers(ctx, name, key, deleteObj, storagePreconditions, newDeleteOptionsFromUpdateOptions(options)) + } + if creating { + err = storeerr.InterpretCreateError(err, qualifiedResource, name) + err = rest.CheckGeneratedNameError(e.CreateStrategy, err, creatingObj) + } else { + err = storeerr.InterpretUpdateError(err, qualifiedResource, name) + } + return nil, false, err + } + + if creating { + if e.AfterCreate != nil { + e.AfterCreate(out, newCreateOptionsFromUpdateOptions(options)) + } + } else { + if e.AfterUpdate != nil { + e.AfterUpdate(out, options) + } + } + if e.Decorator != nil { + e.Decorator(out) + } + return out, creating, nil +} + +// This is a helper to convert UpdateOptions to CreateOptions for the +// create-on-update path. +func newCreateOptionsFromUpdateOptions(in *metav1.UpdateOptions) *metav1.CreateOptions { + co := &metav1.CreateOptions{ + DryRun: in.DryRun, + FieldManager: in.FieldManager, + } + co.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind("CreateOptions")) + return co +} + +// This is a helper to convert UpdateOptions to DeleteOptions for the +// delete-on-update path. +func newDeleteOptionsFromUpdateOptions(in *metav1.UpdateOptions) *metav1.DeleteOptions { + do := &metav1.DeleteOptions{ + DryRun: in.DryRun, + } + do.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind("DeleteOptions")) + return do +} + +// Get retrieves the item from storage. +func (e *Store) Get(ctx context.Context, name string, options *metav1.GetOptions) (runtime.Object, error) { + obj := e.NewFunc() + key, err := e.KeyFunc(ctx, name) + if err != nil { + return nil, err + } + if err := e.Storage.Get(ctx, key, storage.GetOptions{ResourceVersion: options.ResourceVersion}, obj); err != nil { + return nil, storeerr.InterpretGetError(err, e.qualifiedResourceFromContext(ctx), name) + } + if e.Decorator != nil { + e.Decorator(obj) + } + return obj, nil +} + +// qualifiedResourceFromContext attempts to retrieve a GroupResource from the context's request info. +// If the context has no request info, DefaultQualifiedResource is used. +func (e *Store) qualifiedResourceFromContext(ctx context.Context) schema.GroupResource { + if info, ok := genericapirequest.RequestInfoFrom(ctx); ok { + return schema.GroupResource{Group: info.APIGroup, Resource: info.Resource} + } + // some implementations access storage directly and thus the context has no RequestInfo + return e.DefaultQualifiedResource +} + +var ( + errAlreadyDeleting = fmt.Errorf("abort delete") + errDeleteNow = fmt.Errorf("delete now") + errEmptiedFinalizers = fmt.Errorf("emptied finalizers") +) + +// shouldOrphanDependents returns true if the finalizer for orphaning should be set +// updated for FinalizerOrphanDependents. In the order of highest to lowest +// priority, there are three factors affect whether to add/remove the +// FinalizerOrphanDependents: options, existing finalizers of the object, +// and e.DeleteStrategy.DefaultGarbageCollectionPolicy. 
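+//
+// For example (an editorial sketch): an explicit policy in the delete options
+// takes precedence, so a delete issued with
+//
+//	policy := metav1.DeletePropagationOrphan
+//	opts := &metav1.DeleteOptions{PropagationPolicy: &policy}
+//
+// orphans dependents regardless of the object's existing finalizers, unless
+// the strategy's default policy is rest.Unsupported, which is checked first.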
+func shouldOrphanDependents(ctx context.Context, e *Store, accessor metav1.Object, options *metav1.DeleteOptions) bool { + // Get default GC policy from this REST object type + gcStrategy, ok := e.DeleteStrategy.(rest.GarbageCollectionDeleteStrategy) + var defaultGCPolicy rest.GarbageCollectionPolicy + if ok { + defaultGCPolicy = gcStrategy.DefaultGarbageCollectionPolicy(ctx) + } + + if defaultGCPolicy == rest.Unsupported { + // return false to indicate that we should NOT orphan + return false + } + + // An explicit policy was set at deletion time, that overrides everything + //lint:ignore SA1019 backwards compatibility + if options != nil && options.OrphanDependents != nil { + //lint:ignore SA1019 backwards compatibility + return *options.OrphanDependents + } + if options != nil && options.PropagationPolicy != nil { + switch *options.PropagationPolicy { + case metav1.DeletePropagationOrphan: + return true + case metav1.DeletePropagationBackground, metav1.DeletePropagationForeground: + return false + } + } + + // If a finalizer is set in the object, it overrides the default + // validation should make sure the two cases won't be true at the same time. + finalizers := accessor.GetFinalizers() + for _, f := range finalizers { + switch f { + case metav1.FinalizerOrphanDependents: + return true + case metav1.FinalizerDeleteDependents: + return false + } + } + + // Get default orphan policy from this REST object type if it exists + return defaultGCPolicy == rest.OrphanDependents +} + +// shouldDeleteDependents returns true if the finalizer for foreground deletion should be set +// updated for FinalizerDeleteDependents. In the order of highest to lowest +// priority, there are three factors affect whether to add/remove the +// FinalizerDeleteDependents: options, existing finalizers of the object, and +// e.DeleteStrategy.DefaultGarbageCollectionPolicy. +func shouldDeleteDependents(ctx context.Context, e *Store, accessor metav1.Object, options *metav1.DeleteOptions) bool { + // Get default GC policy from this REST object type + if gcStrategy, ok := e.DeleteStrategy.(rest.GarbageCollectionDeleteStrategy); ok && gcStrategy.DefaultGarbageCollectionPolicy(ctx) == rest.Unsupported { + // return false to indicate that we should NOT delete in foreground + return false + } + + // If an explicit policy was set at deletion time, that overrides both + //lint:ignore SA1019 backwards compatibility + if options != nil && options.OrphanDependents != nil { + return false + } + if options != nil && options.PropagationPolicy != nil { + switch *options.PropagationPolicy { + case metav1.DeletePropagationForeground: + return true + case metav1.DeletePropagationBackground, metav1.DeletePropagationOrphan: + return false + } + } + + // If a finalizer is set in the object, it overrides the default + // validation has made sure the two cases won't be true at the same time. + finalizers := accessor.GetFinalizers() + for _, f := range finalizers { + switch f { + case metav1.FinalizerDeleteDependents: + return true + case metav1.FinalizerOrphanDependents: + return false + } + } + + return false +} + +// deletionFinalizersForGarbageCollection analyzes the object and delete options +// to determine whether the object is in need of finalization by the garbage +// collector. If so, returns the set of deletion finalizers to apply and a bool +// indicating whether the finalizer list has changed and is in need of updating. +// +// The finalizers returned are intended to be handled by the garbage collector. 
+// If garbage collection is disabled for the store, this function returns false
+// to ensure that finalizers which will never be cleared aren't set.
+func deletionFinalizersForGarbageCollection(ctx context.Context, e *Store, accessor metav1.Object, options *metav1.DeleteOptions) (bool, []string) {
+	if !e.EnableGarbageCollection {
+		return false, []string{}
+	}
+	shouldOrphan := shouldOrphanDependents(ctx, e, accessor, options)
+	shouldDeleteDependentInForeground := shouldDeleteDependents(ctx, e, accessor, options)
+	newFinalizers := []string{}
+
+	// first remove both finalizers, add them back if needed.
+	for _, f := range accessor.GetFinalizers() {
+		if f == metav1.FinalizerOrphanDependents || f == metav1.FinalizerDeleteDependents {
+			continue
+		}
+		newFinalizers = append(newFinalizers, f)
+	}
+
+	if shouldOrphan {
+		newFinalizers = append(newFinalizers, metav1.FinalizerOrphanDependents)
+	}
+	if shouldDeleteDependentInForeground {
+		newFinalizers = append(newFinalizers, metav1.FinalizerDeleteDependents)
+	}
+
+	oldFinalizerSet := sets.NewString(accessor.GetFinalizers()...)
+	newFinalizersSet := sets.NewString(newFinalizers...)
+	if oldFinalizerSet.Equal(newFinalizersSet) {
+		return false, accessor.GetFinalizers()
+	}
+	return true, newFinalizers
+}
+
+// markAsDeleting sets the obj's DeletionGracePeriodSeconds to 0, and sets the
+// DeletionTimestamp to "now" if there is no existing deletionTimestamp or if the existing
+// deletionTimestamp is further in the future. Finalizers are watching for such updates and will
+// finalize the object if their IDs are present in the object's Finalizers list.
+func markAsDeleting(obj runtime.Object, now time.Time) (err error) {
+	objectMeta, kerr := meta.Accessor(obj)
+	if kerr != nil {
+		return kerr
+	}
+	// This handles the Generation bump for resources that don't support
+	// graceful deletion. For resources that do support it, the bump is
+	// handled in pkg/api/rest/delete.go.
+	if objectMeta.GetDeletionTimestamp() == nil && objectMeta.GetGeneration() > 0 {
+		objectMeta.SetGeneration(objectMeta.GetGeneration() + 1)
+	}
+	existingDeletionTimestamp := objectMeta.GetDeletionTimestamp()
+	if existingDeletionTimestamp == nil || existingDeletionTimestamp.After(now) {
+		metaNow := metav1.NewTime(now)
+		objectMeta.SetDeletionTimestamp(&metaNow)
+	}
+	var zero int64 = 0
+	objectMeta.SetDeletionGracePeriodSeconds(&zero)
+	return nil
+}
+
+// updateForGracefulDeletionAndFinalizers updates the given object for
+// graceful deletion and finalization by setting the deletion timestamp and
+// grace period seconds (graceful deletion) and updating the list of
+// finalizers (finalization); it returns:
+//
+// 1. an error
+// 2. a boolean indicating that the object was not found, but it should be
+//    ignored
+// 3. a boolean indicating that the object's grace period is exhausted and it
+//    should be deleted immediately
+// 4. a new output object with the state that was updated
+// 5. a copy of the last existing state of the object
+func (e *Store) updateForGracefulDeletionAndFinalizers(ctx context.Context, name, key string, options *metav1.DeleteOptions, preconditions storage.Preconditions, deleteValidation rest.ValidateObjectFunc, in runtime.Object) (err error, ignoreNotFound, deleteImmediately bool, out, lastExisting runtime.Object) {
+	lastGraceful := int64(0)
+	var pendingFinalizers bool
+	out = e.NewFunc()
+	err = e.Storage.GuaranteedUpdate(
+		ctx,
+		key,
+		out,
+		false, /* ignoreNotFound */
+		&preconditions,
+		storage.SimpleUpdate(func(existing runtime.Object) (runtime.Object, error) {
+			if err := deleteValidation(ctx, existing); err != nil {
+				return nil, err
+			}
+			graceful, pendingGraceful, err := rest.BeforeDelete(e.DeleteStrategy, ctx, existing, options)
+			if err != nil {
+				return nil, err
+			}
+			if pendingGraceful {
+				return nil, errAlreadyDeleting
+			}
+
+			// Add/remove the orphan finalizer as the options dictate.
+			// Note that this occurs after checking pendingGraceful, so
+			// finalizers cannot be updated via DeleteOptions if deletion has
+			// started.
+			existingAccessor, err := meta.Accessor(existing)
+			if err != nil {
+				return nil, err
+			}
+			needsUpdate, newFinalizers := deletionFinalizersForGarbageCollection(ctx, e, existingAccessor, options)
+			if needsUpdate {
+				existingAccessor.SetFinalizers(newFinalizers)
+			}
+
+			pendingFinalizers = len(existingAccessor.GetFinalizers()) != 0
+			if !graceful {
+				// set the DeletionGracePeriodSeconds to 0 if the object has
+				// pending finalizers but does not support graceful deletion
+				if pendingFinalizers {
+					klog.V(6).InfoS("Object has pending finalizers, so the registry is going to update its status to deleting",
+						"object", klog.KRef(genericapirequest.NamespaceValue(ctx), name), "gracePeriod", time.Second*0)
+					err = markAsDeleting(existing, time.Now())
+					if err != nil {
+						return nil, err
+					}
+					return existing, nil
+				}
+				return nil, errDeleteNow
+			}
+			lastGraceful = *options.GracePeriodSeconds
+			lastExisting = existing
+			return existing, nil
+		}),
+		dryrun.IsDryRun(options.DryRun),
+		nil,
+	)
+	switch err {
+	case nil:
+		// If there are pending finalizers, we never delete the object immediately.
+		if pendingFinalizers {
+			return nil, false, false, out, lastExisting
+		}
+		if lastGraceful > 0 {
+			return nil, false, false, out, lastExisting
+		}
+		// If we are here, the registry supports the grace period mechanism and
+		// we are intentionally deleting gracelessly. In this case, we may
+		// enter a race with other k8s components. If another component wins
+		// the race, the object will not be found, and we should tolerate
+		// the NotFound error. See
+		// https://github.com/kubernetes/kubernetes/issues/19403 for
+		// details.
+		return nil, true, true, out, lastExisting
+	case errDeleteNow:
+		// we've updated the object to have a zero grace period, or it's already at 0, so
+		// we should fall through and truly delete the object.
+		return nil, false, true, out, lastExisting
+	case errAlreadyDeleting:
+		out, err = e.finalizeDelete(ctx, in, true, options)
+		return err, false, false, out, lastExisting
+	default:
+		return storeerr.InterpretUpdateError(err, e.qualifiedResourceFromContext(ctx), name), false, false, out, lastExisting
+	}
+}
+
+// Delete removes the item from storage.
+// options can be mutated by rest.BeforeDelete due to a graceful deletion strategy.
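+//
+// Editorial sketch (names illustrative, not upstream code): a graceful,
+// preconditioned delete through this method could look like
+//
+//	grace := int64(30)
+//	opts := &metav1.DeleteOptions{
+//		GracePeriodSeconds: &grace,
+//		Preconditions:      metav1.NewUIDPreconditions("some-uid"),
+//	}
+//	obj, deletedImmediately, err := store.Delete(ctx, "name", rest.ValidateAllObjectFunc, opts)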
+func (e *Store) Delete(ctx context.Context, name string, deleteValidation rest.ValidateObjectFunc, options *metav1.DeleteOptions) (runtime.Object, bool, error) { + key, err := e.KeyFunc(ctx, name) + if err != nil { + return nil, false, err + } + obj := e.NewFunc() + qualifiedResource := e.qualifiedResourceFromContext(ctx) + if err = e.Storage.Get(ctx, key, storage.GetOptions{}, obj); err != nil { + return nil, false, storeerr.InterpretDeleteError(err, qualifiedResource, name) + } + + // support older consumers of delete by treating "nil" as delete immediately + if options == nil { + options = metav1.NewDeleteOptions(0) + } + var preconditions storage.Preconditions + if options.Preconditions != nil { + preconditions.UID = options.Preconditions.UID + preconditions.ResourceVersion = options.Preconditions.ResourceVersion + } + graceful, pendingGraceful, err := rest.BeforeDelete(e.DeleteStrategy, ctx, obj, options) + if err != nil { + return nil, false, err + } + // this means finalizers cannot be updated via DeleteOptions if a deletion is already pending + if pendingGraceful { + out, err := e.finalizeDelete(ctx, obj, false, options) + return out, false, err + } + // check if obj has pending finalizers + accessor, err := meta.Accessor(obj) + if err != nil { + return nil, false, apierrors.NewInternalError(err) + } + pendingFinalizers := len(accessor.GetFinalizers()) != 0 + var ignoreNotFound bool + var deleteImmediately bool = true + var lastExisting, out runtime.Object + + // Handle combinations of graceful deletion and finalization by issuing + // the correct updates. + shouldUpdateFinalizers, _ := deletionFinalizersForGarbageCollection(ctx, e, accessor, options) + // TODO: remove the check, because we support no-op updates now. + if graceful || pendingFinalizers || shouldUpdateFinalizers { + err, ignoreNotFound, deleteImmediately, out, lastExisting = e.updateForGracefulDeletionAndFinalizers(ctx, name, key, options, preconditions, deleteValidation, obj) + // Update the preconditions.ResourceVersion if set since we updated the object. + if err == nil && deleteImmediately && preconditions.ResourceVersion != nil { + accessor, err = meta.Accessor(out) + if err != nil { + return out, false, apierrors.NewInternalError(err) + } + resourceVersion := accessor.GetResourceVersion() + preconditions.ResourceVersion = &resourceVersion + } + } + + // !deleteImmediately covers all cases where err != nil. We keep both to be future-proof. + if !deleteImmediately || err != nil { + return out, false, err + } + + // Going further in this function is not useful when we are + // performing a dry-run request. Worse, it will actually + // override "out" with the version of the object in database + // that doesn't have the finalizer and deletiontimestamp set + // (because the update above was dry-run too). If we already + // have that version available, let's just return it now, + // otherwise, we can call dry-run delete that will get us the + // latest version of the object. 
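+	// For instance, a client dry-run carries options.DryRun = []string{metav1.DryRunAll};
+	// in that case the update above was also a dry-run, so the early return below
+	// hands back the already-updated "out" instead of re-reading storage.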
+ if dryrun.IsDryRun(options.DryRun) && out != nil { + return out, true, nil + } + + // delete immediately, or no graceful deletion supported + klog.V(6).InfoS("Going to delete object from registry", "object", klog.KRef(genericapirequest.NamespaceValue(ctx), name)) + out = e.NewFunc() + if err := e.Storage.Delete(ctx, key, out, &preconditions, storage.ValidateObjectFunc(deleteValidation), dryrun.IsDryRun(options.DryRun), nil); err != nil { + // Please refer to the place where we set ignoreNotFound for the reason + // why we ignore the NotFound error . + if storage.IsNotFound(err) && ignoreNotFound && lastExisting != nil { + // The lastExisting object may not be the last state of the object + // before its deletion, but it's the best approximation. + out, err := e.finalizeDelete(ctx, lastExisting, true, options) + return out, true, err + } + return nil, false, storeerr.InterpretDeleteError(err, qualifiedResource, name) + } + out, err = e.finalizeDelete(ctx, out, true, options) + return out, true, err +} + +// DeleteReturnsDeletedObject implements the rest.MayReturnFullObjectDeleter interface +func (e *Store) DeleteReturnsDeletedObject() bool { + return e.ReturnDeletedObject +} + +// DeleteCollection removes all items returned by List with a given ListOptions from storage. +// +// DeleteCollection is currently NOT atomic. It can happen that only subset of objects +// will be deleted from storage, and then an error will be returned. +// In case of success, the list of deleted objects will be returned. +// +// TODO: Currently, there is no easy way to remove 'directory' entry from storage (if we +// are removing all objects of a given type) with the current API (it's technically +// possibly with storage API, but watch is not delivered correctly then). +// It will be possible to fix it with v3 etcd API. +func (e *Store) DeleteCollection(ctx context.Context, deleteValidation rest.ValidateObjectFunc, options *metav1.DeleteOptions, listOptions *metainternalversion.ListOptions) (runtime.Object, error) { + if listOptions == nil { + listOptions = &metainternalversion.ListOptions{} + } else { + listOptions = listOptions.DeepCopy() + } + + listObj, err := e.List(ctx, listOptions) + if err != nil { + return nil, err + } + items, err := meta.ExtractList(listObj) + if err != nil { + return nil, err + } + if len(items) == 0 { + // Nothing to delete, return now + return listObj, nil + } + // Spawn a number of goroutines, so that we can issue requests to storage + // in parallel to speed up deletion. + // It is proportional to the number of items to delete, up to + // DeleteCollectionWorkers (it doesn't make much sense to spawn 16 + // workers to delete 10 items). 
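+	// E.g. with DeleteCollectionWorkers = 16 and 10 items this clamps to 10
+	// workers, and a configured value below 1 is raised to a single worker.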
+ workersNumber := e.DeleteCollectionWorkers + if workersNumber > len(items) { + workersNumber = len(items) + } + if workersNumber < 1 { + workersNumber = 1 + } + wg := sync.WaitGroup{} + toProcess := make(chan int, 2*workersNumber) + errs := make(chan error, workersNumber+1) + + go func() { + defer utilruntime.HandleCrash(func(panicReason interface{}) { + errs <- fmt.Errorf("DeleteCollection distributor panicked: %v", panicReason) + }) + for i := 0; i < len(items); i++ { + toProcess <- i + } + close(toProcess) + }() + + wg.Add(workersNumber) + for i := 0; i < workersNumber; i++ { + go func() { + // panics don't cross goroutine boundaries + defer utilruntime.HandleCrash(func(panicReason interface{}) { + errs <- fmt.Errorf("DeleteCollection goroutine panicked: %v", panicReason) + }) + defer wg.Done() + + for index := range toProcess { + accessor, err := meta.Accessor(items[index]) + if err != nil { + errs <- err + return + } + // DeepCopy the deletion options because individual graceful deleters communicate changes via a mutating + // function in the delete strategy called in the delete method. While that is always ugly, it works + // when making a single call. When making multiple calls via delete collection, the mutation applied to + // pod/A can change the option ultimately used for pod/B. + if _, _, err := e.Delete(ctx, accessor.GetName(), deleteValidation, options.DeepCopy()); err != nil && !apierrors.IsNotFound(err) { + klog.V(4).InfoS("Delete object in DeleteCollection failed", "object", klog.KObj(accessor), "err", err) + errs <- err + return + } + } + }() + } + wg.Wait() + select { + case err := <-errs: + return nil, err + default: + return listObj, nil + } +} + +// finalizeDelete runs the Store's AfterDelete hook if runHooks is set and +// returns the decorated deleted object if appropriate. +func (e *Store) finalizeDelete(ctx context.Context, obj runtime.Object, runHooks bool, options *metav1.DeleteOptions) (runtime.Object, error) { + if runHooks && e.AfterDelete != nil { + e.AfterDelete(obj, options) + } + if e.ReturnDeletedObject { + if e.Decorator != nil { + e.Decorator(obj) + } + return obj, nil + } + // Return information about the deleted object, which enables clients to + // verify that the object was actually deleted and not waiting for finalizers. + accessor, err := meta.Accessor(obj) + if err != nil { + return nil, err + } + qualifiedResource := e.qualifiedResourceFromContext(ctx) + details := &metav1.StatusDetails{ + Name: accessor.GetName(), + Group: qualifiedResource.Group, + Kind: qualifiedResource.Resource, // Yes we set Kind field to resource. + UID: accessor.GetUID(), + } + status := &metav1.Status{Status: metav1.StatusSuccess, Details: details} + return status, nil +} + +// Watch makes a matcher for the given label and field, and calls +// WatchPredicate. If possible, you should customize PredicateFunc to produce +// a matcher that matches by key. SelectionPredicate does this for you +// automatically. 
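+//
+// Editorial sketch of such a PredicateFunc (the attribute extractor is an
+// assumption; stores pick whatever matches their scoping):
+//
+//	e.PredicateFunc = func(label labels.Selector, field fields.Selector) storage.SelectionPredicate {
+//		return storage.SelectionPredicate{
+//			Label:    label,
+//			Field:    field,
+//			GetAttrs: storage.DefaultNamespaceScopedAttr,
+//		}
+//	}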
+func (e *Store) Watch(ctx context.Context, options *metainternalversion.ListOptions) (watch.Interface, error) {
+	label := labels.Everything()
+	if options != nil && options.LabelSelector != nil {
+		label = options.LabelSelector
+	}
+	field := fields.Everything()
+	if options != nil && options.FieldSelector != nil {
+		field = options.FieldSelector
+	}
+	predicate := e.PredicateFunc(label, field)
+
+	resourceVersion := ""
+	if options != nil {
+		resourceVersion = options.ResourceVersion
+		predicate.AllowWatchBookmarks = options.AllowWatchBookmarks
+	}
+	return e.WatchPredicate(ctx, predicate, resourceVersion)
+}
+
+// WatchPredicate starts a watch for the items that match.
+func (e *Store) WatchPredicate(ctx context.Context, p storage.SelectionPredicate, resourceVersion string) (watch.Interface, error) {
+	storageOpts := storage.ListOptions{ResourceVersion: resourceVersion, Predicate: p}
+	if name, ok := p.MatchesSingle(); ok {
+		if key, err := e.KeyFunc(ctx, name); err == nil {
+			w, err := e.Storage.Watch(ctx, key, storageOpts)
+			if err != nil {
+				return nil, err
+			}
+			if e.Decorator != nil {
+				return newDecoratedWatcher(ctx, w, e.Decorator), nil
+			}
+			return w, nil
+		}
+		// if we cannot extract a key based on the current context, the
+		// optimization is skipped
+	}
+
+	w, err := e.Storage.WatchList(ctx, e.KeyRootFunc(ctx), storageOpts)
+	if err != nil {
+		return nil, err
+	}
+	if e.Decorator != nil {
+		return newDecoratedWatcher(ctx, w, e.Decorator), nil
+	}
+	return w, nil
+}
+
+// calculateTTL is a helper for retrieving the updated TTL for an object or
+// returning an error if the TTL cannot be calculated. The defaultTTL is
+// changed to 1 if less than zero. Zero means no TTL, not expire immediately.
+func (e *Store) calculateTTL(obj runtime.Object, defaultTTL int64, update bool) (ttl uint64, err error) {
+	// TODO: validate that this assertion is still valid.
+
+	// etcd may return a negative TTL for a node if the expiration has not
+	// occurred due to server lag - we will ensure that the value is at least
+	// set.
+	if defaultTTL < 0 {
+		defaultTTL = 1
+	}
+	ttl = uint64(defaultTTL)
+	if e.TTLFunc != nil {
+		ttl, err = e.TTLFunc(obj, ttl, update)
+	}
+	return ttl, err
+}
+
+// CompleteWithOptions updates the store with the provided options and
+// defaults common fields.
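+//
+// Editorial sketch of typical construction (optsGetter is an assumed
+// generic.RESTOptionsGetter; the "example" types and strategy are hypothetical):
+//
+//	store := &Store{
+//		NewFunc:                  func() runtime.Object { return &example.Widget{} },
+//		NewListFunc:              func() runtime.Object { return &example.WidgetList{} },
+//		DefaultQualifiedResource: schema.GroupResource{Group: "example.io", Resource: "widgets"},
+//		CreateStrategy:           strategy,
+//		UpdateStrategy:           strategy,
+//		DeleteStrategy:           strategy,
+//		TableConvertor:           rest.NewDefaultTableConvertor(schema.GroupResource{Group: "example.io", Resource: "widgets"}),
+//	}
+//	if err := store.CompleteWithOptions(&generic.StoreOptions{RESTOptions: optsGetter}); err != nil {
+//		return nil, err
+//	}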
+func (e *Store) CompleteWithOptions(options *generic.StoreOptions) error { + if e.DefaultQualifiedResource.Empty() { + return fmt.Errorf("store %#v must have a non-empty qualified resource", e) + } + if e.NewFunc == nil { + return fmt.Errorf("store for %s must have NewFunc set", e.DefaultQualifiedResource.String()) + } + if e.NewListFunc == nil { + return fmt.Errorf("store for %s must have NewListFunc set", e.DefaultQualifiedResource.String()) + } + if (e.KeyRootFunc == nil) != (e.KeyFunc == nil) { + return fmt.Errorf("store for %s must set both KeyRootFunc and KeyFunc or neither", e.DefaultQualifiedResource.String()) + } + + if e.TableConvertor == nil { + return fmt.Errorf("store for %s must set TableConvertor; rest.NewDefaultTableConvertor(e.DefaultQualifiedResource) can be used to output just name/creation time", e.DefaultQualifiedResource.String()) + } + + var isNamespaced bool + switch { + case e.CreateStrategy != nil: + isNamespaced = e.CreateStrategy.NamespaceScoped() + case e.UpdateStrategy != nil: + isNamespaced = e.UpdateStrategy.NamespaceScoped() + default: + return fmt.Errorf("store for %s must have CreateStrategy or UpdateStrategy set", e.DefaultQualifiedResource.String()) + } + + if e.DeleteStrategy == nil { + return fmt.Errorf("store for %s must have DeleteStrategy set", e.DefaultQualifiedResource.String()) + } + + if options.RESTOptions == nil { + return fmt.Errorf("options for %s must have RESTOptions set", e.DefaultQualifiedResource.String()) + } + + attrFunc := options.AttrFunc + if attrFunc == nil { + if isNamespaced { + attrFunc = storage.DefaultNamespaceScopedAttr + } else { + attrFunc = storage.DefaultClusterScopedAttr + } + } + if e.PredicateFunc == nil { + e.PredicateFunc = func(label labels.Selector, field fields.Selector) storage.SelectionPredicate { + return storage.SelectionPredicate{ + Label: label, + Field: field, + GetAttrs: attrFunc, + } + } + } + + err := validateIndexers(options.Indexers) + if err != nil { + return err + } + + opts, err := options.RESTOptions.GetRESTOptions(e.DefaultQualifiedResource) + if err != nil { + return err + } + + // ResourcePrefix must come from the underlying factory + prefix := opts.ResourcePrefix + if !strings.HasPrefix(prefix, "/") { + prefix = "/" + prefix + } + if prefix == "/" { + return fmt.Errorf("store for %s has an invalid prefix %q", e.DefaultQualifiedResource.String(), opts.ResourcePrefix) + } + + // Set the default behavior for storage key generation + if e.KeyRootFunc == nil && e.KeyFunc == nil { + if isNamespaced { + e.KeyRootFunc = func(ctx context.Context) string { + return NamespaceKeyRootFunc(ctx, prefix) + } + e.KeyFunc = func(ctx context.Context, name string) (string, error) { + return NamespaceKeyFunc(ctx, prefix, name) + } + } else { + e.KeyRootFunc = func(ctx context.Context) string { + return prefix + } + e.KeyFunc = func(ctx context.Context, name string) (string, error) { + return NoNamespaceKeyFunc(ctx, prefix, name) + } + } + } + + // We adapt the store's keyFunc so that we can use it with the StorageDecorator + // without making any assumptions about where objects are stored in etcd + keyFunc := func(obj runtime.Object) (string, error) { + accessor, err := meta.Accessor(obj) + if err != nil { + return "", err + } + + if isNamespaced { + return e.KeyFunc(genericapirequest.WithNamespace(genericapirequest.NewContext(), accessor.GetNamespace()), accessor.GetName()) + } + + return e.KeyFunc(genericapirequest.NewContext(), accessor.GetName()) + } + + if e.DeleteCollectionWorkers == 0 { + 
e.DeleteCollectionWorkers = opts.DeleteCollectionWorkers + } + + e.EnableGarbageCollection = opts.EnableGarbageCollection + + if e.ObjectNameFunc == nil { + e.ObjectNameFunc = func(obj runtime.Object) (string, error) { + accessor, err := meta.Accessor(obj) + if err != nil { + return "", err + } + return accessor.GetName(), nil + } + } + + if e.Storage.Storage == nil { + e.Storage.Codec = opts.StorageConfig.Codec + var err error + e.Storage.Storage, e.DestroyFunc, err = opts.Decorator( + opts.StorageConfig, + prefix, + keyFunc, + e.NewFunc, + e.NewListFunc, + attrFunc, + options.TriggerFunc, + options.Indexers, + ) + if err != nil { + return err + } + e.StorageVersioner = opts.StorageConfig.EncodeVersioner + + if opts.CountMetricPollPeriod > 0 { + stopFunc := e.startObservingCount(opts.CountMetricPollPeriod) + previousDestroy := e.DestroyFunc + e.DestroyFunc = func() { + stopFunc() + if previousDestroy != nil { + previousDestroy() + } + } + } + } + + return nil +} + +// startObservingCount starts monitoring given prefix and periodically updating metrics. It returns a function to stop collection. +func (e *Store) startObservingCount(period time.Duration) func() { + prefix := e.KeyRootFunc(genericapirequest.NewContext()) + resourceName := e.DefaultQualifiedResource.String() + klog.V(2).InfoS("Monitoring resource count at path", "resource", resourceName, "path", "/"+prefix) + stopCh := make(chan struct{}) + go wait.JitterUntil(func() { + count, err := e.Storage.Count(prefix) + if err != nil { + klog.V(5).InfoS("Failed to update storage count metric", "err", err) + metrics.UpdateObjectCount(resourceName, -1) + } else { + metrics.UpdateObjectCount(resourceName, count) + } + }, period, resourceCountPollPeriodJitter, true, stopCh) + return func() { close(stopCh) } +} + +func (e *Store) ConvertToTable(ctx context.Context, object runtime.Object, tableOptions runtime.Object) (*metav1.Table, error) { + if e.TableConvertor != nil { + return e.TableConvertor.ConvertToTable(ctx, object, tableOptions) + } + return rest.NewDefaultTableConvertor(e.DefaultQualifiedResource).ConvertToTable(ctx, object, tableOptions) +} + +func (e *Store) StorageVersion() runtime.GroupVersioner { + return e.StorageVersioner +} + +// GetResetFields implements rest.ResetFieldsStrategy +func (e *Store) GetResetFields() map[fieldpath.APIVersion]*fieldpath.Set { + if e.ResetFieldsStrategy == nil { + return nil + } + return e.ResetFieldsStrategy.GetResetFields() +} + +// validateIndexers will check the prefix of indexers. 
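+//
+// Index names must carry a label ("l:") or field ("f:") prefix; for example
+// (a hypothetical indexer set, shown for illustration):
+//
+//	indexers := &cache.Indexers{
+//		"l:app":           appLabelIndexFunc,  // label-based index, accepted
+//		"f:metadata.name": nameFieldIndexFunc, // field-based index, accepted
+//	}
+//	err := validateIndexers(indexers) // a name like "byNode" would be rejected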
+func validateIndexers(indexers *cache.Indexers) error {
+	if indexers == nil {
+		return nil
+	}
+	for indexName := range *indexers {
+		if len(indexName) <= 2 || (indexName[:2] != "l:" && indexName[:2] != "f:") {
+			return fmt.Errorf("index must be prefixed with \"l:\" or \"f:\"")
+		}
+	}
+	return nil
+}
diff --git a/vendor/k8s.io/apiserver/pkg/server/options/OWNERS b/vendor/k8s.io/apiserver/pkg/server/options/OWNERS
new file mode 100644
index 000000000..702015ad1
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/server/options/OWNERS
@@ -0,0 +1,15 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+reviewers:
+- smarterclayton
+- wojtek-t
+- deads2k
+- liggitt
+- sttts
+- jlowdermilk
+- soltysh
+- dims
+- cjcullen
+- ping035627
+- xiangpengzhao
+- enj
diff --git a/vendor/k8s.io/apiserver/pkg/server/options/admission.go b/vendor/k8s.io/apiserver/pkg/server/options/admission.go
new file mode 100644
index 000000000..765e2ad2b
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/server/options/admission.go
@@ -0,0 +1,234 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package options
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/spf13/pflag"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/apiserver/pkg/admission"
+	"k8s.io/apiserver/pkg/admission/initializer"
+	admissionmetrics "k8s.io/apiserver/pkg/admission/metrics"
+	"k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle"
+	mutatingwebhook "k8s.io/apiserver/pkg/admission/plugin/webhook/mutating"
+	validatingwebhook "k8s.io/apiserver/pkg/admission/plugin/webhook/validating"
+	apiserverapi "k8s.io/apiserver/pkg/apis/apiserver"
+	apiserverapiv1 "k8s.io/apiserver/pkg/apis/apiserver/v1"
+	apiserverapiv1alpha1 "k8s.io/apiserver/pkg/apis/apiserver/v1alpha1"
+	"k8s.io/apiserver/pkg/server"
+	"k8s.io/client-go/informers"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/rest"
+	"k8s.io/component-base/featuregate"
+)
+
+var configScheme = runtime.NewScheme()
+
+func init() {
+	utilruntime.Must(apiserverapi.AddToScheme(configScheme))
+	utilruntime.Must(apiserverapiv1alpha1.AddToScheme(configScheme))
+	utilruntime.Must(apiserverapiv1.AddToScheme(configScheme))
+}
+
+// AdmissionOptions holds the admission options
+type AdmissionOptions struct {
+	// RecommendedPluginOrder holds an ordered list of plugin names we recommend to use by default
+	RecommendedPluginOrder []string
+	// DefaultOffPlugins is a set of plugin names that are disabled by default
+	DefaultOffPlugins sets.String
+
+	// EnablePlugins indicates plugins to be enabled passed through `--enable-admission-plugins`.
+	EnablePlugins []string
+	// DisablePlugins indicates plugins to be disabled passed through `--disable-admission-plugins`.
+	DisablePlugins []string
+	// ConfigFile is the file path with admission control configuration.
+	ConfigFile string
+	// Plugins contains all registered plugins.
+	Plugins *admission.Plugins
+	// Decorators is a list of admission decorators to wrap around the admission plugins
+	Decorators admission.Decorators
+}
+
+// NewAdmissionOptions creates a new instance of AdmissionOptions.
+// Note: it also calls RegisterAllAdmissionPlugins to register
+// all generic admission plugins.
+//
+// It provides a RecommendedPluginOrder that holds sane values
+// that can be used by servers that don't care about the admission chain.
+// Servers that do care can overwrite/append that field after creation.
+func NewAdmissionOptions() *AdmissionOptions {
+	options := &AdmissionOptions{
+		Plugins:    admission.NewPlugins(),
+		Decorators: admission.Decorators{admission.DecoratorFunc(admissionmetrics.WithControllerMetrics)},
+		// This list is a mix of mutating admission plugins and validating
+		// admission plugins. The apiserver always runs the validating ones
+		// after all the mutating ones, so their relative order in this list
+		// doesn't matter.
+		RecommendedPluginOrder: []string{lifecycle.PluginName, mutatingwebhook.PluginName, validatingwebhook.PluginName},
+		DefaultOffPlugins:      sets.NewString(),
+	}
+	server.RegisterAllAdmissionPlugins(options.Plugins)
+	return options
+}
+
+// AddFlags adds flags related to admission for a specific APIServer to the specified FlagSet
+func (a *AdmissionOptions) AddFlags(fs *pflag.FlagSet) {
+	if a == nil {
+		return
+	}
+
+	fs.StringSliceVar(&a.EnablePlugins, "enable-admission-plugins", a.EnablePlugins, ""+
+		"admission plugins that should be enabled in addition to default enabled ones ("+
+		strings.Join(a.defaultEnabledPluginNames(), ", ")+"). "+
+		"Comma-delimited list of admission plugins: "+strings.Join(a.Plugins.Registered(), ", ")+". "+
+		"The order of plugins in this flag does not matter.")
+	fs.StringSliceVar(&a.DisablePlugins, "disable-admission-plugins", a.DisablePlugins, ""+
+		"admission plugins that should be disabled although they are in the default enabled plugins list ("+
+		strings.Join(a.defaultEnabledPluginNames(), ", ")+"). "+
+		"Comma-delimited list of admission plugins: "+strings.Join(a.Plugins.Registered(), ", ")+". "+
+		"The order of plugins in this flag does not matter.")
+	fs.StringVar(&a.ConfigFile, "admission-control-config-file", a.ConfigFile,
+		"File with admission control configuration.")
+}
+
+// ApplyTo adds the admission chain to the server configuration.
+// In case admission plugin names were not provided by a cluster-admin, they will be prepared from the recommended/default values.
+// In addition the method lazily initializes a generic plugin that is appended to the list of pluginInitializers.
+// Note: this method uses genericconfig.Authorizer.
+func (a *AdmissionOptions) ApplyTo(
+	c *server.Config,
+	informers informers.SharedInformerFactory,
+	kubeAPIServerClientConfig *rest.Config,
+	features featuregate.FeatureGate,
+	pluginInitializers ...admission.PluginInitializer,
+) error {
+	if a == nil {
+		return nil
+	}
+
+	// Admission depends on CoreAPI to set SharedInformerFactory and ClientConfig.
+	if informers == nil {
+		return fmt.Errorf("admission depends on a Kubernetes core API shared informer; it cannot be nil")
+	}
+
+	pluginNames := a.enabledPluginNames()
+
+	pluginsConfigProvider, err := admission.ReadAdmissionConfiguration(pluginNames, a.ConfigFile, configScheme)
+	if err != nil {
+		return fmt.Errorf("failed to read plugin config: %v", err)
+	}
+
+	clientset, err := kubernetes.NewForConfig(kubeAPIServerClientConfig)
+	if err != nil {
+		return err
+	}
+	genericInitializer := initializer.New(clientset, informers, c.Authorization.Authorizer, features)
+	initializersChain := admission.PluginInitializers{}
+	pluginInitializers = append(pluginInitializers, genericInitializer)
+	initializersChain = append(initializersChain, pluginInitializers...)
+
+	admissionChain, err := a.Plugins.NewFromPlugins(pluginNames, pluginsConfigProvider, initializersChain, a.Decorators)
+	if err != nil {
+		return err
+	}
+
+	c.AdmissionControl = admissionmetrics.WithStepMetrics(admissionChain)
+	return nil
+}
+
+// Validate verifies flags passed to AdmissionOptions.
+func (a *AdmissionOptions) Validate() []error {
+	if a == nil {
+		return nil
+	}
+
+	errs := []error{}
+
+	registeredPlugins := sets.NewString(a.Plugins.Registered()...)
+	for _, name := range a.EnablePlugins {
+		if !registeredPlugins.Has(name) {
+			errs = append(errs, fmt.Errorf("enable-admission-plugins plugin %q is unknown", name))
+		}
+	}
+
+	for _, name := range a.DisablePlugins {
+		if !registeredPlugins.Has(name) {
+			errs = append(errs, fmt.Errorf("disable-admission-plugins plugin %q is unknown", name))
+		}
+	}
+
+	enablePlugins := sets.NewString(a.EnablePlugins...)
+	disablePlugins := sets.NewString(a.DisablePlugins...)
+	if len(enablePlugins.Intersection(disablePlugins).List()) > 0 {
+		errs = append(errs, fmt.Errorf("%v in enable-admission-plugins and disable-admission-plugins "+
+			"overlap", enablePlugins.Intersection(disablePlugins).List()))
+	}
+
+	// Verify RecommendedPluginOrder.
+	recommendPlugins := sets.NewString(a.RecommendedPluginOrder...)
+	intersections := registeredPlugins.Intersection(recommendPlugins)
+	if !intersections.Equal(recommendPlugins) {
+		// Developer error; this should never happen.
+		errs = append(errs, fmt.Errorf("plugins %v in RecommendedPluginOrder are not registered",
+			recommendPlugins.Difference(intersections).List()))
+	}
+	if !intersections.Equal(registeredPlugins) {
+		// Developer error; this should never happen.
+		errs = append(errs, fmt.Errorf("plugins %v registered are not in RecommendedPluginOrder",
+			registeredPlugins.Difference(intersections).List()))
+	}
+
+	return errs
+}
+
+// enabledPluginNames makes use of the RecommendedPluginOrder, DefaultOffPlugins,
+// EnablePlugins and DisablePlugins fields
+// to prepare an ordered list of the plugin names that are enabled.
+func (a *AdmissionOptions) enabledPluginNames() []string {
+	allOffPlugins := append(a.DefaultOffPlugins.List(), a.DisablePlugins...)
+	disabledPlugins := sets.NewString(allOffPlugins...)
+	enabledPlugins := sets.NewString(a.EnablePlugins...)
+	disabledPlugins = disabledPlugins.Difference(enabledPlugins)
+
+	orderedPlugins := []string{}
+	for _, plugin := range a.RecommendedPluginOrder {
+		if !disabledPlugins.Has(plugin) {
+			orderedPlugins = append(orderedPlugins, plugin)
+		}
+	}
+
+	return orderedPlugins
+}
+
+// defaultEnabledPluginNames returns the names of the plugins that are enabled by default.
+func (a *AdmissionOptions) defaultEnabledPluginNames() []string {
+	defaultOnPluginNames := []string{}
+	for _, pluginName := range a.RecommendedPluginOrder {
+		if !a.DefaultOffPlugins.Has(pluginName) {
+			defaultOnPluginNames = append(defaultOnPluginNames, pluginName)
+		}
+	}
+
+	return defaultOnPluginNames
+}
diff --git a/vendor/k8s.io/apiserver/pkg/server/options/api_enablement.go b/vendor/k8s.io/apiserver/pkg/server/options/api_enablement.go
new file mode 100644
index 000000000..794e89ded
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/server/options/api_enablement.go
@@ -0,0 +1,115 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package options
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/spf13/pflag"
+
+	"k8s.io/apiserver/pkg/server"
+	"k8s.io/apiserver/pkg/server/resourceconfig"
+	serverstore "k8s.io/apiserver/pkg/server/storage"
+	cliflag "k8s.io/component-base/cli/flag"
+)
+
+// APIEnablementOptions contains the options for which resources to turn on and off.
+// Given small aggregated API servers, this option isn't required for "normal" API servers.
+type APIEnablementOptions struct {
+	RuntimeConfig cliflag.ConfigurationMap
+}
+
+func NewAPIEnablementOptions() *APIEnablementOptions {
+	return &APIEnablementOptions{
+		RuntimeConfig: make(cliflag.ConfigurationMap),
+	}
+}
+
+// AddFlags adds flags for a specific APIServer to the specified FlagSet
+func (s *APIEnablementOptions) AddFlags(fs *pflag.FlagSet) {
+	fs.Var(&s.RuntimeConfig, "runtime-config", ""+
+		"A set of key=value pairs that enable or disable built-in APIs. Supported options are:\n"+
+		"v1=true|false for the core API group\n"+
+		"<group>/<version>=true|false for a specific API group and version (e.g. apps/v1=true)\n"+
+		"api/all=true|false controls all API versions\n"+
+		"api/ga=true|false controls all API versions of the form v[0-9]+\n"+
+		"api/beta=true|false controls all API versions of the form v[0-9]+beta[0-9]+\n"+
+		"api/alpha=true|false controls all API versions of the form v[0-9]+alpha[0-9]+\n"+
+		"api/legacy is deprecated, and will be removed in a future version")
+}
+
+// Validate validates RuntimeConfig with a list of registries.
+// Usually this list only has one element, the apiserver registry of the process.
+// But in the advanced (and usually not recommended) case of delegated apiservers there can be more.
+// Validate will filter out the known groups of each registry.
+// If anything is left over after that, an error is returned.
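+//
+// For example (a sketch; the "registry" value is assumed to implement
+// GroupRegistry):
+//
+//	opts := NewAPIEnablementOptions()
+//	opts.RuntimeConfig["api/all"] = "false" // rejected by Validate when it is the only key
+//	opts.RuntimeConfig["apps/v1"] = "true"  // with an explicit enable alongside it, the map is accepted
+//	errs := opts.Validate(registry)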
+func (s *APIEnablementOptions) Validate(registries ...GroupRegistry) []error {
+	if s == nil {
+		return nil
+	}
+
+	errors := []error{}
+	if s.RuntimeConfig[resourceconfig.APIAll] == "false" && len(s.RuntimeConfig) == 1 {
+		// Do not allow only setting api/all=false; in that case apiserver startup has no meaning.
+		return append(errors, fmt.Errorf("invalid key with only %v=false", resourceconfig.APIAll))
+	}
+
+	groups, err := resourceconfig.ParseGroups(s.RuntimeConfig)
+	if err != nil {
+		return append(errors, err)
+	}
+
+	for _, registry := range registries {
+		// filter out known groups
+		groups = unknownGroups(groups, registry)
+	}
+	if len(groups) != 0 {
+		errors = append(errors, fmt.Errorf("unknown api groups %s", strings.Join(groups, ",")))
+	}
+
+	return errors
+}
+
+// ApplyTo overrides MergedResourceConfig with defaults and registry
+func (s *APIEnablementOptions) ApplyTo(c *server.Config, defaultResourceConfig *serverstore.ResourceConfig, registry resourceconfig.GroupVersionRegistry) error {
+
+	if s == nil {
+		return nil
+	}
+
+	mergedResourceConfig, err := resourceconfig.MergeAPIResourceConfigs(defaultResourceConfig, s.RuntimeConfig, registry)
+	c.MergedResourceConfig = mergedResourceConfig
+
+	return err
+}
+
+func unknownGroups(groups []string, registry GroupRegistry) []string {
+	unknownGroups := []string{}
+	for _, group := range groups {
+		if !registry.IsGroupRegistered(group) {
+			unknownGroups = append(unknownGroups, group)
+		}
+	}
+	return unknownGroups
+}
+
+// GroupRegistry provides a method to check whether a given group is registered.
+type GroupRegistry interface {
+	// IsGroupRegistered returns true if the given group is registered.
+	IsGroupRegistered(group string) bool
+}
diff --git a/vendor/k8s.io/apiserver/pkg/server/options/audit.go b/vendor/k8s.io/apiserver/pkg/server/options/audit.go
new file mode 100644
index 000000000..6e062c987
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/server/options/audit.go
@@ -0,0 +1,622 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package options
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"strings"
+	"time"
+
+	"github.com/spf13/pflag"
+	"gopkg.in/natefinch/lumberjack.v2"
+	"k8s.io/klog/v2"
+
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	utilnet "k8s.io/apimachinery/pkg/util/net"
+	"k8s.io/apimachinery/pkg/util/sets"
+	auditinternal "k8s.io/apiserver/pkg/apis/audit"
+	auditv1 "k8s.io/apiserver/pkg/apis/audit/v1"
+	auditv1alpha1 "k8s.io/apiserver/pkg/apis/audit/v1alpha1"
+	auditv1beta1 "k8s.io/apiserver/pkg/apis/audit/v1beta1"
+	"k8s.io/apiserver/pkg/audit"
+	"k8s.io/apiserver/pkg/audit/policy"
+	"k8s.io/apiserver/pkg/server"
+	"k8s.io/apiserver/pkg/server/egressselector"
+	"k8s.io/apiserver/pkg/util/webhook"
+	pluginbuffered "k8s.io/apiserver/plugin/pkg/audit/buffered"
+	pluginlog "k8s.io/apiserver/plugin/pkg/audit/log"
+	plugintruncate "k8s.io/apiserver/plugin/pkg/audit/truncate"
+	pluginwebhook "k8s.io/apiserver/plugin/pkg/audit/webhook"
+)
+
+const (
+	// Default configuration values for ModeBatch.
+ defaultBatchBufferSize = 10000 // Buffer up to 10000 events before starting discarding. + // These batch parameters are only used by the webhook backend. + defaultBatchMaxSize = 400 // Only send up to 400 events at a time. + defaultBatchMaxWait = 30 * time.Second // Send events at least twice a minute. + defaultBatchThrottleQPS = 10 // Limit the send rate by 10 QPS. + defaultBatchThrottleBurst = 15 // Allow up to 15 QPS burst. +) + +func appendBackend(existing, newBackend audit.Backend) audit.Backend { + if existing == nil { + return newBackend + } + if newBackend == nil { + return existing + } + return audit.Union(existing, newBackend) +} + +type AuditOptions struct { + // Policy configuration file for filtering audit events that are captured. + // If unspecified, a default is provided. + PolicyFile string + + // Plugin options + LogOptions AuditLogOptions + WebhookOptions AuditWebhookOptions +} + +const ( + // ModeBatch indicates that the audit backend should buffer audit events + // internally, sending batch updates either once a certain number of + // events have been received or a certain amount of time has passed. + ModeBatch = "batch" + // ModeBlocking causes the audit backend to block on every attempt to process + // a set of events. This causes requests to the API server to wait for the + // flush before sending a response. + ModeBlocking = "blocking" + // ModeBlockingStrict is the same as ModeBlocking, except when there is + // a failure during audit logging at RequestReceived stage, the whole + // request to apiserver will fail. + ModeBlockingStrict = "blocking-strict" +) + +// AllowedModes is the modes known for audit backends. +var AllowedModes = []string{ + ModeBatch, + ModeBlocking, + ModeBlockingStrict, +} + +type AuditBatchOptions struct { + // Should the backend asynchronous batch events to the webhook backend or + // should the backend block responses? + // + // Defaults to asynchronous batch events. + Mode string + // Configuration for batching backend. Only used in batch mode. + BatchConfig pluginbuffered.BatchConfig +} + +type AuditTruncateOptions struct { + // Whether truncating is enabled or not. + Enabled bool + + // Truncating configuration. + TruncateConfig plugintruncate.Config +} + +// AuditLogOptions determines the output of the structured audit log by default. +type AuditLogOptions struct { + Path string + MaxAge int + MaxBackups int + MaxSize int + Format string + Compress bool + + BatchOptions AuditBatchOptions + TruncateOptions AuditTruncateOptions + + // API group version used for serializing audit events. + GroupVersionString string +} + +// AuditWebhookOptions control the webhook configuration for audit events. +type AuditWebhookOptions struct { + ConfigFile string + InitialBackoff time.Duration + + BatchOptions AuditBatchOptions + TruncateOptions AuditTruncateOptions + + // API group version used for serializing audit events. + GroupVersionString string +} + +// AuditDynamicOptions control the configuration of dynamic backends for audit events +type AuditDynamicOptions struct { + // Enabled tells whether the dynamic audit capability is enabled. + Enabled bool + + // Configuration for batching backend. 
This is currently only used as an override + // for integration tests + BatchConfig *pluginbuffered.BatchConfig +} + +func NewAuditOptions() *AuditOptions { + return &AuditOptions{ + WebhookOptions: AuditWebhookOptions{ + InitialBackoff: pluginwebhook.DefaultInitialBackoffDelay, + BatchOptions: AuditBatchOptions{ + Mode: ModeBatch, + BatchConfig: defaultWebhookBatchConfig(), + }, + TruncateOptions: NewAuditTruncateOptions(), + GroupVersionString: "audit.k8s.io/v1", + }, + LogOptions: AuditLogOptions{ + Format: pluginlog.FormatJson, + BatchOptions: AuditBatchOptions{ + Mode: ModeBlocking, + BatchConfig: defaultLogBatchConfig(), + }, + TruncateOptions: NewAuditTruncateOptions(), + GroupVersionString: "audit.k8s.io/v1", + }, + } +} + +func NewAuditTruncateOptions() AuditTruncateOptions { + return AuditTruncateOptions{ + Enabled: false, + TruncateConfig: plugintruncate.Config{ + MaxBatchSize: 10 * 1024 * 1024, // 10MB + MaxEventSize: 100 * 1024, // 100KB + }, + } +} + +// Validate checks invalid config combination +func (o *AuditOptions) Validate() []error { + if o == nil { + return nil + } + + var allErrors []error + allErrors = append(allErrors, o.LogOptions.Validate()...) + allErrors = append(allErrors, o.WebhookOptions.Validate()...) + + return allErrors +} + +func validateBackendMode(pluginName string, mode string) error { + for _, m := range AllowedModes { + if m == mode { + return nil + } + } + return fmt.Errorf("invalid audit %s mode %s, allowed modes are %q", pluginName, mode, strings.Join(AllowedModes, ",")) +} + +func validateBackendBatchOptions(pluginName string, options AuditBatchOptions) error { + if err := validateBackendMode(pluginName, options.Mode); err != nil { + return err + } + if options.Mode != ModeBatch { + // Don't validate the unused options. 
+ return nil + } + config := options.BatchConfig + if config.BufferSize <= 0 { + return fmt.Errorf("invalid audit batch %s buffer size %v, must be a positive number", pluginName, config.BufferSize) + } + if config.MaxBatchSize <= 0 { + return fmt.Errorf("invalid audit batch %s max batch size %v, must be a positive number", pluginName, config.MaxBatchSize) + } + if config.ThrottleEnable { + if config.ThrottleQPS <= 0 { + return fmt.Errorf("invalid audit batch %s throttle QPS %v, must be a positive number", pluginName, config.ThrottleQPS) + } + if config.ThrottleBurst <= 0 { + return fmt.Errorf("invalid audit batch %s throttle burst %v, must be a positive number", pluginName, config.ThrottleBurst) + } + } + return nil +} + +var knownGroupVersions = []schema.GroupVersion{ + auditv1alpha1.SchemeGroupVersion, + auditv1beta1.SchemeGroupVersion, + auditv1.SchemeGroupVersion, +} + +func validateGroupVersionString(groupVersion string) error { + gv, err := schema.ParseGroupVersion(groupVersion) + if err != nil { + return err + } + if !knownGroupVersion(gv) { + return fmt.Errorf("invalid group version, allowed versions are %q", knownGroupVersions) + } + if gv != auditv1.SchemeGroupVersion { + klog.Warningf("%q is deprecated and will be removed in a future release, use %q instead", gv, auditv1.SchemeGroupVersion) + } + return nil +} + +func knownGroupVersion(gv schema.GroupVersion) bool { + for _, knownGv := range knownGroupVersions { + if gv == knownGv { + return true + } + } + return false +} + +func (o *AuditOptions) AddFlags(fs *pflag.FlagSet) { + if o == nil { + return + } + + fs.StringVar(&o.PolicyFile, "audit-policy-file", o.PolicyFile, + "Path to the file that defines the audit policy configuration.") + + o.LogOptions.AddFlags(fs) + o.LogOptions.BatchOptions.AddFlags(pluginlog.PluginName, fs) + o.LogOptions.TruncateOptions.AddFlags(pluginlog.PluginName, fs) + o.WebhookOptions.AddFlags(fs) + o.WebhookOptions.BatchOptions.AddFlags(pluginwebhook.PluginName, fs) + o.WebhookOptions.TruncateOptions.AddFlags(pluginwebhook.PluginName, fs) +} + +func (o *AuditOptions) ApplyTo( + c *server.Config, +) error { + if o == nil { + return nil + } + if c == nil { + return fmt.Errorf("server config must be non-nil") + } + + // 1. Build policy checker + checker, err := o.newPolicyChecker() + if err != nil { + return err + } + + // 2. Build log backend + var logBackend audit.Backend + w, err := o.LogOptions.getWriter() + if err != nil { + return err + } + if w != nil { + if checker == nil { + klog.V(2).Info("No audit policy file provided, no events will be recorded for log backend") + } else { + logBackend = o.LogOptions.newBackend(w) + } + } + + // 3. Build webhook backend + var webhookBackend audit.Backend + if o.WebhookOptions.enabled() { + if checker == nil { + klog.V(2).Info("No audit policy file provided, no events will be recorded for webhook backend") + } else { + if c.EgressSelector != nil { + var egressDialer utilnet.DialFunc + egressDialer, err = c.EgressSelector.Lookup(egressselector.ControlPlane.AsNetworkContext()) + if err != nil { + return err + } + webhookBackend, err = o.WebhookOptions.newUntruncatedBackend(egressDialer) + } else { + webhookBackend, err = o.WebhookOptions.newUntruncatedBackend(nil) + } + if err != nil { + return err + } + } + } + + groupVersion, err := schema.ParseGroupVersion(o.WebhookOptions.GroupVersionString) + if err != nil { + return err + } + + // 4. Apply dynamic options. 
+ var dynamicBackend audit.Backend + if webhookBackend != nil { + // if only webhook is enabled wrap it in the truncate options + dynamicBackend = o.WebhookOptions.TruncateOptions.wrapBackend(webhookBackend, groupVersion) + } + + // 5. Set the policy checker + c.AuditPolicyChecker = checker + + // 6. Join the log backend with the webhooks + c.AuditBackend = appendBackend(logBackend, dynamicBackend) + + if c.AuditBackend != nil { + klog.V(2).Infof("Using audit backend: %s", c.AuditBackend) + } + return nil +} + +func (o *AuditOptions) newPolicyChecker() (policy.Checker, error) { + if o.PolicyFile == "" { + return nil, nil + } + + p, err := policy.LoadPolicyFromFile(o.PolicyFile) + if err != nil { + return nil, fmt.Errorf("loading audit policy file: %v", err) + } + return policy.NewChecker(p), nil +} + +func (o *AuditBatchOptions) AddFlags(pluginName string, fs *pflag.FlagSet) { + fs.StringVar(&o.Mode, fmt.Sprintf("audit-%s-mode", pluginName), o.Mode, + "Strategy for sending audit events. Blocking indicates sending events should block"+ + " server responses. Batch causes the backend to buffer and write events"+ + " asynchronously. Known modes are "+strings.Join(AllowedModes, ",")+".") + fs.IntVar(&o.BatchConfig.BufferSize, fmt.Sprintf("audit-%s-batch-buffer-size", pluginName), + o.BatchConfig.BufferSize, "The size of the buffer to store events before "+ + "batching and writing. Only used in batch mode.") + fs.IntVar(&o.BatchConfig.MaxBatchSize, fmt.Sprintf("audit-%s-batch-max-size", pluginName), + o.BatchConfig.MaxBatchSize, "The maximum size of a batch. Only used in batch mode.") + fs.DurationVar(&o.BatchConfig.MaxBatchWait, fmt.Sprintf("audit-%s-batch-max-wait", pluginName), + o.BatchConfig.MaxBatchWait, "The amount of time to wait before force writing the "+ + "batch that hadn't reached the max size. Only used in batch mode.") + fs.BoolVar(&o.BatchConfig.ThrottleEnable, fmt.Sprintf("audit-%s-batch-throttle-enable", pluginName), + o.BatchConfig.ThrottleEnable, "Whether batching throttling is enabled. Only used in batch mode.") + fs.Float32Var(&o.BatchConfig.ThrottleQPS, fmt.Sprintf("audit-%s-batch-throttle-qps", pluginName), + o.BatchConfig.ThrottleQPS, "Maximum average number of batches per second. "+ + "Only used in batch mode.") + fs.IntVar(&o.BatchConfig.ThrottleBurst, fmt.Sprintf("audit-%s-batch-throttle-burst", pluginName), + o.BatchConfig.ThrottleBurst, "Maximum number of requests sent at the same "+ + "moment if ThrottleQPS was not utilized before. Only used in batch mode.") +} + +type ignoreErrorsBackend struct { + audit.Backend +} + +func (i *ignoreErrorsBackend) ProcessEvents(ev ...*auditinternal.Event) bool { + i.Backend.ProcessEvents(ev...) 
+ return true +} + +func (i *ignoreErrorsBackend) String() string { + return fmt.Sprintf("ignoreErrors<%s>", i.Backend) +} + +func (o *AuditBatchOptions) wrapBackend(delegate audit.Backend) audit.Backend { + if o.Mode == ModeBlockingStrict { + return delegate + } + if o.Mode == ModeBlocking { + return &ignoreErrorsBackend{Backend: delegate} + } + return pluginbuffered.NewBackend(delegate, o.BatchConfig) +} + +func (o *AuditTruncateOptions) Validate(pluginName string) error { + config := o.TruncateConfig + if config.MaxEventSize <= 0 { + return fmt.Errorf("invalid audit truncate %s max event size %v, must be a positive number", pluginName, config.MaxEventSize) + } + if config.MaxBatchSize < config.MaxEventSize { + return fmt.Errorf("invalid audit truncate %s max batch size %v, must be greater than "+ + "max event size (%v)", pluginName, config.MaxBatchSize, config.MaxEventSize) + } + return nil +} + +func (o *AuditTruncateOptions) AddFlags(pluginName string, fs *pflag.FlagSet) { + fs.BoolVar(&o.Enabled, fmt.Sprintf("audit-%s-truncate-enabled", pluginName), + o.Enabled, "Whether event and batch truncating is enabled.") + fs.Int64Var(&o.TruncateConfig.MaxBatchSize, fmt.Sprintf("audit-%s-truncate-max-batch-size", pluginName), + o.TruncateConfig.MaxBatchSize, "Maximum size of the batch sent to the underlying backend. "+ + "Actual serialized size can be several hundreds of bytes greater. If a batch exceeds this limit, "+ + "it is split into several batches of smaller size.") + fs.Int64Var(&o.TruncateConfig.MaxEventSize, fmt.Sprintf("audit-%s-truncate-max-event-size", pluginName), + o.TruncateConfig.MaxEventSize, "Maximum size of the audit event sent to the underlying backend. "+ + "If the size of an event is greater than this number, first request and response are removed, and "+ + "if this doesn't reduce the size enough, event is discarded.") +} + +func (o *AuditTruncateOptions) wrapBackend(delegate audit.Backend, gv schema.GroupVersion) audit.Backend { + if !o.Enabled { + return delegate + } + return plugintruncate.NewBackend(delegate, o.TruncateConfig, gv) +} + +func (o *AuditLogOptions) AddFlags(fs *pflag.FlagSet) { + fs.StringVar(&o.Path, "audit-log-path", o.Path, + "If set, all requests coming to the apiserver will be logged to this file. '-' means standard out.") + fs.IntVar(&o.MaxAge, "audit-log-maxage", o.MaxAge, + "The maximum number of days to retain old audit log files based on the timestamp encoded in their filename.") + fs.IntVar(&o.MaxBackups, "audit-log-maxbackup", o.MaxBackups, + "The maximum number of old audit log files to retain.") + fs.IntVar(&o.MaxSize, "audit-log-maxsize", o.MaxSize, + "The maximum size in megabytes of the audit log file before it gets rotated.") + fs.StringVar(&o.Format, "audit-log-format", o.Format, + "Format of saved audits. \"legacy\" indicates 1-line text format for each event."+ + " \"json\" indicates structured json format. Known formats are "+ + strings.Join(pluginlog.AllowedFormats, ",")+".") + fs.StringVar(&o.GroupVersionString, "audit-log-version", o.GroupVersionString, + "API group and version used for serializing audit events written to log.") + fs.BoolVar(&o.Compress, "audit-log-compress", o.Compress, "If set, the rotated log files will be compressed using gzip.") +} + +func (o *AuditLogOptions) Validate() []error { + // Check whether the log backend is enabled based on the options. 
+ if !o.enabled() { + return nil + } + + var allErrors []error + + if err := validateBackendBatchOptions(pluginlog.PluginName, o.BatchOptions); err != nil { + allErrors = append(allErrors, err) + } + if err := o.TruncateOptions.Validate(pluginlog.PluginName); err != nil { + allErrors = append(allErrors, err) + } + + if err := validateGroupVersionString(o.GroupVersionString); err != nil { + allErrors = append(allErrors, err) + } + + // Check log format + if !sets.NewString(pluginlog.AllowedFormats...).Has(o.Format) { + allErrors = append(allErrors, fmt.Errorf("invalid audit log format %s, allowed formats are %q", o.Format, strings.Join(pluginlog.AllowedFormats, ","))) + } + + // Check validities of MaxAge, MaxBackups and MaxSize of log options, if file log backend is enabled. + if o.MaxAge < 0 { + allErrors = append(allErrors, fmt.Errorf("--audit-log-maxage %v can't be a negative number", o.MaxAge)) + } + if o.MaxBackups < 0 { + allErrors = append(allErrors, fmt.Errorf("--audit-log-maxbackup %v can't be a negative number", o.MaxBackups)) + } + if o.MaxSize < 0 { + allErrors = append(allErrors, fmt.Errorf("--audit-log-maxsize %v can't be a negative number", o.MaxSize)) + } + + return allErrors +} + +// Check whether the log backend is enabled based on the options. +func (o *AuditLogOptions) enabled() bool { + return o != nil && o.Path != "" +} + +func (o *AuditLogOptions) getWriter() (io.Writer, error) { + if !o.enabled() { + return nil, nil + } + + if o.Path == "-" { + return os.Stdout, nil + } + + if err := o.ensureLogFile(); err != nil { + return nil, fmt.Errorf("ensureLogFile: %w", err) + } + + return &lumberjack.Logger{ + Filename: o.Path, + MaxAge: o.MaxAge, + MaxBackups: o.MaxBackups, + MaxSize: o.MaxSize, + Compress: o.Compress, + }, nil +} + +func (o *AuditLogOptions) ensureLogFile() error { + mode := os.FileMode(0600) + f, err := os.OpenFile(o.Path, os.O_CREATE|os.O_APPEND|os.O_RDWR, mode) + if err != nil { + return err + } + return f.Close() +} + +func (o *AuditLogOptions) newBackend(w io.Writer) audit.Backend { + groupVersion, _ := schema.ParseGroupVersion(o.GroupVersionString) + log := pluginlog.NewBackend(w, o.Format, groupVersion) + log = o.BatchOptions.wrapBackend(log) + log = o.TruncateOptions.wrapBackend(log, groupVersion) + return log +} + +func (o *AuditWebhookOptions) AddFlags(fs *pflag.FlagSet) { + fs.StringVar(&o.ConfigFile, "audit-webhook-config-file", o.ConfigFile, + "Path to a kubeconfig formatted file that defines the audit webhook configuration.") + fs.DurationVar(&o.InitialBackoff, "audit-webhook-initial-backoff", + o.InitialBackoff, "The amount of time to wait before retrying the first failed request.") + fs.DurationVar(&o.InitialBackoff, "audit-webhook-batch-initial-backoff", + o.InitialBackoff, "The amount of time to wait before retrying the first failed request.") + fs.MarkDeprecated("audit-webhook-batch-initial-backoff", + "Deprecated, use --audit-webhook-initial-backoff instead.") + fs.StringVar(&o.GroupVersionString, "audit-webhook-version", o.GroupVersionString, + "API group and version used for serializing audit events written to webhook.") +} + +func (o *AuditWebhookOptions) Validate() []error { + if !o.enabled() { + return nil + } + + var allErrors []error + if err := validateBackendBatchOptions(pluginwebhook.PluginName, o.BatchOptions); err != nil { + allErrors = append(allErrors, err) + } + if err := o.TruncateOptions.Validate(pluginwebhook.PluginName); err != nil { + allErrors = append(allErrors, err) + } + + if err := 
validateGroupVersionString(o.GroupVersionString); err != nil {
+		allErrors = append(allErrors, err)
+	}
+	return allErrors
+}
+
+func (o *AuditWebhookOptions) enabled() bool {
+	return o != nil && o.ConfigFile != ""
+}
+
+// newUntruncatedBackend returns a webhook backend without the truncate options applied;
+// this is done so that the same truncate backend can wrap both the webhook and dynamic backends.
+func (o *AuditWebhookOptions) newUntruncatedBackend(customDial utilnet.DialFunc) (audit.Backend, error) {
+	groupVersion, _ := schema.ParseGroupVersion(o.GroupVersionString)
+	webhook, err := pluginwebhook.NewBackend(o.ConfigFile, groupVersion, webhook.DefaultRetryBackoffWithInitialDelay(o.InitialBackoff), customDial)
+	if err != nil {
+		return nil, fmt.Errorf("initializing audit webhook: %v", err)
+	}
+	webhook = o.BatchOptions.wrapBackend(webhook)
+	return webhook, nil
+}
+
+// defaultWebhookBatchConfig returns the default BatchConfig used by the Webhook backend.
+func defaultWebhookBatchConfig() pluginbuffered.BatchConfig {
+	return pluginbuffered.BatchConfig{
+		BufferSize:   defaultBatchBufferSize,
+		MaxBatchSize: defaultBatchMaxSize,
+		MaxBatchWait: defaultBatchMaxWait,
+
+		ThrottleEnable: true,
+		ThrottleQPS:    defaultBatchThrottleQPS,
+		ThrottleBurst:  defaultBatchThrottleBurst,
+
+		AsyncDelegate: true,
+	}
+}
+
+// defaultLogBatchConfig returns the default BatchConfig used by the Log backend.
+func defaultLogBatchConfig() pluginbuffered.BatchConfig {
+	return pluginbuffered.BatchConfig{
+		BufferSize: defaultBatchBufferSize,
+		// Batching is not useful for the log-file backend.
+		// MaxBatchWait ignored.
+		MaxBatchSize:   1,
+		ThrottleEnable: false,
+		// Asynchronous log threads just create lock contention.
+		AsyncDelegate: false,
+	}
+}
diff --git a/vendor/k8s.io/apiserver/pkg/server/options/authentication.go b/vendor/k8s.io/apiserver/pkg/server/options/authentication.go
new file mode 100644
index 000000000..a82b4a739
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/server/options/authentication.go
@@ -0,0 +1,440 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package options
+
+import (
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/spf13/pflag"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/apiserver/pkg/authentication/authenticatorfactory"
+	"k8s.io/apiserver/pkg/authentication/request/headerrequest"
+	"k8s.io/apiserver/pkg/server"
+	"k8s.io/apiserver/pkg/server/dynamiccertificates"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/rest"
+	"k8s.io/client-go/tools/clientcmd"
+	"k8s.io/client-go/transport"
+	"k8s.io/klog/v2"
+	openapicommon "k8s.io/kube-openapi/pkg/common"
+)
+
+// DefaultAuthWebhookRetryBackoff returns the default backoff parameters for
+// both the authentication and authorization webhooks used by the apiserver.
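+//
+// As a worked example, the retry schedule these parameters produce (before
+// jitter) is:
+//
+//	step 1: 500ms
+//	step 2: 500ms * 1.5 = 750ms
+//	step 3: 750ms * 1.5 = 1.125s
+//	step 4: 1.125s * 1.5 ≈ 1.69s
+//	step 5: 1.69s * 1.5 ≈ 2.53s
+//
+// i.e. roughly 6.6s of total waiting across five attempts, with up to 20%
+// jitter added to each step.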
+func DefaultAuthWebhookRetryBackoff() *wait.Backoff { + return &wait.Backoff{ + Duration: 500 * time.Millisecond, + Factor: 1.5, + Jitter: 0.2, + Steps: 5, + } +} + +type RequestHeaderAuthenticationOptions struct { + // ClientCAFile is the root certificate bundle to verify client certificates on incoming requests + // before trusting usernames in headers. + ClientCAFile string + + UsernameHeaders []string + GroupHeaders []string + ExtraHeaderPrefixes []string + AllowedNames []string +} + +func (s *RequestHeaderAuthenticationOptions) Validate() []error { + allErrors := []error{} + + if err := checkForWhiteSpaceOnly("requestheader-username-headers", s.UsernameHeaders...); err != nil { + allErrors = append(allErrors, err) + } + if err := checkForWhiteSpaceOnly("requestheader-group-headers", s.GroupHeaders...); err != nil { + allErrors = append(allErrors, err) + } + if err := checkForWhiteSpaceOnly("requestheader-extra-headers-prefix", s.ExtraHeaderPrefixes...); err != nil { + allErrors = append(allErrors, err) + } + if err := checkForWhiteSpaceOnly("requestheader-allowed-names", s.AllowedNames...); err != nil { + allErrors = append(allErrors, err) + } + + return allErrors +} + +func checkForWhiteSpaceOnly(flag string, headerNames ...string) error { + for _, headerName := range headerNames { + if len(strings.TrimSpace(headerName)) == 0 { + return fmt.Errorf("empty value in %q", flag) + } + } + + return nil +} + +func (s *RequestHeaderAuthenticationOptions) AddFlags(fs *pflag.FlagSet) { + if s == nil { + return + } + + fs.StringSliceVar(&s.UsernameHeaders, "requestheader-username-headers", s.UsernameHeaders, ""+ + "List of request headers to inspect for usernames. X-Remote-User is common.") + + fs.StringSliceVar(&s.GroupHeaders, "requestheader-group-headers", s.GroupHeaders, ""+ + "List of request headers to inspect for groups. X-Remote-Group is suggested.") + + fs.StringSliceVar(&s.ExtraHeaderPrefixes, "requestheader-extra-headers-prefix", s.ExtraHeaderPrefixes, ""+ + "List of request header prefixes to inspect. X-Remote-Extra- is suggested.") + + fs.StringVar(&s.ClientCAFile, "requestheader-client-ca-file", s.ClientCAFile, ""+ + "Root certificate bundle to use to verify client certificates on incoming requests "+ + "before trusting usernames in headers specified by --requestheader-username-headers. "+ + "WARNING: generally do not depend on authorization being already done for incoming requests.") + + fs.StringSliceVar(&s.AllowedNames, "requestheader-allowed-names", s.AllowedNames, ""+ + "List of client certificate common names to allow to provide usernames in headers "+ + "specified by --requestheader-username-headers. If empty, any client certificate validated "+ + "by the authorities in --requestheader-client-ca-file is allowed.") +} + +// ToAuthenticationRequestHeaderConfig returns a RequestHeaderConfig config object for these options +// if necessary, nil otherwise. 
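+//
+// A typical front-proxy setup might pass flags such as the following (the
+// paths and the allowed common name are hypothetical):
+//
+//	--requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt
+//	--requestheader-allowed-names=front-proxy-client
+//	--requestheader-username-headers=X-Remote-User
+//	--requestheader-group-headers=X-Remote-Group
+//	--requestheader-extra-headers-prefix=X-Remote-Extra-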
+func (s *RequestHeaderAuthenticationOptions) ToAuthenticationRequestHeaderConfig() (*authenticatorfactory.RequestHeaderConfig, error) {
+	if len(s.ClientCAFile) == 0 {
+		return nil, nil
+	}
+
+	caBundleProvider, err := dynamiccertificates.NewDynamicCAContentFromFile("request-header", s.ClientCAFile)
+	if err != nil {
+		return nil, err
+	}
+
+	return &authenticatorfactory.RequestHeaderConfig{
+		UsernameHeaders:     headerrequest.StaticStringSlice(s.UsernameHeaders),
+		GroupHeaders:        headerrequest.StaticStringSlice(s.GroupHeaders),
+		ExtraHeaderPrefixes: headerrequest.StaticStringSlice(s.ExtraHeaderPrefixes),
+		CAContentProvider:   caBundleProvider,
+		AllowedClientNames:  headerrequest.StaticStringSlice(s.AllowedNames),
+	}, nil
+}
+
+// ClientCertAuthenticationOptions provides different options for client cert auth. You should use
+// `GetClientCAContentProvider` to get the verify options for your authenticator.
+type ClientCertAuthenticationOptions struct {
+	// ClientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates
+	ClientCA string
+
+	// CAContentProvider holds the options for verifying incoming connections using mTLS and directly assigning to users.
+	// Generally this is the CA bundle file used to authenticate client certificates.
+	// If non-nil, this takes priority over the ClientCA file.
+	CAContentProvider dynamiccertificates.CAContentProvider
+}
+
+// GetClientCAContentProvider provides verify options for your authenticator while respecting the preferred order of verifiers.
+func (s *ClientCertAuthenticationOptions) GetClientCAContentProvider() (dynamiccertificates.CAContentProvider, error) {
+	if s.CAContentProvider != nil {
+		return s.CAContentProvider, nil
+	}
+
+	if len(s.ClientCA) == 0 {
+		return nil, nil
+	}
+
+	return dynamiccertificates.NewDynamicCAContentFromFile("client-ca-bundle", s.ClientCA)
+}
+
+func (s *ClientCertAuthenticationOptions) AddFlags(fs *pflag.FlagSet) {
+	fs.StringVar(&s.ClientCA, "client-ca-file", s.ClientCA, ""+
+		"If set, any request presenting a client certificate signed by one of "+
+		"the authorities in the client-ca-file is authenticated with an identity "+
+		"corresponding to the CommonName of the client certificate.")
+}
+
+// DelegatingAuthenticationOptions provides an easy way for composing API servers to delegate their authentication to
+// the root kube API server. The API federator will act as
+// a front proxy and direct connections will be able to delegate to the core kube API server
+type DelegatingAuthenticationOptions struct {
+	// RemoteKubeConfigFile is the file to use to connect to a "normal" kube API server which hosts the
+	// TokenAccessReview.authentication.k8s.io endpoint for checking tokens.
+	RemoteKubeConfigFile string
+	// RemoteKubeConfigFileOptional specifies whether a missing kubeconfig or
+	// in-cluster config should be fatal.
+	RemoteKubeConfigFileOptional bool
+
+	// CacheTTL is the length of time that a token authentication answer will be cached.
+	CacheTTL time.Duration
+
+	ClientCert    ClientCertAuthenticationOptions
+	RequestHeader RequestHeaderAuthenticationOptions
+
+	// SkipInClusterLookup indicates that missing authentication configuration should not be retrieved from the cluster configmap
+	SkipInClusterLookup bool
+
+	// TolerateInClusterLookupFailure indicates failures to look up authentication configuration from the cluster configmap should not be fatal.
+	// Setting this can result in an authenticator that will reject all requests.
+	TolerateInClusterLookupFailure bool
+
+	// WebhookRetryBackoff specifies the backoff parameters for the authentication webhook retry logic.
+	// This allows us to configure the sleep time at each iteration and the maximum number of retries allowed
+	// before we fail the webhook call in order to limit the fan out that ensues when the system is degraded.
+	WebhookRetryBackoff *wait.Backoff
+
+	// TokenRequestTimeout specifies a time limit for requests made by the authorization webhook client.
+	// The default value is set to 10 seconds.
+	TokenRequestTimeout time.Duration
+
+	// CustomRoundTripperFn allows for specifying a middleware function for custom HTTP behaviour for the authentication webhook client.
+	CustomRoundTripperFn transport.WrapperFunc
+}
+
+func NewDelegatingAuthenticationOptions() *DelegatingAuthenticationOptions {
+	return &DelegatingAuthenticationOptions{
+		// very low for responsiveness, but high enough to handle storms
+		CacheTTL:   10 * time.Second,
+		ClientCert: ClientCertAuthenticationOptions{},
+		RequestHeader: RequestHeaderAuthenticationOptions{
+			UsernameHeaders:     []string{"x-remote-user"},
+			GroupHeaders:        []string{"x-remote-group"},
+			ExtraHeaderPrefixes: []string{"x-remote-extra-"},
+		},
+		WebhookRetryBackoff: DefaultAuthWebhookRetryBackoff(),
+		TokenRequestTimeout: 10 * time.Second,
+	}
+}
+
+// WithCustomRetryBackoff sets the custom backoff parameters for the authentication webhook retry logic.
+func (s *DelegatingAuthenticationOptions) WithCustomRetryBackoff(backoff wait.Backoff) {
+	s.WebhookRetryBackoff = &backoff
+}
+
+// WithRequestTimeout sets the given timeout for requests made by the authentication webhook client.
+func (s *DelegatingAuthenticationOptions) WithRequestTimeout(timeout time.Duration) {
+	s.TokenRequestTimeout = timeout
+}
+
+// WithCustomRoundTripper allows for specifying a middleware function for custom HTTP behaviour for the authentication webhook client.
+func (s *DelegatingAuthenticationOptions) WithCustomRoundTripper(rt transport.WrapperFunc) {
+	s.CustomRoundTripperFn = rt
+}
+
+func (s *DelegatingAuthenticationOptions) Validate() []error {
+	if s == nil {
+		return nil
+	}
+
+	allErrors := []error{}
+	allErrors = append(allErrors, s.RequestHeader.Validate()...)
+
+	if s.WebhookRetryBackoff != nil && s.WebhookRetryBackoff.Steps <= 0 {
+		allErrors = append(allErrors, fmt.Errorf("number of webhook retry attempts must be greater than 0, but is: %d", s.WebhookRetryBackoff.Steps))
+	}
+
+	return allErrors
+}
+
+func (s *DelegatingAuthenticationOptions) AddFlags(fs *pflag.FlagSet) {
+	if s == nil {
+		return
+	}
+
+	var optionalKubeConfigSentence string
+	if s.RemoteKubeConfigFileOptional {
+		optionalKubeConfigSentence = " This is optional. If empty, all token requests are considered to be anonymous and no client CA is looked up in the cluster."
+ } + fs.StringVar(&s.RemoteKubeConfigFile, "authentication-kubeconfig", s.RemoteKubeConfigFile, ""+ + "kubeconfig file pointing at the 'core' kubernetes server with enough rights to create "+ + "tokenreviews.authentication.k8s.io."+optionalKubeConfigSentence) + + fs.DurationVar(&s.CacheTTL, "authentication-token-webhook-cache-ttl", s.CacheTTL, + "The duration to cache responses from the webhook token authenticator.") + + s.ClientCert.AddFlags(fs) + s.RequestHeader.AddFlags(fs) + + fs.BoolVar(&s.SkipInClusterLookup, "authentication-skip-lookup", s.SkipInClusterLookup, ""+ + "If false, the authentication-kubeconfig will be used to lookup missing authentication "+ + "configuration from the cluster.") + fs.BoolVar(&s.TolerateInClusterLookupFailure, "authentication-tolerate-lookup-failure", s.TolerateInClusterLookupFailure, ""+ + "If true, failures to look up missing authentication configuration from the cluster are not considered fatal. "+ + "Note that this can result in authentication that treats all requests as anonymous.") +} + +func (s *DelegatingAuthenticationOptions) ApplyTo(authenticationInfo *server.AuthenticationInfo, servingInfo *server.SecureServingInfo, openAPIConfig *openapicommon.Config) error { + if s == nil { + authenticationInfo.Authenticator = nil + return nil + } + + cfg := authenticatorfactory.DelegatingAuthenticatorConfig{ + Anonymous: true, + CacheTTL: s.CacheTTL, + WebhookRetryBackoff: s.WebhookRetryBackoff, + TokenAccessReviewTimeout: s.TokenRequestTimeout, + } + + client, err := s.getClient() + if err != nil { + return fmt.Errorf("failed to get delegated authentication kubeconfig: %v", err) + } + + // configure token review + if client != nil { + cfg.TokenAccessReviewClient = client.AuthenticationV1() + } + + // get the clientCA information + clientCASpecified := s.ClientCert != ClientCertAuthenticationOptions{} + var clientCAProvider dynamiccertificates.CAContentProvider + if clientCASpecified { + clientCAProvider, err = s.ClientCert.GetClientCAContentProvider() + if err != nil { + return fmt.Errorf("unable to load client CA provider: %v", err) + } + cfg.ClientCertificateCAContentProvider = clientCAProvider + if err = authenticationInfo.ApplyClientCert(cfg.ClientCertificateCAContentProvider, servingInfo); err != nil { + return fmt.Errorf("unable to assign client CA provider: %v", err) + } + + } else if !s.SkipInClusterLookup { + if client == nil { + klog.Warningf("No authentication-kubeconfig provided in order to lookup client-ca-file in configmap/%s in %s, so client certificate authentication won't work.", authenticationConfigMapName, authenticationConfigMapNamespace) + } else { + clientCAProvider, err = dynamiccertificates.NewDynamicCAFromConfigMapController("client-ca", authenticationConfigMapNamespace, authenticationConfigMapName, "client-ca-file", client) + if err != nil { + return fmt.Errorf("unable to load configmap based client CA file: %v", err) + } + cfg.ClientCertificateCAContentProvider = clientCAProvider + if err = authenticationInfo.ApplyClientCert(cfg.ClientCertificateCAContentProvider, servingInfo); err != nil { + return fmt.Errorf("unable to assign configmap based client CA file: %v", err) + } + + } + } + + requestHeaderCAFileSpecified := len(s.RequestHeader.ClientCAFile) > 0 + var requestHeaderConfig *authenticatorfactory.RequestHeaderConfig + if requestHeaderCAFileSpecified { + requestHeaderConfig, err = s.RequestHeader.ToAuthenticationRequestHeaderConfig() + if err != nil { + return fmt.Errorf("unable to create request header authentication 
config: %v", err) + } + + } else if !s.SkipInClusterLookup { + if client == nil { + klog.Warningf("No authentication-kubeconfig provided in order to lookup requestheader-client-ca-file in configmap/%s in %s, so request-header client certificate authentication won't work.", authenticationConfigMapName, authenticationConfigMapNamespace) + } else { + requestHeaderConfig, err = s.createRequestHeaderConfig(client) + if err != nil { + if s.TolerateInClusterLookupFailure { + klog.Warningf("Error looking up in-cluster authentication configuration: %v", err) + klog.Warning("Continuing without authentication configuration. This may treat all requests as anonymous.") + klog.Warning("To require authentication configuration lookup to succeed, set --authentication-tolerate-lookup-failure=false") + } else { + return fmt.Errorf("unable to load configmap based request-header-client-ca-file: %v", err) + } + } + } + } + if requestHeaderConfig != nil { + cfg.RequestHeaderConfig = requestHeaderConfig + if err = authenticationInfo.ApplyClientCert(cfg.RequestHeaderConfig.CAContentProvider, servingInfo); err != nil { + return fmt.Errorf("unable to load request-header-client-ca-file: %v", err) + } + } + + // create authenticator + authenticator, securityDefinitions, err := cfg.New() + if err != nil { + return err + } + authenticationInfo.Authenticator = authenticator + if openAPIConfig != nil { + openAPIConfig.SecurityDefinitions = securityDefinitions + } + + return nil +} + +const ( + authenticationConfigMapNamespace = metav1.NamespaceSystem + // authenticationConfigMapName is the name of ConfigMap in the kube-system namespace holding the root certificate + // bundle to use to verify client certificates on incoming requests before trusting usernames in headers specified + // by --requestheader-username-headers. This is created in the cluster by the kube-apiserver. + // "WARNING: generally do not depend on authorization being already done for incoming requests.") + authenticationConfigMapName = "extension-apiserver-authentication" +) + +func (s *DelegatingAuthenticationOptions) createRequestHeaderConfig(client kubernetes.Interface) (*authenticatorfactory.RequestHeaderConfig, error) { + dynamicRequestHeaderProvider, err := newDynamicRequestHeaderController(client) + if err != nil { + return nil, fmt.Errorf("unable to create request header authentication config: %v", err) + } + + // look up authentication configuration in the cluster and in case of an err defer to authentication-tolerate-lookup-failure flag + if err := dynamicRequestHeaderProvider.RunOnce(); err != nil { + return nil, err + } + + return &authenticatorfactory.RequestHeaderConfig{ + CAContentProvider: dynamicRequestHeaderProvider, + UsernameHeaders: headerrequest.StringSliceProvider(headerrequest.StringSliceProviderFunc(dynamicRequestHeaderProvider.UsernameHeaders)), + GroupHeaders: headerrequest.StringSliceProvider(headerrequest.StringSliceProviderFunc(dynamicRequestHeaderProvider.GroupHeaders)), + ExtraHeaderPrefixes: headerrequest.StringSliceProvider(headerrequest.StringSliceProviderFunc(dynamicRequestHeaderProvider.ExtraHeaderPrefixes)), + AllowedClientNames: headerrequest.StringSliceProvider(headerrequest.StringSliceProviderFunc(dynamicRequestHeaderProvider.AllowedClientNames)), + }, nil +} + +// getClient returns a Kubernetes clientset. If s.RemoteKubeConfigFileOptional is true, nil will be returned +// if no kubeconfig is specified by the user and the in-cluster config is not found. 
+func (s *DelegatingAuthenticationOptions) getClient() (kubernetes.Interface, error) { + var clientConfig *rest.Config + var err error + if len(s.RemoteKubeConfigFile) > 0 { + loadingRules := &clientcmd.ClientConfigLoadingRules{ExplicitPath: s.RemoteKubeConfigFile} + loader := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, &clientcmd.ConfigOverrides{}) + + clientConfig, err = loader.ClientConfig() + } else { + // without the remote kubeconfig file, try to use the in-cluster config. Most addon API servers will + // use this path. If it is optional, ignore errors. + clientConfig, err = rest.InClusterConfig() + if err != nil && s.RemoteKubeConfigFileOptional { + if err != rest.ErrNotInCluster { + klog.Warningf("failed to read in-cluster kubeconfig for delegated authentication: %v", err) + } + return nil, nil + } + } + if err != nil { + return nil, fmt.Errorf("failed to get delegated authentication kubeconfig: %v", err) + } + + // set high qps/burst limits since this will effectively limit API server responsiveness + clientConfig.QPS = 200 + clientConfig.Burst = 400 + // do not set a timeout on the http client, instead use context for cancellation + // if multiple timeouts were set, the request will pick the smaller timeout to be applied, leaving other useless. + // + // see https://github.com/golang/go/blob/a937729c2c2f6950a32bc5cd0f5b88700882f078/src/net/http/client.go#L364 + if s.CustomRoundTripperFn != nil { + clientConfig.Wrap(s.CustomRoundTripperFn) + } + + return kubernetes.NewForConfig(clientConfig) +} diff --git a/vendor/k8s.io/apiserver/pkg/server/options/authentication_dynamic_request_header.go b/vendor/k8s.io/apiserver/pkg/server/options/authentication_dynamic_request_header.go new file mode 100644 index 000000000..e2beb5c23 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/options/authentication_dynamic_request_header.go @@ -0,0 +1,78 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package options + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apiserver/pkg/authentication/request/headerrequest" + "k8s.io/apiserver/pkg/server/dynamiccertificates" + "k8s.io/client-go/kubernetes" +) + +var _ dynamiccertificates.ControllerRunner = &DynamicRequestHeaderController{} +var _ dynamiccertificates.CAContentProvider = &DynamicRequestHeaderController{} + +var _ headerrequest.RequestHeaderAuthRequestProvider = &DynamicRequestHeaderController{} + +// DynamicRequestHeaderController combines DynamicCAFromConfigMapController and RequestHeaderAuthRequestController +// into one controller for dynamically filling RequestHeaderConfig struct +type DynamicRequestHeaderController struct { + *dynamiccertificates.ConfigMapCAController + *headerrequest.RequestHeaderAuthRequestController +} + +// newDynamicRequestHeaderController creates a new controller that implements DynamicRequestHeaderController +func newDynamicRequestHeaderController(client kubernetes.Interface) (*DynamicRequestHeaderController, error) { + requestHeaderCAController, err := dynamiccertificates.NewDynamicCAFromConfigMapController( + "client-ca", + authenticationConfigMapNamespace, + authenticationConfigMapName, + "requestheader-client-ca-file", + client) + if err != nil { + return nil, fmt.Errorf("unable to create DynamicCAFromConfigMap controller: %v", err) + } + + requestHeaderAuthRequestController := headerrequest.NewRequestHeaderAuthRequestController( + authenticationConfigMapName, + authenticationConfigMapNamespace, + client, + "requestheader-username-headers", + "requestheader-group-headers", + "requestheader-extra-headers-prefix", + "requestheader-allowed-names", + ) + return &DynamicRequestHeaderController{ + ConfigMapCAController: requestHeaderCAController, + RequestHeaderAuthRequestController: requestHeaderAuthRequestController, + }, nil +} + +func (c *DynamicRequestHeaderController) RunOnce() error { + errs := []error{} + errs = append(errs, c.ConfigMapCAController.RunOnce()) + errs = append(errs, c.RequestHeaderAuthRequestController.RunOnce()) + return errors.NewAggregate(errs) +} + +func (c *DynamicRequestHeaderController) Run(workers int, stopCh <-chan struct{}) { + go c.ConfigMapCAController.Run(workers, stopCh) + go c.RequestHeaderAuthRequestController.Run(workers, stopCh) + <-stopCh +} diff --git a/vendor/k8s.io/apiserver/pkg/server/options/authorization.go b/vendor/k8s.io/apiserver/pkg/server/options/authorization.go new file mode 100644 index 000000000..6a3f5a4bb --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/options/authorization.go @@ -0,0 +1,243 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package options + +import ( + "fmt" + "time" + + "github.com/spf13/pflag" + + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apiserver/pkg/authorization/authorizer" + "k8s.io/apiserver/pkg/authorization/authorizerfactory" + "k8s.io/apiserver/pkg/authorization/path" + "k8s.io/apiserver/pkg/authorization/union" + "k8s.io/apiserver/pkg/server" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/client-go/transport" + "k8s.io/klog/v2" +) + +// DelegatingAuthorizationOptions provides an easy way for composing API servers to delegate their authorization to +// the root kube API server. +// WARNING: never assume that every authenticated incoming request already does authorization. +// The aggregator in the kube API server does this today, but this behaviour is not +// guaranteed in the future. +type DelegatingAuthorizationOptions struct { + // RemoteKubeConfigFile is the file to use to connect to a "normal" kube API server which hosts the + // SubjectAccessReview.authorization.k8s.io endpoint for checking tokens. + RemoteKubeConfigFile string + // RemoteKubeConfigFileOptional is specifying whether not specifying the kubeconfig or + // a missing in-cluster config will be fatal. + RemoteKubeConfigFileOptional bool + + // AllowCacheTTL is the length of time that a successful authorization response will be cached + AllowCacheTTL time.Duration + + // DenyCacheTTL is the length of time that an unsuccessful authorization response will be cached. + // You generally want more responsive, "deny, try again" flows. + DenyCacheTTL time.Duration + + // AlwaysAllowPaths are HTTP paths which are excluded from authorization. They can be plain + // paths or end in * in which case prefix-match is applied. A leading / is optional. + AlwaysAllowPaths []string + + // AlwaysAllowGroups are groups which are allowed to take any actions. In kube, this is system:masters. + AlwaysAllowGroups []string + + // ClientTimeout specifies a time limit for requests made by SubjectAccessReviews client. + // The default value is set to 10 seconds. + ClientTimeout time.Duration + + // WebhookRetryBackoff specifies the backoff parameters for the authorization webhook retry logic. + // This allows us to configure the sleep time at each iteration and the maximum number of retries allowed + // before we fail the webhook call in order to limit the fan out that ensues when the system is degraded. + WebhookRetryBackoff *wait.Backoff + + // CustomRoundTripperFn allows for specifying a middleware function for custom HTTP behaviour for the authorization webhook client. + CustomRoundTripperFn transport.WrapperFunc +} + +func NewDelegatingAuthorizationOptions() *DelegatingAuthorizationOptions { + return &DelegatingAuthorizationOptions{ + // very low for responsiveness, but high enough to handle storms + AllowCacheTTL: 10 * time.Second, + DenyCacheTTL: 10 * time.Second, + ClientTimeout: 10 * time.Second, + WebhookRetryBackoff: DefaultAuthWebhookRetryBackoff(), + // This allows the kubelet to always get health and readiness without causing an authorization check. + // This field can be cleared by callers if they don't want this behavior. + AlwaysAllowPaths: []string{"/healthz", "/readyz", "/livez"}, + // In an authorization call delegated to a kube-apiserver (the expected common-case), system:masters has full + // authority in a hard-coded authorizer. This means that our default can reasonably be to skip an authorization + // check for system:masters. 
+		// This field can be cleared by callers if they don't want this behavior.
+		AlwaysAllowGroups: []string{"system:masters"},
+	}
+}
+
+// WithAlwaysAllowGroups appends the list of groups to AlwaysAllowGroups
+func (s *DelegatingAuthorizationOptions) WithAlwaysAllowGroups(groups ...string) *DelegatingAuthorizationOptions {
+	s.AlwaysAllowGroups = append(s.AlwaysAllowGroups, groups...)
+	return s
+}
+
+// WithAlwaysAllowPaths appends the list of paths to AlwaysAllowPaths
+func (s *DelegatingAuthorizationOptions) WithAlwaysAllowPaths(paths ...string) *DelegatingAuthorizationOptions {
+	s.AlwaysAllowPaths = append(s.AlwaysAllowPaths, paths...)
+	return s
+}
+
+// WithClientTimeout sets the given timeout for the SubjectAccessReview client used by this authorizer
+func (s *DelegatingAuthorizationOptions) WithClientTimeout(timeout time.Duration) {
+	s.ClientTimeout = timeout
+}
+
+// WithCustomRetryBackoff sets the custom backoff parameters for the authorization webhook retry logic.
+func (s *DelegatingAuthorizationOptions) WithCustomRetryBackoff(backoff wait.Backoff) {
+	s.WebhookRetryBackoff = &backoff
+}
+
+// WithCustomRoundTripper allows for specifying a middleware function for custom HTTP behaviour for the authorization webhook client.
+func (s *DelegatingAuthorizationOptions) WithCustomRoundTripper(rt transport.WrapperFunc) {
+	s.CustomRoundTripperFn = rt
+}
+
+func (s *DelegatingAuthorizationOptions) Validate() []error {
+	if s == nil {
+		return nil
+	}
+
+	allErrors := []error{}
+	if s.WebhookRetryBackoff != nil && s.WebhookRetryBackoff.Steps <= 0 {
+		allErrors = append(allErrors, fmt.Errorf("number of webhook retry attempts must be greater than 0, but is: %d", s.WebhookRetryBackoff.Steps))
+	}
+
+	return allErrors
+}
+
+func (s *DelegatingAuthorizationOptions) AddFlags(fs *pflag.FlagSet) {
+	if s == nil {
+		return
+	}
+
+	var optionalKubeConfigSentence string
+	if s.RemoteKubeConfigFileOptional {
+		optionalKubeConfigSentence = " This is optional. If empty, all requests not skipped by authorization are forbidden."
+	}
+	fs.StringVar(&s.RemoteKubeConfigFile, "authorization-kubeconfig", s.RemoteKubeConfigFile,
+		"kubeconfig file pointing at the 'core' kubernetes server with enough rights to create "+
+		"subjectaccessreviews.authorization.k8s.io."+optionalKubeConfigSentence)
+
+	fs.DurationVar(&s.AllowCacheTTL, "authorization-webhook-cache-authorized-ttl",
+		s.AllowCacheTTL,
+		"The duration to cache 'authorized' responses from the webhook authorizer.")
+
+	fs.DurationVar(&s.DenyCacheTTL,
+		"authorization-webhook-cache-unauthorized-ttl", s.DenyCacheTTL,
+		"The duration to cache 'unauthorized' responses from the webhook authorizer.")
+
+	fs.StringSliceVar(&s.AlwaysAllowPaths, "authorization-always-allow-paths", s.AlwaysAllowPaths,
+		"A list of HTTP paths to skip during authorization, i.e.
these are authorized without "+ + "contacting the 'core' kubernetes server.") +} + +func (s *DelegatingAuthorizationOptions) ApplyTo(c *server.AuthorizationInfo) error { + if s == nil { + c.Authorizer = authorizerfactory.NewAlwaysAllowAuthorizer() + return nil + } + + client, err := s.getClient() + if err != nil { + return err + } + + c.Authorizer, err = s.toAuthorizer(client) + return err +} + +func (s *DelegatingAuthorizationOptions) toAuthorizer(client kubernetes.Interface) (authorizer.Authorizer, error) { + var authorizers []authorizer.Authorizer + + if len(s.AlwaysAllowGroups) > 0 { + authorizers = append(authorizers, authorizerfactory.NewPrivilegedGroups(s.AlwaysAllowGroups...)) + } + + if len(s.AlwaysAllowPaths) > 0 { + a, err := path.NewAuthorizer(s.AlwaysAllowPaths) + if err != nil { + return nil, err + } + authorizers = append(authorizers, a) + } + + if client == nil { + klog.Warning("No authorization-kubeconfig provided, so SubjectAccessReview of authorization tokens won't work.") + } else { + cfg := authorizerfactory.DelegatingAuthorizerConfig{ + SubjectAccessReviewClient: client.AuthorizationV1(), + AllowCacheTTL: s.AllowCacheTTL, + DenyCacheTTL: s.DenyCacheTTL, + WebhookRetryBackoff: s.WebhookRetryBackoff, + } + delegatedAuthorizer, err := cfg.New() + if err != nil { + return nil, err + } + authorizers = append(authorizers, delegatedAuthorizer) + } + + return union.New(authorizers...), nil +} + +func (s *DelegatingAuthorizationOptions) getClient() (kubernetes.Interface, error) { + var clientConfig *rest.Config + var err error + if len(s.RemoteKubeConfigFile) > 0 { + loadingRules := &clientcmd.ClientConfigLoadingRules{ExplicitPath: s.RemoteKubeConfigFile} + loader := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, &clientcmd.ConfigOverrides{}) + + clientConfig, err = loader.ClientConfig() + } else { + // without the remote kubeconfig file, try to use the in-cluster config. Most addon API servers will + // use this path. If it is optional, ignore errors. + clientConfig, err = rest.InClusterConfig() + if err != nil && s.RemoteKubeConfigFileOptional { + if err != rest.ErrNotInCluster { + klog.Warningf("failed to read in-cluster kubeconfig for delegated authorization: %v", err) + } + return nil, nil + } + } + if err != nil { + return nil, fmt.Errorf("failed to get delegated authorization kubeconfig: %v", err) + } + + // set high qps/burst limits since this will effectively limit API server responsiveness + clientConfig.QPS = 200 + clientConfig.Burst = 400 + clientConfig.Timeout = s.ClientTimeout + if s.CustomRoundTripperFn != nil { + clientConfig.Wrap(s.CustomRoundTripperFn) + } + + return kubernetes.NewForConfig(clientConfig) +} diff --git a/vendor/k8s.io/apiserver/pkg/server/options/coreapi.go b/vendor/k8s.io/apiserver/pkg/server/options/coreapi.go new file mode 100644 index 000000000..c1293980f --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/options/coreapi.go @@ -0,0 +1,90 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package options + +import ( + "fmt" + "time" + + "github.com/spf13/pflag" + "k8s.io/apiserver/pkg/features" + "k8s.io/apiserver/pkg/server" + "k8s.io/apiserver/pkg/util/feature" + clientgoinformers "k8s.io/client-go/informers" + clientgoclientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/component-base/traces" +) + +// CoreAPIOptions contains options to configure the connection to a core API Kubernetes apiserver. +type CoreAPIOptions struct { + // CoreAPIKubeconfigPath is a filename for a kubeconfig file to contact the core API server with. + // If it is not set, the in cluster config is used. + CoreAPIKubeconfigPath string +} + +func NewCoreAPIOptions() *CoreAPIOptions { + return &CoreAPIOptions{} +} + +func (o *CoreAPIOptions) AddFlags(fs *pflag.FlagSet) { + if o == nil { + return + } + + fs.StringVar(&o.CoreAPIKubeconfigPath, "kubeconfig", o.CoreAPIKubeconfigPath, + "kubeconfig file pointing at the 'core' kubernetes server.") +} + +func (o *CoreAPIOptions) ApplyTo(config *server.RecommendedConfig) error { + if o == nil { + return nil + } + + // create shared informer for Kubernetes APIs + var kubeconfig *rest.Config + var err error + if len(o.CoreAPIKubeconfigPath) > 0 { + loadingRules := &clientcmd.ClientConfigLoadingRules{ExplicitPath: o.CoreAPIKubeconfigPath} + loader := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, &clientcmd.ConfigOverrides{}) + kubeconfig, err = loader.ClientConfig() + if err != nil { + return fmt.Errorf("failed to load kubeconfig at %q: %v", o.CoreAPIKubeconfigPath, err) + } + } else { + kubeconfig, err = rest.InClusterConfig() + if err != nil { + return err + } + } + if feature.DefaultFeatureGate.Enabled(features.APIServerTracing) { + kubeconfig.Wrap(traces.WrapperFor(config.TracerProvider)) + } + clientgoExternalClient, err := clientgoclientset.NewForConfig(kubeconfig) + if err != nil { + return fmt.Errorf("failed to create Kubernetes clientset: %v", err) + } + config.ClientConfig = kubeconfig + config.SharedInformerFactory = clientgoinformers.NewSharedInformerFactory(clientgoExternalClient, 10*time.Minute) + + return nil +} + +func (o *CoreAPIOptions) Validate() []error { + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/server/options/deprecated_insecure_serving.go b/vendor/k8s.io/apiserver/pkg/server/options/deprecated_insecure_serving.go new file mode 100644 index 000000000..2b3afb0f1 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/options/deprecated_insecure_serving.go @@ -0,0 +1,169 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package options + +import ( + "fmt" + "net" + + "github.com/spf13/pflag" + + "k8s.io/apiserver/pkg/server" + "k8s.io/client-go/rest" +) + +// DeprecatedInsecureServingOptions are for creating an unauthenticated, unauthorized, insecure port. +// No one should be using these anymore. 
+// DEPRECATED: all insecure serving options will be removed in a future version
+type DeprecatedInsecureServingOptions struct {
+	BindAddress net.IP
+	BindPort    int
+	// BindNetwork is the type of network to bind to - defaults to "tcp", accepts "tcp",
+	// "tcp4", and "tcp6".
+	BindNetwork string
+
+	// Listener is the insecure server network listener.
+	// Either Listener or BindAddress/BindPort/BindNetwork is set;
+	// if Listener is set, it is used and BindAddress/BindPort/BindNetwork are ignored.
+	Listener net.Listener
+
+	// ListenFunc can be overridden to create a custom listener, e.g. for mocking in tests.
+	// It defaults to options.CreateListener.
+	ListenFunc func(network, addr string, config net.ListenConfig) (net.Listener, int, error)
+}
+
+// Validate ensures that the insecure port value is within the valid port range.
+func (s *DeprecatedInsecureServingOptions) Validate() []error {
+	if s == nil {
+		return nil
+	}
+
+	errors := []error{}
+
+	if s.BindPort < 0 || s.BindPort > 65535 {
+		errors = append(errors, fmt.Errorf("insecure port %v must be between 0 and 65535, inclusive. 0 for turning off insecure (HTTP) port", s.BindPort))
+	}
+
+	return errors
+}
+
+// AddFlags adds flags related to insecure serving to the specified FlagSet.
+func (s *DeprecatedInsecureServingOptions) AddFlags(fs *pflag.FlagSet) {
+	if s == nil {
+		return
+	}
+
+	fs.IPVar(&s.BindAddress, "insecure-bind-address", s.BindAddress, ""+
+		"The IP address on which to serve the --insecure-port (set to 0.0.0.0 or :: for listening on all interfaces and IP families).")
+	// Though this flag is deprecated, we discovered security concerns over how to do health checks without it, e.g. #43784
+	fs.MarkDeprecated("insecure-bind-address", "This flag will be removed in a future version.")
+	fs.Lookup("insecure-bind-address").Hidden = false
+
+	fs.IntVar(&s.BindPort, "insecure-port", s.BindPort, ""+
+		"The port on which to serve unsecured, unauthenticated access.")
+	// Though this flag is deprecated, we discovered security concerns over how to do health checks without it, e.g. #43784
+	fs.MarkDeprecated("insecure-port", "This flag will be removed in a future version.")
+	fs.Lookup("insecure-port").Hidden = false
+}
+
+// AddUnqualifiedFlags adds flags related to insecure serving without the --insecure prefix to the specified FlagSet.
+func (s *DeprecatedInsecureServingOptions) AddUnqualifiedFlags(fs *pflag.FlagSet) {
+	if s == nil {
+		return
+	}
+
+	fs.IPVar(&s.BindAddress, "address", s.BindAddress,
+		"The IP address on which to serve the insecure --port (set to '0.0.0.0' or '::' for listening on all interfaces and IP families).")
+	fs.MarkDeprecated("address", "see --bind-address instead.")
+	fs.Lookup("address").Hidden = false
+
+	fs.IntVar(&s.BindPort, "port", s.BindPort, "The port on which to serve unsecured, unauthenticated access. Set to 0 to disable.")
+	fs.MarkDeprecated("port", "see --secure-port instead.")
+	fs.Lookup("port").Hidden = false
+}
+
+// ApplyTo adds DeprecatedInsecureServingOptions to the insecure serving info and kube-controller-manager configuration.
+// Note: the double pointer allows setting the *DeprecatedInsecureServingInfo to nil without referencing the struct hosting this pointer.
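
To make the double-pointer contract concrete, here is a minimal sketch under stated assumptions; the helper name and values are illustrative, not part of the vendored file:

	// Sketch: assumes imports of "net" and "k8s.io/apiserver/pkg/server".
	func applyInsecureServing() (*server.DeprecatedInsecureServingInfo, error) {
		var info *server.DeprecatedInsecureServingInfo
		opts := &DeprecatedInsecureServingOptions{
			BindAddress: net.ParseIP("127.0.0.1"),
			BindPort:    0, // <= 0 makes ApplyTo a no-op, leaving info nil (insecure serving disabled)
		}
		if err := opts.ApplyTo(&info); err != nil {
			return nil, err
		}
		return info, nil // stays nil unless a positive port was configured
	}
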
+func (s *DeprecatedInsecureServingOptions) ApplyTo(c **server.DeprecatedInsecureServingInfo) error { + if s == nil { + return nil + } + if s.BindPort <= 0 { + return nil + } + + if s.Listener == nil { + var err error + listen := CreateListener + if s.ListenFunc != nil { + listen = s.ListenFunc + } + addr := net.JoinHostPort(s.BindAddress.String(), fmt.Sprintf("%d", s.BindPort)) + s.Listener, s.BindPort, err = listen(s.BindNetwork, addr, net.ListenConfig{}) + if err != nil { + return fmt.Errorf("failed to create listener: %v", err) + } + } + + *c = &server.DeprecatedInsecureServingInfo{ + Listener: s.Listener, + } + + return nil +} + +// WithLoopback adds loopback functionality to the serving options. +func (o *DeprecatedInsecureServingOptions) WithLoopback() *DeprecatedInsecureServingOptionsWithLoopback { + return &DeprecatedInsecureServingOptionsWithLoopback{o} +} + +// DeprecatedInsecureServingOptionsWithLoopback adds loopback functionality to the DeprecatedInsecureServingOptions. +// DEPRECATED: all insecure serving options will be removed in a future version, however note that +// there are security concerns over how health checks can work here - see e.g. #43784 +type DeprecatedInsecureServingOptionsWithLoopback struct { + *DeprecatedInsecureServingOptions +} + +// ApplyTo fills up serving information in the server configuration. +func (s *DeprecatedInsecureServingOptionsWithLoopback) ApplyTo(insecureServingInfo **server.DeprecatedInsecureServingInfo, loopbackClientConfig **rest.Config) error { + if s == nil || s.DeprecatedInsecureServingOptions == nil || insecureServingInfo == nil { + return nil + } + + if err := s.DeprecatedInsecureServingOptions.ApplyTo(insecureServingInfo); err != nil { + return err + } + + if *insecureServingInfo == nil || loopbackClientConfig == nil { + return nil + } + + secureLoopbackClientConfig, err := (*insecureServingInfo).NewLoopbackClientConfig() + switch { + // if we failed and there's no fallback loopback client config, we need to fail + case err != nil && *loopbackClientConfig == nil: + return err + + // if we failed, but we already have a fallback loopback client config (usually insecure), allow it + case err != nil && *loopbackClientConfig != nil: + + default: + *loopbackClientConfig = secureLoopbackClientConfig + } + + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/server/options/doc.go b/vendor/k8s.io/apiserver/pkg/server/options/doc.go new file mode 100644 index 000000000..426336be0 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/options/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// package options is the public flags and options used by a generic api +// server. It takes a minimal set of dependencies and does not reference +// implementations, in order to ensure it may be reused by multiple components +// (such as CLI commands that wish to generate or validate config). 
+package options // import "k8s.io/apiserver/pkg/server/options"
diff --git a/vendor/k8s.io/apiserver/pkg/server/options/egress_selector.go b/vendor/k8s.io/apiserver/pkg/server/options/egress_selector.go
new file mode 100644
index 000000000..c7c94e577
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/server/options/egress_selector.go
@@ -0,0 +1,93 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package options
+
+import (
+	"fmt"
+
+	"github.com/spf13/pflag"
+	"k8s.io/utils/path"
+
+	"k8s.io/apiserver/pkg/server"
+	"k8s.io/apiserver/pkg/server/egressselector"
+)
+
+// EgressSelectorOptions holds the api server egress selector options.
+// See https://github.com/kubernetes/enhancements/blob/master/keps/sig-api-machinery/1281-network-proxy/README.md
+type EgressSelectorOptions struct {
+	// ConfigFile is the file path with api-server egress selector configuration.
+	ConfigFile string
+}
+
+// NewEgressSelectorOptions creates a new instance of EgressSelectorOptions.
+//
+// The option points to a configuration file for egress/konnectivity.
+// This determines which types of requests use egress/konnectivity and how they use it.
+// If empty, the API Server will attempt to connect directly using the network.
+func NewEgressSelectorOptions() *EgressSelectorOptions {
+	return &EgressSelectorOptions{}
+}
+
+// AddFlags adds flags related to the egress selector for a specific APIServer to the specified FlagSet
+func (o *EgressSelectorOptions) AddFlags(fs *pflag.FlagSet) {
+	if o == nil {
+		return
+	}
+
+	fs.StringVar(&o.ConfigFile, "egress-selector-config-file", o.ConfigFile,
+		"File with apiserver egress selector configuration.")
+}
+
+// ApplyTo adds the egress selector settings to the server configuration.
+// If egress selector settings were not provided by a cluster-admin,
+// they are prepared from the recommended/default/no-op values.
+func (o *EgressSelectorOptions) ApplyTo(c *server.Config) error {
+	if o == nil {
+		return nil
+	}
+
+	npConfig, err := egressselector.ReadEgressSelectorConfiguration(o.ConfigFile)
+	if err != nil {
+		return fmt.Errorf("failed to read egress selector config: %v", err)
+	}
+	errs := egressselector.ValidateEgressSelectorConfiguration(npConfig)
+	if len(errs) > 0 {
+		return fmt.Errorf("failed to validate egress selector configuration: %v", errs.ToAggregate())
+	}
+
+	cs, err := egressselector.NewEgressSelector(npConfig)
+	if err != nil {
+		return fmt.Errorf("failed to setup egress selector with config %#v: %v", npConfig, err)
+	}
+	c.EgressSelector = cs
+	return nil
+}
+
+// Validate verifies flags passed to EgressSelectorOptions.
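
As a rough usage sketch of the flag-to-config flow above; the config path and the assumption of an assembled *server.Config are illustrative, not taken from this patch:

	// Sketch: assumes the same imports as the vendored file above.
	func applyEgress(c *server.Config) error {
		egress := NewEgressSelectorOptions()
		egress.ConfigFile = "/etc/kubernetes/egress-selector.yaml" // hypothetical path
		return egress.ApplyTo(c)
	}
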
+func (o *EgressSelectorOptions) Validate() []error { + if o == nil || o.ConfigFile == "" { + return nil + } + + errs := []error{} + + if exists, err := path.Exists(path.CheckFollowSymlink, o.ConfigFile); !exists || err != nil { + errs = append(errs, fmt.Errorf("egress-selector-config-file %s does not exist", o.ConfigFile)) + } + + return errs +} diff --git a/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/OWNERS b/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/OWNERS new file mode 100644 index 000000000..71edc3ecd --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/OWNERS @@ -0,0 +1,9 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- sig-auth-encryption-at-rest-approvers +reviewers: +- sig-auth-encryption-at-rest-reviewers +labels: +- sig/auth + diff --git a/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/config.go b/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/config.go new file mode 100644 index 000000000..09659676a --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/config.go @@ -0,0 +1,376 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package encryptionconfig + +import ( + "crypto/aes" + "crypto/cipher" + "encoding/base64" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + "sync" + "time" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + apiserverconfig "k8s.io/apiserver/pkg/apis/config" + apiserverconfigv1 "k8s.io/apiserver/pkg/apis/config/v1" + "k8s.io/apiserver/pkg/apis/config/validation" + "k8s.io/apiserver/pkg/server/healthz" + "k8s.io/apiserver/pkg/storage/value" + aestransformer "k8s.io/apiserver/pkg/storage/value/encrypt/aes" + "k8s.io/apiserver/pkg/storage/value/encrypt/envelope" + "k8s.io/apiserver/pkg/storage/value/encrypt/identity" + "k8s.io/apiserver/pkg/storage/value/encrypt/secretbox" +) + +const ( + aesCBCTransformerPrefixV1 = "k8s:enc:aescbc:v1:" + aesGCMTransformerPrefixV1 = "k8s:enc:aesgcm:v1:" + secretboxTransformerPrefixV1 = "k8s:enc:secretbox:v1:" + kmsTransformerPrefixV1 = "k8s:enc:kms:v1:" + kmsPluginHealthzNegativeTTL = 3 * time.Second + kmsPluginHealthzPositiveTTL = 20 * time.Second +) + +type kmsPluginHealthzResponse struct { + err error + received time.Time +} + +type kmsPluginProbe struct { + name string + ttl time.Duration + envelope.Service + lastResponse *kmsPluginHealthzResponse + l *sync.Mutex +} + +func (h *kmsPluginProbe) toHealthzCheck(idx int) healthz.HealthChecker { + return healthz.NamedCheck(fmt.Sprintf("kms-provider-%d", idx), func(r *http.Request) error { + return h.Check() + }) +} + +// GetKMSPluginHealthzCheckers extracts KMSPluginProbes from the EncryptionConfig. 
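
Each probe surfaces as a named healthz check ("kms-provider-0", "kms-provider-1", and so on, per the index in toHealthzCheck). A short sketch of consuming them, mirroring the etcd options wiring later in this patch; the helper name and config path are illustrative assumptions:

	// Sketch: assumes an import of "k8s.io/apiserver/pkg/server" alongside this encryptionconfig package.
	func addKMSHealthChecks(c *server.Config) error {
		checks, err := GetKMSPluginHealthzCheckers("/etc/kubernetes/encryption-config.yaml") // hypothetical path
		if err != nil {
			return err
		}
		c.AddHealthChecks(checks...)
		return nil
	}
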
+func GetKMSPluginHealthzCheckers(filepath string) ([]healthz.HealthChecker, error) { + f, err := os.Open(filepath) + if err != nil { + return nil, fmt.Errorf("error opening encryption provider configuration file %q: %v", filepath, err) + } + defer f.Close() + var result []healthz.HealthChecker + probes, err := getKMSPluginProbes(f) + if err != nil { + return nil, err + } + + for i, p := range probes { + probe := p + result = append(result, probe.toHealthzCheck(i)) + } + return result, nil +} + +func getKMSPluginProbes(reader io.Reader) ([]*kmsPluginProbe, error) { + var result []*kmsPluginProbe + + configFileContents, err := ioutil.ReadAll(reader) + if err != nil { + return result, fmt.Errorf("could not read content of encryption provider configuration: %v", err) + } + + config, err := loadConfig(configFileContents) + if err != nil { + return result, fmt.Errorf("error while parsing encrypiton provider configuration: %v", err) + } + + for _, r := range config.Resources { + for _, p := range r.Providers { + if p.KMS != nil { + s, err := envelope.NewGRPCService(p.KMS.Endpoint, p.KMS.Timeout.Duration) + if err != nil { + return nil, fmt.Errorf("could not configure KMS-Plugin's probe %q, error: %v", p.KMS.Name, err) + } + + result = append(result, &kmsPluginProbe{ + name: p.KMS.Name, + ttl: kmsPluginHealthzNegativeTTL, + Service: s, + l: &sync.Mutex{}, + lastResponse: &kmsPluginHealthzResponse{}, + }) + } + } + } + + return result, nil +} + +// Check encrypts and decrypts test data against KMS-Plugin's gRPC endpoint. +func (h *kmsPluginProbe) Check() error { + h.l.Lock() + defer h.l.Unlock() + + if (time.Since(h.lastResponse.received)) < h.ttl { + return h.lastResponse.err + } + + p, err := h.Service.Encrypt([]byte("ping")) + if err != nil { + h.lastResponse = &kmsPluginHealthzResponse{err: err, received: time.Now()} + h.ttl = kmsPluginHealthzNegativeTTL + return fmt.Errorf("failed to perform encrypt section of the healthz check for KMS Provider %s, error: %v", h.name, err) + } + + if _, err := h.Service.Decrypt(p); err != nil { + h.lastResponse = &kmsPluginHealthzResponse{err: err, received: time.Now()} + h.ttl = kmsPluginHealthzNegativeTTL + return fmt.Errorf("failed to perform decrypt section of the healthz check for KMS Provider %s, error: %v", h.name, err) + } + + h.lastResponse = &kmsPluginHealthzResponse{err: nil, received: time.Now()} + h.ttl = kmsPluginHealthzPositiveTTL + return nil +} + +// GetTransformerOverrides returns the transformer overrides by reading and parsing the encryption provider configuration file +func GetTransformerOverrides(filepath string) (map[schema.GroupResource]value.Transformer, error) { + f, err := os.Open(filepath) + if err != nil { + return nil, fmt.Errorf("error opening encryption provider configuration file %q: %v", filepath, err) + } + defer f.Close() + + result, err := parseEncryptionConfiguration(f) + if err != nil { + return nil, fmt.Errorf("error while parsing encryption provider configuration file %q: %v", filepath, err) + } + return result, nil +} + +func parseEncryptionConfiguration(f io.Reader) (map[schema.GroupResource]value.Transformer, error) { + configFileContents, err := ioutil.ReadAll(f) + if err != nil { + return nil, fmt.Errorf("could not read contents: %v", err) + } + + config, err := loadConfig(configFileContents) + if err != nil { + return nil, fmt.Errorf("error while parsing file: %v", err) + } + + resourceToPrefixTransformer := map[schema.GroupResource][]value.PrefixTransformer{} + + // For each entry in the configuration + for _, 
resourceConfig := range config.Resources { + transformers, err := prefixTransformers(&resourceConfig) + if err != nil { + return nil, err + } + + // For each resource, create a list of providers to use + for _, resource := range resourceConfig.Resources { + gr := schema.ParseGroupResource(resource) + resourceToPrefixTransformer[gr] = append( + resourceToPrefixTransformer[gr], transformers...) + } + } + + result := map[schema.GroupResource]value.Transformer{} + for gr, transList := range resourceToPrefixTransformer { + result[gr] = value.NewMutableTransformer(value.NewPrefixTransformers(fmt.Errorf("no matching prefix found"), transList...)) + } + return result, nil + +} + +func loadConfig(data []byte) (*apiserverconfig.EncryptionConfiguration, error) { + scheme := runtime.NewScheme() + codecs := serializer.NewCodecFactory(scheme) + apiserverconfig.AddToScheme(scheme) + apiserverconfigv1.AddToScheme(scheme) + + configObj, gvk, err := codecs.UniversalDecoder().Decode(data, nil, nil) + if err != nil { + return nil, err + } + config, ok := configObj.(*apiserverconfig.EncryptionConfiguration) + if !ok { + return nil, fmt.Errorf("got unexpected config type: %v", gvk) + } + + return config, validation.ValidateEncryptionConfiguration(config).ToAggregate() +} + +// The factory to create kms service. This is to make writing test easier. +var envelopeServiceFactory = envelope.NewGRPCService + +func prefixTransformers(config *apiserverconfig.ResourceConfiguration) ([]value.PrefixTransformer, error) { + var result []value.PrefixTransformer + for _, provider := range config.Providers { + var ( + transformer value.PrefixTransformer + err error + ) + + switch { + case provider.AESGCM != nil: + transformer, err = aesPrefixTransformer(provider.AESGCM, aestransformer.NewGCMTransformer, aesGCMTransformerPrefixV1) + case provider.AESCBC != nil: + transformer, err = aesPrefixTransformer(provider.AESCBC, aestransformer.NewCBCTransformer, aesCBCTransformerPrefixV1) + case provider.Secretbox != nil: + transformer, err = secretboxPrefixTransformer(provider.Secretbox) + case provider.KMS != nil: + var envelopeService envelope.Service + envelopeService, err = envelopeServiceFactory(provider.KMS.Endpoint, provider.KMS.Timeout.Duration) + if err != nil { + return nil, fmt.Errorf("could not configure KMS plugin %q, error: %v", provider.KMS.Name, err) + } + + transformer, err = envelopePrefixTransformer(provider.KMS, envelopeService, kmsTransformerPrefixV1) + case provider.Identity != nil: + transformer = value.PrefixTransformer{ + Transformer: identity.NewEncryptCheckTransformer(), + Prefix: []byte{}, + } + default: + return nil, errors.New("provider does not contain any of the expected providers: KMS, AESGCM, AESCBC, Secretbox, Identity") + } + + if err != nil { + return result, err + } + result = append(result, transformer) + } + return result, nil +} + +type blockTransformerFunc func(cipher.Block) value.Transformer + +func aesPrefixTransformer(config *apiserverconfig.AESConfiguration, fn blockTransformerFunc, prefix string) (value.PrefixTransformer, error) { + var result value.PrefixTransformer + + if len(config.Keys) == 0 { + return result, fmt.Errorf("aes provider has no valid keys") + } + for _, key := range config.Keys { + if key.Name == "" { + return result, fmt.Errorf("key with invalid name provided") + } + if key.Secret == "" { + return result, fmt.Errorf("key %v has no provided secret", key.Name) + } + } + + keyTransformers := []value.PrefixTransformer{} + + for _, keyData := range config.Keys { + key, err := 
base64.StdEncoding.DecodeString(keyData.Secret) + if err != nil { + return result, fmt.Errorf("could not obtain secret for named key %s: %s", keyData.Name, err) + } + block, err := aes.NewCipher(key) + if err != nil { + return result, fmt.Errorf("error while creating cipher for named key %s: %s", keyData.Name, err) + } + + // Create a new PrefixTransformer for this key + keyTransformers = append(keyTransformers, + value.PrefixTransformer{ + Transformer: fn(block), + Prefix: []byte(keyData.Name + ":"), + }) + } + + // Create a prefixTransformer which can choose between these keys + keyTransformer := value.NewPrefixTransformers( + fmt.Errorf("no matching key was found for the provided AES transformer"), keyTransformers...) + + // Create a PrefixTransformer which shall later be put in a list with other providers + result = value.PrefixTransformer{ + Transformer: keyTransformer, + Prefix: []byte(prefix), + } + return result, nil +} + +func secretboxPrefixTransformer(config *apiserverconfig.SecretboxConfiguration) (value.PrefixTransformer, error) { + var result value.PrefixTransformer + + if len(config.Keys) == 0 { + return result, fmt.Errorf("secretbox provider has no valid keys") + } + for _, key := range config.Keys { + if key.Name == "" { + return result, fmt.Errorf("key with invalid name provided") + } + if key.Secret == "" { + return result, fmt.Errorf("key %v has no provided secret", key.Name) + } + } + + keyTransformers := []value.PrefixTransformer{} + + for _, keyData := range config.Keys { + key, err := base64.StdEncoding.DecodeString(keyData.Secret) + if err != nil { + return result, fmt.Errorf("could not obtain secret for named key %s: %s", keyData.Name, err) + } + + if len(key) != 32 { + return result, fmt.Errorf("expected key size 32 for secretbox provider, got %v", len(key)) + } + + keyArray := [32]byte{} + copy(keyArray[:], key) + + // Create a new PrefixTransformer for this key + keyTransformers = append(keyTransformers, + value.PrefixTransformer{ + Transformer: secretbox.NewSecretboxTransformer(keyArray), + Prefix: []byte(keyData.Name + ":"), + }) + } + + // Create a prefixTransformer which can choose between these keys + keyTransformer := value.NewPrefixTransformers( + fmt.Errorf("no matching key was found for the provided Secretbox transformer"), keyTransformers...) + + // Create a PrefixTransformer which shall later be put in a list with other providers + result = value.PrefixTransformer{ + Transformer: keyTransformer, + Prefix: []byte(secretboxTransformerPrefixV1), + } + return result, nil +} + +func envelopePrefixTransformer(config *apiserverconfig.KMSConfiguration, envelopeService envelope.Service, prefix string) (value.PrefixTransformer, error) { + envelopeTransformer, err := envelope.NewEnvelopeTransformer(envelopeService, int(*config.CacheSize), aestransformer.NewCBCTransformer) + if err != nil { + return value.PrefixTransformer{}, err + } + return value.PrefixTransformer{ + Transformer: envelopeTransformer, + Prefix: []byte(prefix + config.Name + ":"), + }, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/server/options/etcd.go b/vendor/k8s.io/apiserver/pkg/server/options/etcd.go new file mode 100644 index 000000000..d8b45b819 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/options/etcd.go @@ -0,0 +1,352 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package options + +import ( + "fmt" + "net/http" + "strconv" + "strings" + "time" + + "github.com/spf13/pflag" + + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apiserver/pkg/registry/generic" + genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" + "k8s.io/apiserver/pkg/server" + "k8s.io/apiserver/pkg/server/healthz" + "k8s.io/apiserver/pkg/server/options/encryptionconfig" + serverstorage "k8s.io/apiserver/pkg/server/storage" + "k8s.io/apiserver/pkg/storage/storagebackend" + storagefactory "k8s.io/apiserver/pkg/storage/storagebackend/factory" + "k8s.io/apiserver/pkg/storage/value" + "k8s.io/klog/v2" +) + +type EtcdOptions struct { + // The value of Paging on StorageConfig will be overridden by the + // calculated feature gate value. + StorageConfig storagebackend.Config + EncryptionProviderConfigFilepath string + + EtcdServersOverrides []string + + // To enable protobuf as storage format, it is enough + // to set it to "application/vnd.kubernetes.protobuf". + DefaultStorageMediaType string + DeleteCollectionWorkers int + EnableGarbageCollection bool + + // Set EnableWatchCache to false to disable all watch caches + EnableWatchCache bool + // Set DefaultWatchCacheSize to zero to disable watch caches for those resources that have no explicit cache size set + DefaultWatchCacheSize int + // WatchCacheSizes represents override to a given resource + WatchCacheSizes []string +} + +var storageTypes = sets.NewString( + storagebackend.StorageTypeETCD3, +) + +func NewEtcdOptions(backendConfig *storagebackend.Config) *EtcdOptions { + options := &EtcdOptions{ + StorageConfig: *backendConfig, + DefaultStorageMediaType: "application/json", + DeleteCollectionWorkers: 1, + EnableGarbageCollection: true, + EnableWatchCache: true, + DefaultWatchCacheSize: 100, + } + options.StorageConfig.CountMetricPollPeriod = time.Minute + return options +} + +func (s *EtcdOptions) Validate() []error { + if s == nil { + return nil + } + + allErrors := []error{} + if len(s.StorageConfig.Transport.ServerList) == 0 { + allErrors = append(allErrors, fmt.Errorf("--etcd-servers must be specified")) + } + + if s.StorageConfig.Type != storagebackend.StorageTypeUnset && !storageTypes.Has(s.StorageConfig.Type) { + allErrors = append(allErrors, fmt.Errorf("--storage-backend invalid, allowed values: %s. 
If not specified, it will default to 'etcd3'", strings.Join(storageTypes.List(), ", "))) + } + + for _, override := range s.EtcdServersOverrides { + tokens := strings.Split(override, "#") + if len(tokens) != 2 { + allErrors = append(allErrors, fmt.Errorf("--etcd-servers-overrides invalid, must be of format: group/resource#servers, where servers are URLs, semicolon separated")) + continue + } + + apiresource := strings.Split(tokens[0], "/") + if len(apiresource) != 2 { + allErrors = append(allErrors, fmt.Errorf("--etcd-servers-overrides invalid, must be of format: group/resource#servers, where servers are URLs, semicolon separated")) + continue + } + + } + + return allErrors +} + +// AddEtcdFlags adds flags related to etcd storage for a specific APIServer to the specified FlagSet +func (s *EtcdOptions) AddFlags(fs *pflag.FlagSet) { + if s == nil { + return + } + + fs.StringSliceVar(&s.EtcdServersOverrides, "etcd-servers-overrides", s.EtcdServersOverrides, ""+ + "Per-resource etcd servers overrides, comma separated. The individual override "+ + "format: group/resource#servers, where servers are URLs, semicolon separated. "+ + "Note that this applies only to resources compiled into this server binary. ") + + fs.StringVar(&s.DefaultStorageMediaType, "storage-media-type", s.DefaultStorageMediaType, ""+ + "The media type to use to store objects in storage. "+ + "Some resources or storage backends may only support a specific media type and will ignore this setting.") + fs.IntVar(&s.DeleteCollectionWorkers, "delete-collection-workers", s.DeleteCollectionWorkers, + "Number of workers spawned for DeleteCollection call. These are used to speed up namespace cleanup.") + + fs.BoolVar(&s.EnableGarbageCollection, "enable-garbage-collector", s.EnableGarbageCollection, ""+ + "Enables the generic garbage collector. MUST be synced with the corresponding flag "+ + "of the kube-controller-manager.") + + fs.BoolVar(&s.EnableWatchCache, "watch-cache", s.EnableWatchCache, + "Enable watch caching in the apiserver") + + fs.IntVar(&s.DefaultWatchCacheSize, "default-watch-cache-size", s.DefaultWatchCacheSize, + "Default watch cache size. If zero, watch cache will be disabled for resources that do not have a default watch size set.") + + fs.StringSliceVar(&s.WatchCacheSizes, "watch-cache-sizes", s.WatchCacheSizes, ""+ + "Watch cache size settings for some resources (pods, nodes, etc.), comma separated. "+ + "The individual setting format: resource[.group]#size, where resource is lowercase plural (no version), "+ + "group is omitted for resources of apiVersion v1 (the legacy core API) and included for others, "+ + "and size is a number. It takes effect when watch-cache is enabled. "+ + "Some resources (replicationcontrollers, endpoints, nodes, pods, services, apiservices.apiregistration.k8s.io) "+ + "have system defaults set by heuristics, others default to default-watch-cache-size") + + fs.StringVar(&s.StorageConfig.Type, "storage-backend", s.StorageConfig.Type, + "The storage backend for persistence. 
Options: 'etcd3' (default).") + + dummyCacheSize := 0 + fs.IntVar(&dummyCacheSize, "deserialization-cache-size", 0, "Number of deserialized json objects to cache in memory.") + fs.MarkDeprecated("deserialization-cache-size", "the deserialization cache was dropped in 1.13 with support for etcd2") + + fs.StringSliceVar(&s.StorageConfig.Transport.ServerList, "etcd-servers", s.StorageConfig.Transport.ServerList, + "List of etcd servers to connect with (scheme://ip:port), comma separated.") + + fs.StringVar(&s.StorageConfig.Prefix, "etcd-prefix", s.StorageConfig.Prefix, + "The prefix to prepend to all resource paths in etcd.") + + fs.StringVar(&s.StorageConfig.Transport.KeyFile, "etcd-keyfile", s.StorageConfig.Transport.KeyFile, + "SSL key file used to secure etcd communication.") + + fs.StringVar(&s.StorageConfig.Transport.CertFile, "etcd-certfile", s.StorageConfig.Transport.CertFile, + "SSL certification file used to secure etcd communication.") + + fs.StringVar(&s.StorageConfig.Transport.TrustedCAFile, "etcd-cafile", s.StorageConfig.Transport.TrustedCAFile, + "SSL Certificate Authority file used to secure etcd communication.") + + fs.StringVar(&s.EncryptionProviderConfigFilepath, "experimental-encryption-provider-config", s.EncryptionProviderConfigFilepath, + "The file containing configuration for encryption providers to be used for storing secrets in etcd") + fs.MarkDeprecated("experimental-encryption-provider-config", "use --encryption-provider-config.") + + fs.StringVar(&s.EncryptionProviderConfigFilepath, "encryption-provider-config", s.EncryptionProviderConfigFilepath, + "The file containing configuration for encryption providers to be used for storing secrets in etcd") + + fs.DurationVar(&s.StorageConfig.CompactionInterval, "etcd-compaction-interval", s.StorageConfig.CompactionInterval, + "The interval of compaction requests. If 0, the compaction request from apiserver is disabled.") + + fs.DurationVar(&s.StorageConfig.CountMetricPollPeriod, "etcd-count-metric-poll-period", s.StorageConfig.CountMetricPollPeriod, ""+ + "Frequency of polling etcd for number of resources per type. 0 disables the metric collection.") + + fs.DurationVar(&s.StorageConfig.DBMetricPollInterval, "etcd-db-metric-poll-interval", s.StorageConfig.DBMetricPollInterval, + "The interval of requests to poll etcd and update metric. 0 disables the metric collection") + + fs.DurationVar(&s.StorageConfig.HealthcheckTimeout, "etcd-healthcheck-timeout", s.StorageConfig.HealthcheckTimeout, + "The timeout to use when checking etcd health.") + + fs.Int64Var(&s.StorageConfig.LeaseManagerConfig.ReuseDurationSeconds, "lease-reuse-duration-seconds", s.StorageConfig.LeaseManagerConfig.ReuseDurationSeconds, + "The time in seconds that each lease is reused. A lower value could avoid large number of objects reusing the same lease. 
Notice that a too small value may cause performance problems at storage layer.") +} + +func (s *EtcdOptions) ApplyTo(c *server.Config) error { + if s == nil { + return nil + } + if err := s.addEtcdHealthEndpoint(c); err != nil { + return err + } + transformerOverrides := make(map[schema.GroupResource]value.Transformer) + if len(s.EncryptionProviderConfigFilepath) > 0 { + var err error + transformerOverrides, err = encryptionconfig.GetTransformerOverrides(s.EncryptionProviderConfigFilepath) + if err != nil { + return err + } + } + + c.RESTOptionsGetter = &SimpleRestOptionsFactory{ + Options: *s, + TransformerOverrides: transformerOverrides, + } + return nil +} + +func (s *EtcdOptions) ApplyWithStorageFactoryTo(factory serverstorage.StorageFactory, c *server.Config) error { + if err := s.addEtcdHealthEndpoint(c); err != nil { + return err + } + c.RESTOptionsGetter = &StorageFactoryRestOptionsFactory{Options: *s, StorageFactory: factory} + return nil +} + +func (s *EtcdOptions) addEtcdHealthEndpoint(c *server.Config) error { + healthCheck, err := storagefactory.CreateHealthCheck(s.StorageConfig) + if err != nil { + return err + } + c.AddHealthChecks(healthz.NamedCheck("etcd", func(r *http.Request) error { + return healthCheck() + })) + + if s.EncryptionProviderConfigFilepath != "" { + kmsPluginHealthzChecks, err := encryptionconfig.GetKMSPluginHealthzCheckers(s.EncryptionProviderConfigFilepath) + if err != nil { + return err + } + c.AddHealthChecks(kmsPluginHealthzChecks...) + } + + return nil +} + +type SimpleRestOptionsFactory struct { + Options EtcdOptions + TransformerOverrides map[schema.GroupResource]value.Transformer +} + +func (f *SimpleRestOptionsFactory) GetRESTOptions(resource schema.GroupResource) (generic.RESTOptions, error) { + ret := generic.RESTOptions{ + StorageConfig: &f.Options.StorageConfig, + Decorator: generic.UndecoratedStorage, + EnableGarbageCollection: f.Options.EnableGarbageCollection, + DeleteCollectionWorkers: f.Options.DeleteCollectionWorkers, + ResourcePrefix: resource.Group + "/" + resource.Resource, + CountMetricPollPeriod: f.Options.StorageConfig.CountMetricPollPeriod, + } + if f.TransformerOverrides != nil { + if transformer, ok := f.TransformerOverrides[resource]; ok { + ret.StorageConfig.Transformer = transformer + } + } + if f.Options.EnableWatchCache { + sizes, err := ParseWatchCacheSizes(f.Options.WatchCacheSizes) + if err != nil { + return generic.RESTOptions{}, err + } + size, ok := sizes[resource] + if ok && size > 0 { + klog.Warningf("Dropping watch-cache-size for %v - watchCache size is now dynamic", resource) + } + if ok && size <= 0 { + ret.Decorator = generic.UndecoratedStorage + } else { + ret.Decorator = genericregistry.StorageWithCacher() + } + } + return ret, nil +} + +type StorageFactoryRestOptionsFactory struct { + Options EtcdOptions + StorageFactory serverstorage.StorageFactory +} + +func (f *StorageFactoryRestOptionsFactory) GetRESTOptions(resource schema.GroupResource) (generic.RESTOptions, error) { + storageConfig, err := f.StorageFactory.NewConfig(resource) + if err != nil { + return generic.RESTOptions{}, fmt.Errorf("unable to find storage destination for %v, due to %v", resource, err.Error()) + } + + ret := generic.RESTOptions{ + StorageConfig: storageConfig, + Decorator: generic.UndecoratedStorage, + DeleteCollectionWorkers: f.Options.DeleteCollectionWorkers, + EnableGarbageCollection: f.Options.EnableGarbageCollection, + ResourcePrefix: f.StorageFactory.ResourcePrefix(resource), + CountMetricPollPeriod: 
f.Options.StorageConfig.CountMetricPollPeriod, + } + if f.Options.EnableWatchCache { + sizes, err := ParseWatchCacheSizes(f.Options.WatchCacheSizes) + if err != nil { + return generic.RESTOptions{}, err + } + size, ok := sizes[resource] + if ok && size > 0 { + klog.Warningf("Dropping watch-cache-size for %v - watchCache size is now dynamic", resource) + } + if ok && size <= 0 { + ret.Decorator = generic.UndecoratedStorage + } else { + ret.Decorator = genericregistry.StorageWithCacher() + } + } + + return ret, nil +} + +// ParseWatchCacheSizes turns a list of cache size values into a map of group resources +// to requested sizes. +func ParseWatchCacheSizes(cacheSizes []string) (map[schema.GroupResource]int, error) { + watchCacheSizes := make(map[schema.GroupResource]int) + for _, c := range cacheSizes { + tokens := strings.Split(c, "#") + if len(tokens) != 2 { + return nil, fmt.Errorf("invalid value of watch cache size: %s", c) + } + + size, err := strconv.Atoi(tokens[1]) + if err != nil { + return nil, fmt.Errorf("invalid size of watch cache size: %s", c) + } + if size < 0 { + return nil, fmt.Errorf("watch cache size cannot be negative: %s", c) + } + watchCacheSizes[schema.ParseGroupResource(tokens[0])] = size + } + return watchCacheSizes, nil +} + +// WriteWatchCacheSizes turns a map of cache size values into a list of string specifications. +func WriteWatchCacheSizes(watchCacheSizes map[schema.GroupResource]int) ([]string, error) { + var cacheSizes []string + + for resource, size := range watchCacheSizes { + if size < 0 { + return nil, fmt.Errorf("watch cache size cannot be negative for resource %s", resource) + } + cacheSizes = append(cacheSizes, fmt.Sprintf("%s#%d", resource.String(), size)) + } + return cacheSizes, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/server/options/feature.go b/vendor/k8s.io/apiserver/pkg/server/options/feature.go new file mode 100644 index 000000000..235635ea9 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/options/feature.go @@ -0,0 +1,72 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package options + +import ( + "github.com/spf13/pflag" + + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apiserver/pkg/server" +) + +type FeatureOptions struct { + EnableProfiling bool + EnableContentionProfiling bool +} + +func NewFeatureOptions() *FeatureOptions { + defaults := server.NewConfig(serializer.CodecFactory{}) + + return &FeatureOptions{ + EnableProfiling: defaults.EnableProfiling, + EnableContentionProfiling: defaults.EnableContentionProfiling, + } +} + +func (o *FeatureOptions) AddFlags(fs *pflag.FlagSet) { + if o == nil { + return + } + + fs.BoolVar(&o.EnableProfiling, "profiling", o.EnableProfiling, + "Enable profiling via web interface host:port/debug/pprof/") + fs.BoolVar(&o.EnableContentionProfiling, "contention-profiling", o.EnableContentionProfiling, + "Enable lock contention profiling, if profiling is enabled") + dummy := false + fs.BoolVar(&dummy, "enable-swagger-ui", dummy, "Enables swagger ui on the apiserver at /swagger-ui") + fs.MarkDeprecated("enable-swagger-ui", "swagger 1.2 support has been removed") +} + +func (o *FeatureOptions) ApplyTo(c *server.Config) error { + if o == nil { + return nil + } + + c.EnableProfiling = o.EnableProfiling + c.EnableContentionProfiling = o.EnableContentionProfiling + + return nil +} + +func (o *FeatureOptions) Validate() []error { + if o == nil { + return nil + } + + errs := []error{} + return errs +} diff --git a/vendor/k8s.io/apiserver/pkg/server/options/recommended.go b/vendor/k8s.io/apiserver/pkg/server/options/recommended.go new file mode 100644 index 000000000..b8d60517a --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/options/recommended.go @@ -0,0 +1,171 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package options + +import ( + "fmt" + + "github.com/spf13/pflag" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/features" + "k8s.io/apiserver/pkg/server" + "k8s.io/apiserver/pkg/storage/storagebackend" + "k8s.io/apiserver/pkg/util/feature" + utilflowcontrol "k8s.io/apiserver/pkg/util/flowcontrol" + "k8s.io/client-go/kubernetes" + "k8s.io/component-base/featuregate" + "k8s.io/klog/v2" +) + +// RecommendedOptions contains the recommended options for running an API server. +// If you add something to this list, it should be in a logical grouping. +// Each of them can be nil to leave the feature unconfigured on ApplyTo. +type RecommendedOptions struct { + Etcd *EtcdOptions + SecureServing *SecureServingOptionsWithLoopback + Authentication *DelegatingAuthenticationOptions + Authorization *DelegatingAuthorizationOptions + Audit *AuditOptions + Features *FeatureOptions + CoreAPI *CoreAPIOptions + + // FeatureGate is a way to plumb feature gate through if you have them. + FeatureGate featuregate.FeatureGate + // ExtraAdmissionInitializers is called once after all ApplyTo from the options above, to pass the returned + // admission plugin initializers to Admission.ApplyTo. 
+	ExtraAdmissionInitializers func(c *server.RecommendedConfig) ([]admission.PluginInitializer, error)
+	Admission                  *AdmissionOptions
+	// API Server Egress Selector is used to control outbound traffic from the API Server.
+	EgressSelector *EgressSelectorOptions
+	// Traces contains options to control distributed request tracing.
+	Traces *TracingOptions
+}
+
+func NewRecommendedOptions(prefix string, codec runtime.Codec) *RecommendedOptions {
+	sso := NewSecureServingOptions()
+
+	// We are composing recommended options for an aggregated api-server,
+	// whose client is typically a proxy multiplexing many operations ---
+	// notably including long-running ones --- into one HTTP/2 connection
+	// into this server. So allow many concurrent operations.
+	sso.HTTP2MaxStreamsPerConnection = 1000
+
+	return &RecommendedOptions{
+		Etcd:           NewEtcdOptions(storagebackend.NewDefaultConfig(prefix, codec)),
+		SecureServing:  sso.WithLoopback(),
+		Authentication: NewDelegatingAuthenticationOptions(),
+		Authorization:  NewDelegatingAuthorizationOptions(),
+		Audit:          NewAuditOptions(),
+		Features:       NewFeatureOptions(),
+		CoreAPI:        NewCoreAPIOptions(),
+		// Wires in a global by default that sadly people will abuse to have different meanings in different repos.
+		// Please consider creating your own FeatureGate so you can have a consistent meaning for what a variable contains
+		// across different repos. Future you will thank you.
+		FeatureGate:                feature.DefaultFeatureGate,
+		ExtraAdmissionInitializers: func(c *server.RecommendedConfig) ([]admission.PluginInitializer, error) { return nil, nil },
+		Admission:                  NewAdmissionOptions(),
+		EgressSelector:             NewEgressSelectorOptions(),
+		Traces:                     NewTracingOptions(),
+	}
+}
+
+func (o *RecommendedOptions) AddFlags(fs *pflag.FlagSet) {
+	o.Etcd.AddFlags(fs)
+	o.SecureServing.AddFlags(fs)
+	o.Authentication.AddFlags(fs)
+	o.Authorization.AddFlags(fs)
+	o.Audit.AddFlags(fs)
+	o.Features.AddFlags(fs)
+	o.CoreAPI.AddFlags(fs)
+	o.Admission.AddFlags(fs)
+	o.EgressSelector.AddFlags(fs)
+	o.Traces.AddFlags(fs)
+}
+
+// ApplyTo adds RecommendedOptions to the server configuration.
+// pluginInitializers can be empty; it is only needed for additional initializers.
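
As context for the ApplyTo implementation that follows, a minimal sketch of how an aggregated API server typically wires these options; the etcd prefix and flag-set name are placeholder values, and a real server would register its API types with the scheme before building a codec:

package main

import (
	"github.com/spf13/pflag"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer"
	genericoptions "k8s.io/apiserver/pkg/server/options"
)

func main() {
	scheme := runtime.NewScheme()
	codecs := serializer.NewCodecFactory(scheme)

	// NewRecommendedOptions takes the etcd key prefix and a storage codec.
	opts := genericoptions.NewRecommendedOptions("/registry/example.io", codecs.LegacyCodec())

	fs := pflag.NewFlagSet("example-apiserver", pflag.ExitOnError)
	opts.AddFlags(fs)
	// After fs.Parse, call opts.Validate() and then opts.ApplyTo on a
	// server.RecommendedConfig to produce a runnable configuration.
}
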
+func (o *RecommendedOptions) ApplyTo(config *server.RecommendedConfig) error { + if err := o.Etcd.ApplyTo(&config.Config); err != nil { + return err + } + if err := o.EgressSelector.ApplyTo(&config.Config); err != nil { + return err + } + if feature.DefaultFeatureGate.Enabled(features.APIServerTracing) { + if err := o.Traces.ApplyTo(config.Config.EgressSelector, &config.Config); err != nil { + return err + } + } + if err := o.SecureServing.ApplyTo(&config.Config.SecureServing, &config.Config.LoopbackClientConfig); err != nil { + return err + } + if err := o.Authentication.ApplyTo(&config.Config.Authentication, config.SecureServing, config.OpenAPIConfig); err != nil { + return err + } + if err := o.Authorization.ApplyTo(&config.Config.Authorization); err != nil { + return err + } + if err := o.Audit.ApplyTo(&config.Config); err != nil { + return err + } + if err := o.Features.ApplyTo(&config.Config); err != nil { + return err + } + if err := o.CoreAPI.ApplyTo(config); err != nil { + return err + } + if initializers, err := o.ExtraAdmissionInitializers(config); err != nil { + return err + } else if err := o.Admission.ApplyTo(&config.Config, config.SharedInformerFactory, config.ClientConfig, o.FeatureGate, initializers...); err != nil { + return err + } + if feature.DefaultFeatureGate.Enabled(features.APIPriorityAndFairness) { + if config.ClientConfig != nil { + if config.MaxRequestsInFlight+config.MaxMutatingRequestsInFlight <= 0 { + return fmt.Errorf("invalid configuration: MaxRequestsInFlight=%d and MaxMutatingRequestsInFlight=%d; they must add up to something positive", config.MaxRequestsInFlight, config.MaxMutatingRequestsInFlight) + + } + config.FlowControl = utilflowcontrol.New( + config.SharedInformerFactory, + kubernetes.NewForConfigOrDie(config.ClientConfig).FlowcontrolV1beta1(), + config.MaxRequestsInFlight+config.MaxMutatingRequestsInFlight, + config.RequestTimeout/4, + ) + } else { + klog.Warningf("Neither kubeconfig is provided nor service-account is mounted, so APIPriorityAndFairness will be disabled") + } + } + return nil +} + +func (o *RecommendedOptions) Validate() []error { + errors := []error{} + errors = append(errors, o.Etcd.Validate()...) + errors = append(errors, o.SecureServing.Validate()...) + errors = append(errors, o.Authentication.Validate()...) + errors = append(errors, o.Authorization.Validate()...) + errors = append(errors, o.Audit.Validate()...) + errors = append(errors, o.Features.Validate()...) + errors = append(errors, o.CoreAPI.Validate()...) + errors = append(errors, o.Admission.Validate()...) + errors = append(errors, o.EgressSelector.Validate()...) + errors = append(errors, o.Traces.Validate()...) + + return errors +} diff --git a/vendor/k8s.io/apiserver/pkg/server/options/server_run_options.go b/vendor/k8s.io/apiserver/pkg/server/options/server_run_options.go new file mode 100644 index 000000000..9758eec11 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/options/server_run_options.go @@ -0,0 +1,249 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package options + +import ( + "fmt" + "net" + "strings" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apiserver/pkg/server" + utilfeature "k8s.io/apiserver/pkg/util/feature" + + "github.com/spf13/pflag" +) + +// ServerRunOptions contains the options while running a generic api server. +type ServerRunOptions struct { + AdvertiseAddress net.IP + + CorsAllowedOriginList []string + HSTSDirectives []string + ExternalHost string + MaxRequestsInFlight int + MaxMutatingRequestsInFlight int + RequestTimeout time.Duration + GoawayChance float64 + LivezGracePeriod time.Duration + MinRequestTimeout int + ShutdownDelayDuration time.Duration + // We intentionally did not add a flag for this option. Users of the + // apiserver library can wire it to a flag. + JSONPatchMaxCopyBytes int64 + // The limit on the request body size that would be accepted and + // decoded in a write request. 0 means no limit. + // We intentionally did not add a flag for this option. Users of the + // apiserver library can wire it to a flag. + MaxRequestBodyBytes int64 + EnablePriorityAndFairness bool +} + +func NewServerRunOptions() *ServerRunOptions { + defaults := server.NewConfig(serializer.CodecFactory{}) + return &ServerRunOptions{ + MaxRequestsInFlight: defaults.MaxRequestsInFlight, + MaxMutatingRequestsInFlight: defaults.MaxMutatingRequestsInFlight, + RequestTimeout: defaults.RequestTimeout, + LivezGracePeriod: defaults.LivezGracePeriod, + MinRequestTimeout: defaults.MinRequestTimeout, + ShutdownDelayDuration: defaults.ShutdownDelayDuration, + JSONPatchMaxCopyBytes: defaults.JSONPatchMaxCopyBytes, + MaxRequestBodyBytes: defaults.MaxRequestBodyBytes, + EnablePriorityAndFairness: true, + } +} + +// ApplyTo applies the run options to the method receiver and returns self +func (s *ServerRunOptions) ApplyTo(c *server.Config) error { + c.CorsAllowedOriginList = s.CorsAllowedOriginList + c.HSTSDirectives = s.HSTSDirectives + c.ExternalAddress = s.ExternalHost + c.MaxRequestsInFlight = s.MaxRequestsInFlight + c.MaxMutatingRequestsInFlight = s.MaxMutatingRequestsInFlight + c.LivezGracePeriod = s.LivezGracePeriod + c.RequestTimeout = s.RequestTimeout + c.GoawayChance = s.GoawayChance + c.MinRequestTimeout = s.MinRequestTimeout + c.ShutdownDelayDuration = s.ShutdownDelayDuration + c.JSONPatchMaxCopyBytes = s.JSONPatchMaxCopyBytes + c.MaxRequestBodyBytes = s.MaxRequestBodyBytes + c.PublicAddress = s.AdvertiseAddress + + return nil +} + +// DefaultAdvertiseAddress sets the field AdvertiseAddress if unset. The field will be set based on the SecureServingOptions. +func (s *ServerRunOptions) DefaultAdvertiseAddress(secure *SecureServingOptions) error { + if secure == nil { + return nil + } + + if s.AdvertiseAddress == nil || s.AdvertiseAddress.IsUnspecified() { + hostIP, err := secure.DefaultExternalAddress() + if err != nil { + return fmt.Errorf("Unable to find suitable network address.error='%v'. 
"+ + "Try to set the AdvertiseAddress directly or provide a valid BindAddress to fix this.", err) + } + s.AdvertiseAddress = hostIP + } + + return nil +} + +// Validate checks validation of ServerRunOptions +func (s *ServerRunOptions) Validate() []error { + errors := []error{} + + if s.LivezGracePeriod < 0 { + errors = append(errors, fmt.Errorf("--livez-grace-period can not be a negative value")) + } + + if s.MaxRequestsInFlight < 0 { + errors = append(errors, fmt.Errorf("--max-requests-inflight can not be negative value")) + } + if s.MaxMutatingRequestsInFlight < 0 { + errors = append(errors, fmt.Errorf("--max-mutating-requests-inflight can not be negative value")) + } + + if s.RequestTimeout.Nanoseconds() < 0 { + errors = append(errors, fmt.Errorf("--request-timeout can not be negative value")) + } + + if s.GoawayChance < 0 || s.GoawayChance > 0.02 { + errors = append(errors, fmt.Errorf("--goaway-chance can not be less than 0 or greater than 0.02")) + } + + if s.MinRequestTimeout < 0 { + errors = append(errors, fmt.Errorf("--min-request-timeout can not be negative value")) + } + + if s.ShutdownDelayDuration < 0 { + errors = append(errors, fmt.Errorf("--shutdown-delay-duration can not be negative value")) + } + + if s.JSONPatchMaxCopyBytes < 0 { + errors = append(errors, fmt.Errorf("--json-patch-max-copy-bytes can not be negative value")) + } + + if s.MaxRequestBodyBytes < 0 { + errors = append(errors, fmt.Errorf("--max-resource-write-bytes can not be negative value")) + } + + if err := validateHSTSDirectives(s.HSTSDirectives); err != nil { + errors = append(errors, err) + } + return errors +} + +func validateHSTSDirectives(hstsDirectives []string) error { + // HSTS Headers format: Strict-Transport-Security:max-age=expireTime [;includeSubDomains] [;preload] + // See https://tools.ietf.org/html/rfc6797#section-6.1 for more information + allErrors := []error{} + for _, hstsDirective := range hstsDirectives { + if len(strings.TrimSpace(hstsDirective)) == 0 { + allErrors = append(allErrors, fmt.Errorf("empty value in strict-transport-security-directives")) + continue + } + if hstsDirective != "includeSubDomains" && hstsDirective != "preload" { + maxAgeDirective := strings.Split(hstsDirective, "=") + if len(maxAgeDirective) != 2 || maxAgeDirective[0] != "max-age" { + allErrors = append(allErrors, fmt.Errorf("--strict-transport-security-directives invalid, allowed values: max-age=expireTime, includeSubDomains, preload. see https://tools.ietf.org/html/rfc6797#section-6.1 for more information")) + } + } + } + return errors.NewAggregate(allErrors) +} + +// AddUniversalFlags adds flags for a specific APIServer to the specified FlagSet +func (s *ServerRunOptions) AddUniversalFlags(fs *pflag.FlagSet) { + // Note: the weird ""+ in below lines seems to be the only way to get gofmt to + // arrange these text blocks sensibly. Grrr. + + fs.IPVar(&s.AdvertiseAddress, "advertise-address", s.AdvertiseAddress, ""+ + "The IP address on which to advertise the apiserver to members of the cluster. This "+ + "address must be reachable by the rest of the cluster. If blank, the --bind-address "+ + "will be used. If --bind-address is unspecified, the host's default interface will "+ + "be used.") + + fs.StringSliceVar(&s.CorsAllowedOriginList, "cors-allowed-origins", s.CorsAllowedOriginList, ""+ + "List of allowed origins for CORS, comma separated. An allowed origin can be a regular "+ + "expression to support subdomain matching. 
If this list is empty CORS will not be enabled.") + + fs.StringSliceVar(&s.HSTSDirectives, "strict-transport-security-directives", s.HSTSDirectives, ""+ + "List of directives for HSTS, comma separated. If this list is empty, then HSTS directives will not "+ + "be added. Example: 'max-age=31536000,includeSubDomains,preload'") + + deprecatedTargetRAMMB := 0 + fs.IntVar(&deprecatedTargetRAMMB, "target-ram-mb", deprecatedTargetRAMMB, + "DEPRECATED: Memory limit for apiserver in MB (used to configure sizes of caches, etc.)") + fs.MarkDeprecated("target-ram-mb", "This flag will be removed in v1.23") + + fs.StringVar(&s.ExternalHost, "external-hostname", s.ExternalHost, + "The hostname to use when generating externalized URLs for this master (e.g. Swagger API Docs or OpenID Discovery).") + + deprecatedMasterServiceNamespace := metav1.NamespaceDefault + fs.StringVar(&deprecatedMasterServiceNamespace, "master-service-namespace", deprecatedMasterServiceNamespace, ""+ + "DEPRECATED: the namespace from which the Kubernetes master services should be injected into pods.") + + fs.IntVar(&s.MaxRequestsInFlight, "max-requests-inflight", s.MaxRequestsInFlight, ""+ + "This and --max-mutating-requests-inflight are summed to determine the server's total concurrency limit "+ + "(which must be positive) if --enable-priority-and-fairness is true. "+ + "Otherwise, this flag limits the maximum number of non-mutating requests in flight, "+ + "or a zero value disables the limit completely.") + + fs.IntVar(&s.MaxMutatingRequestsInFlight, "max-mutating-requests-inflight", s.MaxMutatingRequestsInFlight, ""+ + "This and --max-requests-inflight are summed to determine the server's total concurrency limit "+ + "(which must be positive) if --enable-priority-and-fairness is true. "+ + "Otherwise, this flag limits the maximum number of mutating requests in flight, "+ + "or a zero value disables the limit completely.") + + fs.DurationVar(&s.RequestTimeout, "request-timeout", s.RequestTimeout, ""+ + "An optional field indicating the duration a handler must keep a request open before timing "+ + "it out. This is the default request timeout for requests but may be overridden by flags such as "+ + "--min-request-timeout for specific types of requests.") + + fs.Float64Var(&s.GoawayChance, "goaway-chance", s.GoawayChance, ""+ + "To prevent HTTP/2 clients from getting stuck on a single apiserver, randomly close a connection (GOAWAY). "+ + "The client's other in-flight requests won't be affected, and the client will reconnect, likely landing on a different apiserver after going through the load balancer again. "+ + "This argument sets the fraction of requests that will be sent a GOAWAY. Clusters with single apiservers, or which don't use a load balancer, should NOT enable this. "+ + "Min is 0 (off), Max is .02 (1/50 requests); .001 (1/1000) is a recommended starting point.") + + fs.DurationVar(&s.LivezGracePeriod, "livez-grace-period", s.LivezGracePeriod, ""+ + "This option represents the maximum amount of time it should take for apiserver to complete its startup sequence "+ + "and become live. From apiserver's start time to when this amount of time has elapsed, /livez will assume "+ + "that unfinished post-start hooks will complete successfully and therefore return true.") + + fs.IntVar(&s.MinRequestTimeout, "min-request-timeout", s.MinRequestTimeout, ""+ + "An optional field indicating the minimum number of seconds a handler must keep "+ + "a request open before timing it out. 
Currently only honored by the watch request "+ + "handler, which picks a randomized value above this number as the connection timeout, "+ + "to spread out load.") + + fs.BoolVar(&s.EnablePriorityAndFairness, "enable-priority-and-fairness", s.EnablePriorityAndFairness, ""+ + "If true and the APIPriorityAndFairness feature gate is enabled, replace the max-in-flight handler with an enhanced one that queues and dispatches with priority and fairness") + + fs.DurationVar(&s.ShutdownDelayDuration, "shutdown-delay-duration", s.ShutdownDelayDuration, ""+ + "Time to delay the termination. During that time the server keeps serving requests normally. The endpoints /healthz and /livez "+ + "will return success, but /readyz immediately returns failure. Graceful termination starts after this delay "+ + "has elapsed. This can be used to allow load balancer to stop sending traffic to this server.") + + utilfeature.DefaultMutableFeatureGate.AddFlag(fs) +} diff --git a/vendor/k8s.io/apiserver/pkg/server/options/serving.go b/vendor/k8s.io/apiserver/pkg/server/options/serving.go new file mode 100644 index 000000000..f435ba5b8 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/options/serving.go @@ -0,0 +1,383 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package options + +import ( + "context" + "fmt" + "net" + "path" + "strconv" + "strings" + "syscall" + + "github.com/spf13/pflag" + "k8s.io/klog/v2" + + utilnet "k8s.io/apimachinery/pkg/util/net" + "k8s.io/apiserver/pkg/server" + "k8s.io/apiserver/pkg/server/dynamiccertificates" + certutil "k8s.io/client-go/util/cert" + "k8s.io/client-go/util/keyutil" + cliflag "k8s.io/component-base/cli/flag" +) + +type SecureServingOptions struct { + BindAddress net.IP + // BindPort is ignored when Listener is set, will serve https even with 0. + BindPort int + // BindNetwork is the type of network to bind to - defaults to "tcp", accepts "tcp", + // "tcp4", and "tcp6". + BindNetwork string + // Required set to true means that BindPort cannot be zero. + Required bool + // ExternalAddress is the address advertised, even if BindAddress is a loopback. By default this + // is set to BindAddress if the later no loopback, or to the first host interface address. + ExternalAddress net.IP + + // Listener is the secure server network listener. + // either Listener or BindAddress/BindPort/BindNetwork is set, + // if Listener is set, use it and omit BindAddress/BindPort/BindNetwork. + Listener net.Listener + + // ServerCert is the TLS cert info for serving secure traffic + ServerCert GeneratableKeyCert + // SNICertKeys are named CertKeys for serving secure traffic with SNI support. + SNICertKeys []cliflag.NamedCertKey + // CipherSuites is the list of allowed cipher suites for the server. + // Values are from tls package constants (https://golang.org/pkg/crypto/tls/#pkg-constants). + CipherSuites []string + // MinTLSVersion is the minimum TLS version supported. 
+ // Values are from tls package constants (https://golang.org/pkg/crypto/tls/#pkg-constants). + MinTLSVersion string + + // HTTP2MaxStreamsPerConnection is the limit that the api server imposes on each client. + // A value of zero means to use the default provided by golang's HTTP/2 support. + HTTP2MaxStreamsPerConnection int + + // PermitPortSharing controls if SO_REUSEPORT is used when binding the port, which allows + // more than one instance to bind on the same address and port. + PermitPortSharing bool + + // PermitAddressSharing controls if SO_REUSEADDR is used when binding the port. + PermitAddressSharing bool +} + +type CertKey struct { + // CertFile is a file containing a PEM-encoded certificate, and possibly the complete certificate chain + CertFile string + // KeyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile + KeyFile string +} + +type GeneratableKeyCert struct { + // CertKey allows setting an explicit cert/key file to use. + CertKey CertKey + + // CertDirectory specifies a directory to write generated certificates to if CertFile/KeyFile aren't explicitly set. + // PairName is used to determine the filenames within CertDirectory. + // If CertDirectory and PairName are not set, an in-memory certificate will be generated. + CertDirectory string + // PairName is the name which will be used with CertDirectory to make a cert and key filenames. + // It becomes CertDirectory/PairName.crt and CertDirectory/PairName.key + PairName string + + // GeneratedCert holds an in-memory generated certificate if CertFile/KeyFile aren't explicitly set, and CertDirectory/PairName are not set. + GeneratedCert dynamiccertificates.CertKeyContentProvider + + // FixtureDirectory is a directory that contains test fixture used to avoid regeneration of certs during tests. + // The format is: + // _-_-.crt + // _-_-.key + FixtureDirectory string +} + +func NewSecureServingOptions() *SecureServingOptions { + return &SecureServingOptions{ + BindAddress: net.ParseIP("0.0.0.0"), + BindPort: 443, + ServerCert: GeneratableKeyCert{ + PairName: "apiserver", + CertDirectory: "apiserver.local.config/certificates", + }, + } +} + +func (s *SecureServingOptions) DefaultExternalAddress() (net.IP, error) { + if s.ExternalAddress != nil && !s.ExternalAddress.IsUnspecified() { + return s.ExternalAddress, nil + } + return utilnet.ResolveBindAddress(s.BindAddress) +} + +func (s *SecureServingOptions) Validate() []error { + if s == nil { + return nil + } + + errors := []error{} + + if s.Required && s.BindPort < 1 || s.BindPort > 65535 { + errors = append(errors, fmt.Errorf("--secure-port %v must be between 1 and 65535, inclusive. It cannot be turned off with 0", s.BindPort)) + } else if s.BindPort < 0 || s.BindPort > 65535 { + errors = append(errors, fmt.Errorf("--secure-port %v must be between 0 and 65535, inclusive. 0 for turning off secure port", s.BindPort)) + } + + if (len(s.ServerCert.CertKey.CertFile) != 0 || len(s.ServerCert.CertKey.KeyFile) != 0) && s.ServerCert.GeneratedCert != nil { + errors = append(errors, fmt.Errorf("cert/key file and in-memory certificate cannot both be set")) + } + + return errors +} + +func (s *SecureServingOptions) AddFlags(fs *pflag.FlagSet) { + if s == nil { + return + } + + fs.IPVar(&s.BindAddress, "bind-address", s.BindAddress, ""+ + "The IP address on which to listen for the --secure-port port. The "+ + "associated interface(s) must be reachable by the rest of the cluster, and by CLI/web "+ + "clients. 
If blank or an unspecified address (0.0.0.0 or ::), all interfaces will be used.") + + desc := "The port on which to serve HTTPS with authentication and authorization." + if s.Required { + desc += " It cannot be switched off with 0." + } else { + desc += " If 0, don't serve HTTPS at all." + } + fs.IntVar(&s.BindPort, "secure-port", s.BindPort, desc) + + fs.StringVar(&s.ServerCert.CertDirectory, "cert-dir", s.ServerCert.CertDirectory, ""+ + "The directory where the TLS certs are located. "+ + "If --tls-cert-file and --tls-private-key-file are provided, this flag will be ignored.") + + fs.StringVar(&s.ServerCert.CertKey.CertFile, "tls-cert-file", s.ServerCert.CertKey.CertFile, ""+ + "File containing the default x509 Certificate for HTTPS. (CA cert, if any, concatenated "+ + "after server cert). If HTTPS serving is enabled, and --tls-cert-file and "+ + "--tls-private-key-file are not provided, a self-signed certificate and key "+ + "are generated for the public address and saved to the directory specified by --cert-dir.") + + fs.StringVar(&s.ServerCert.CertKey.KeyFile, "tls-private-key-file", s.ServerCert.CertKey.KeyFile, + "File containing the default x509 private key matching --tls-cert-file.") + + tlsCipherPreferredValues := cliflag.PreferredTLSCipherNames() + tlsCipherInsecureValues := cliflag.InsecureTLSCipherNames() + fs.StringSliceVar(&s.CipherSuites, "tls-cipher-suites", s.CipherSuites, + "Comma-separated list of cipher suites for the server. "+ + "If omitted, the default Go cipher suites will be used. \n"+ + "Preferred values: "+strings.Join(tlsCipherPreferredValues, ", ")+". \n"+ + "Insecure values: "+strings.Join(tlsCipherInsecureValues, ", ")+".") + + tlsPossibleVersions := cliflag.TLSPossibleVersions() + fs.StringVar(&s.MinTLSVersion, "tls-min-version", s.MinTLSVersion, + "Minimum TLS version supported. "+ + "Possible values: "+strings.Join(tlsPossibleVersions, ", ")) + + fs.Var(cliflag.NewNamedCertKeyArray(&s.SNICertKeys), "tls-sni-cert-key", ""+ + "A pair of x509 certificate and private key file paths, optionally suffixed with a list of "+ + "domain patterns which are fully qualified domain names, possibly with prefixed wildcard "+ + "segments. The domain patterns also allow IP addresses, but IPs should only be used if "+ + "the apiserver has visibility to the IP address requested by a client. "+ + "If no domain patterns are provided, the names of the certificate are "+ + "extracted. Non-wildcard matches trump over wildcard matches, explicit domain patterns "+ + "trump over extracted names. For multiple key/certificate pairs, use the "+ + "--tls-sni-cert-key multiple times. "+ + "Examples: \"example.crt,example.key\" or \"foo.crt,foo.key:*.foo.com,foo.com\".") + + fs.IntVar(&s.HTTP2MaxStreamsPerConnection, "http2-max-streams-per-connection", s.HTTP2MaxStreamsPerConnection, ""+ + "The limit that the server gives to clients for "+ + "the maximum number of streams in an HTTP/2 connection. "+ + "Zero means to use golang's default.") + + fs.BoolVar(&s.PermitPortSharing, "permit-port-sharing", s.PermitPortSharing, + "If true, SO_REUSEPORT will be used when binding the port, which allows "+ + "more than one instance to bind on the same address and port. [default=false]") + + fs.BoolVar(&s.PermitAddressSharing, "permit-address-sharing", s.PermitAddressSharing, + "If true, SO_REUSEADDR will be used when binding the port. This allows binding "+ + "to wildcard IPs like 0.0.0.0 and specific IPs in parallel, and it avoids waiting "+ + "for the kernel to release sockets in TIME_WAIT state. 
[default=false]") +} + +// ApplyTo fills up serving information in the server configuration. +func (s *SecureServingOptions) ApplyTo(config **server.SecureServingInfo) error { + if s == nil { + return nil + } + if s.BindPort <= 0 && s.Listener == nil { + return nil + } + + if s.Listener == nil { + var err error + addr := net.JoinHostPort(s.BindAddress.String(), strconv.Itoa(s.BindPort)) + + c := net.ListenConfig{} + + ctls := multipleControls{} + if s.PermitPortSharing { + ctls = append(ctls, permitPortReuse) + } + if s.PermitAddressSharing { + ctls = append(ctls, permitAddressReuse) + } + if len(ctls) > 0 { + c.Control = ctls.Control + } + + s.Listener, s.BindPort, err = CreateListener(s.BindNetwork, addr, c) + if err != nil { + return fmt.Errorf("failed to create listener: %v", err) + } + } else { + if _, ok := s.Listener.Addr().(*net.TCPAddr); !ok { + return fmt.Errorf("failed to parse ip and port from listener") + } + s.BindPort = s.Listener.Addr().(*net.TCPAddr).Port + s.BindAddress = s.Listener.Addr().(*net.TCPAddr).IP + } + + *config = &server.SecureServingInfo{ + Listener: s.Listener, + HTTP2MaxStreamsPerConnection: s.HTTP2MaxStreamsPerConnection, + } + c := *config + + serverCertFile, serverKeyFile := s.ServerCert.CertKey.CertFile, s.ServerCert.CertKey.KeyFile + // load main cert + if len(serverCertFile) != 0 || len(serverKeyFile) != 0 { + var err error + c.Cert, err = dynamiccertificates.NewDynamicServingContentFromFiles("serving-cert", serverCertFile, serverKeyFile) + if err != nil { + return err + } + } else if s.ServerCert.GeneratedCert != nil { + c.Cert = s.ServerCert.GeneratedCert + } + + if len(s.CipherSuites) != 0 { + cipherSuites, err := cliflag.TLSCipherSuites(s.CipherSuites) + if err != nil { + return err + } + c.CipherSuites = cipherSuites + } + + var err error + c.MinTLSVersion, err = cliflag.TLSVersion(s.MinTLSVersion) + if err != nil { + return err + } + + // load SNI certs + namedTLSCerts := make([]dynamiccertificates.SNICertKeyContentProvider, 0, len(s.SNICertKeys)) + for _, nck := range s.SNICertKeys { + tlsCert, err := dynamiccertificates.NewDynamicSNIContentFromFiles("sni-serving-cert", nck.CertFile, nck.KeyFile, nck.Names...) 
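+		// Note that tlsCert is appended below before err is checked; this is safe only because the loop returns on the first error without using the appended entry.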
+ namedTLSCerts = append(namedTLSCerts, tlsCert) + if err != nil { + return fmt.Errorf("failed to load SNI cert and key: %v", err) + } + } + c.SNICerts = namedTLSCerts + + return nil +} + +func (s *SecureServingOptions) MaybeDefaultWithSelfSignedCerts(publicAddress string, alternateDNS []string, alternateIPs []net.IP) error { + if s == nil || (s.BindPort == 0 && s.Listener == nil) { + return nil + } + keyCert := &s.ServerCert.CertKey + if len(keyCert.CertFile) != 0 || len(keyCert.KeyFile) != 0 { + return nil + } + + canReadCertAndKey := false + if len(s.ServerCert.CertDirectory) > 0 { + if len(s.ServerCert.PairName) == 0 { + return fmt.Errorf("PairName is required if CertDirectory is set") + } + keyCert.CertFile = path.Join(s.ServerCert.CertDirectory, s.ServerCert.PairName+".crt") + keyCert.KeyFile = path.Join(s.ServerCert.CertDirectory, s.ServerCert.PairName+".key") + if canRead, err := certutil.CanReadCertAndKey(keyCert.CertFile, keyCert.KeyFile); err != nil { + return err + } else { + canReadCertAndKey = canRead + } + } + + if !canReadCertAndKey { + // add either the bind address or localhost to the valid alternates + if s.BindAddress.IsUnspecified() { + alternateDNS = append(alternateDNS, "localhost") + } else { + alternateIPs = append(alternateIPs, s.BindAddress) + } + + if cert, key, err := certutil.GenerateSelfSignedCertKeyWithFixtures(publicAddress, alternateIPs, alternateDNS, s.ServerCert.FixtureDirectory); err != nil { + return fmt.Errorf("unable to generate self signed cert: %v", err) + } else if len(keyCert.CertFile) > 0 && len(keyCert.KeyFile) > 0 { + if err := certutil.WriteCert(keyCert.CertFile, cert); err != nil { + return err + } + if err := keyutil.WriteKey(keyCert.KeyFile, key); err != nil { + return err + } + klog.Infof("Generated self-signed cert (%s, %s)", keyCert.CertFile, keyCert.KeyFile) + } else { + s.ServerCert.GeneratedCert, err = dynamiccertificates.NewStaticCertKeyContent("Generated self signed cert", cert, key) + if err != nil { + return err + } + klog.Infof("Generated self-signed cert in-memory") + } + } + + return nil +} + +func CreateListener(network, addr string, config net.ListenConfig) (net.Listener, int, error) { + if len(network) == 0 { + network = "tcp" + } + + ln, err := config.Listen(context.TODO(), network, addr) + if err != nil { + return nil, 0, fmt.Errorf("failed to listen on %v: %v", addr, err) + } + + // get port + tcpAddr, ok := ln.Addr().(*net.TCPAddr) + if !ok { + ln.Close() + return nil, 0, fmt.Errorf("invalid listen address: %q", ln.Addr().String()) + } + + return ln, tcpAddr.Port, nil +} + +type multipleControls []func(network, addr string, conn syscall.RawConn) error + +func (mcs multipleControls) Control(network, addr string, conn syscall.RawConn) error { + for _, c := range mcs { + if err := c(network, addr, conn); err != nil { + return err + } + } + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/server/options/serving_unix.go b/vendor/k8s.io/apiserver/pkg/server/options/serving_unix.go new file mode 100644 index 000000000..5bf87e4b1 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/options/serving_unix.go @@ -0,0 +1,43 @@ +// +build !windows + +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package options + +import ( + "syscall" + + "golang.org/x/sys/unix" + + "k8s.io/klog/v2" +) + +func permitPortReuse(network, addr string, conn syscall.RawConn) error { + return conn.Control(func(fd uintptr) { + if err := syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, unix.SO_REUSEPORT, 1); err != nil { + klog.Warningf("failed to set SO_REUSEPORT on socket: %v", err) + } + }) +} + +func permitAddressReuse(network, addr string, conn syscall.RawConn) error { + return conn.Control(func(fd uintptr) { + if err := syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, unix.SO_REUSEADDR, 1); err != nil { + klog.Warningf("failed to set SO_REUSEADDR on socket: %v", err) + } + }) +} diff --git a/vendor/k8s.io/apiserver/pkg/server/options/serving_windows.go b/vendor/k8s.io/apiserver/pkg/server/options/serving_windows.go new file mode 100644 index 000000000..1663acee0 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/options/serving_windows.go @@ -0,0 +1,34 @@ +// +build windows + +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package options + +import ( + "fmt" + "syscall" +) + +func permitPortReuse(network, address string, c syscall.RawConn) error { + return fmt.Errorf("port reuse is not supported on Windows") +} + +// Windows supports SO_REUSEADDR, but it may cause undefined behavior, as +// there is no protection against port hijacking. +func permitAddressReuse(network, addr string, conn syscall.RawConn) error { + return fmt.Errorf("address reuse is not supported on Windows") +} diff --git a/vendor/k8s.io/apiserver/pkg/server/options/serving_with_loopback.go b/vendor/k8s.io/apiserver/pkg/server/options/serving_with_loopback.go new file mode 100644 index 000000000..2317be82d --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/options/serving_with_loopback.go @@ -0,0 +1,81 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
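
The two helpers above are the platform halves of the PermitPortSharing and PermitAddressSharing options; they run inside net.ListenConfig.Control, which is invoked after the socket is created but before it is bound. A standalone sketch of the same pattern (Unix-only; SO_REUSEADDR is chosen as the example option):

package main

import (
	"context"
	"net"
	"syscall"

	"golang.org/x/sys/unix"
)

func main() {
	lc := net.ListenConfig{
		// Control runs between socket(2) and bind(2), the only window in
		// which SO_REUSEADDR can still affect the bind.
		Control: func(network, address string, conn syscall.RawConn) error {
			return conn.Control(func(fd uintptr) {
				_ = syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, unix.SO_REUSEADDR, 1)
			})
		},
	}
	ln, err := lc.Listen(context.Background(), "tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	defer ln.Close()
}
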
+*/ + +package options + +import ( + "fmt" + + "github.com/google/uuid" + + "k8s.io/apiserver/pkg/server" + "k8s.io/apiserver/pkg/server/dynamiccertificates" + "k8s.io/client-go/rest" + certutil "k8s.io/client-go/util/cert" +) + +type SecureServingOptionsWithLoopback struct { + *SecureServingOptions +} + +func (o *SecureServingOptions) WithLoopback() *SecureServingOptionsWithLoopback { + return &SecureServingOptionsWithLoopback{o} +} + +// ApplyTo fills up serving information in the server configuration. +func (s *SecureServingOptionsWithLoopback) ApplyTo(secureServingInfo **server.SecureServingInfo, loopbackClientConfig **rest.Config) error { + if s == nil || s.SecureServingOptions == nil || secureServingInfo == nil { + return nil + } + + if err := s.SecureServingOptions.ApplyTo(secureServingInfo); err != nil { + return err + } + + if *secureServingInfo == nil || loopbackClientConfig == nil { + return nil + } + + // create self-signed cert+key with the fake server.LoopbackClientServerNameOverride and + // let the server return it when the loopback client connects. + certPem, keyPem, err := certutil.GenerateSelfSignedCertKey(server.LoopbackClientServerNameOverride, nil, nil) + if err != nil { + return fmt.Errorf("failed to generate self-signed certificate for loopback connection: %v", err) + } + certProvider, err := dynamiccertificates.NewStaticSNICertKeyContent("self-signed loopback", certPem, keyPem, server.LoopbackClientServerNameOverride) + if err != nil { + return fmt.Errorf("failed to generate self-signed certificate for loopback connection: %v", err) + } + + // Write to the front of SNICerts so that this overrides any other certs with the same name + (*secureServingInfo).SNICerts = append([]dynamiccertificates.SNICertKeyContentProvider{certProvider}, (*secureServingInfo).SNICerts...) + + secureLoopbackClientConfig, err := (*secureServingInfo).NewLoopbackClientConfig(uuid.New().String(), certPem) + switch { + // if we failed and there's no fallback loopback client config, we need to fail + case err != nil && *loopbackClientConfig == nil: + (*secureServingInfo).SNICerts = (*secureServingInfo).SNICerts[1:] + return err + + // if we failed, but we already have a fallback loopback client config (usually insecure), allow it + case err != nil && *loopbackClientConfig != nil: + + default: + *loopbackClientConfig = secureLoopbackClientConfig + } + + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/server/options/tracing.go b/vendor/k8s.io/apiserver/pkg/server/options/tracing.go new file mode 100644 index 000000000..bea7f363a --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/options/tracing.go @@ -0,0 +1,127 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
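
The loopback wiring above relies on certutil.GenerateSelfSignedCertKey to mint a throwaway serving certificate for the fixed loopback SNI name. A minimal sketch of that helper in isolation ("localhost" is an arbitrary example host, not the value used above):

package main

import (
	"fmt"

	certutil "k8s.io/client-go/util/cert"
)

func main() {
	// Returns PEM-encoded cert and key; alternate IPs and DNS names are optional.
	certPEM, keyPEM, err := certutil.GenerateSelfSignedCertKey("localhost", nil, nil)
	if err != nil {
		panic(err)
	}
	fmt.Printf("cert: %d bytes, key: %d bytes\n", len(certPEM), len(keyPEM))
}
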
+*/ + +package options + +import ( + "context" + "fmt" + "net" + + "github.com/spf13/pflag" + "go.opentelemetry.io/otel/exporters/otlp/otlpgrpc" + "go.opentelemetry.io/otel/sdk/resource" + sdktrace "go.opentelemetry.io/otel/sdk/trace" + "go.opentelemetry.io/otel/semconv" + "google.golang.org/grpc" + + "k8s.io/apiserver/pkg/server" + "k8s.io/apiserver/pkg/server/egressselector" + "k8s.io/apiserver/pkg/tracing" + "k8s.io/component-base/traces" + "k8s.io/utils/path" +) + +const apiserverService = "apiserver" + +// TracingOptions contain configuration options for tracing +// exporters +type TracingOptions struct { + // ConfigFile is the file path with api-server tracing configuration. + ConfigFile string +} + +// NewTracingOptions creates a new instance of TracingOptions +func NewTracingOptions() *TracingOptions { + return &TracingOptions{} +} + +// AddFlags adds flags related to tracing to the specified FlagSet +func (o *TracingOptions) AddFlags(fs *pflag.FlagSet) { + if o == nil { + return + } + + fs.StringVar(&o.ConfigFile, "tracing-config-file", o.ConfigFile, + "File with apiserver tracing configuration.") +} + +// ApplyTo fills up Tracing config with options. +func (o *TracingOptions) ApplyTo(es *egressselector.EgressSelector, c *server.Config) error { + if o == nil || o.ConfigFile == "" { + return nil + } + + npConfig, err := tracing.ReadTracingConfiguration(o.ConfigFile) + if err != nil { + return fmt.Errorf("failed to read tracing config: %v", err) + } + + errs := tracing.ValidateTracingConfiguration(npConfig) + if len(errs) > 0 { + return fmt.Errorf("failed to validate tracing configuration: %v", errs.ToAggregate()) + } + + opts := []otlpgrpc.Option{} + if npConfig.Endpoint != nil { + opts = append(opts, otlpgrpc.WithEndpoint(*npConfig.Endpoint)) + } + if es != nil { + // Only use the egressselector dialer if egressselector is enabled. + // Endpoint is on the "ControlPlane" network + egressDialer, err := es.Lookup(egressselector.ControlPlane.AsNetworkContext()) + if err != nil { + return err + } + + otelDialer := func(ctx context.Context, addr string) (net.Conn, error) { + return egressDialer(ctx, "tcp", addr) + } + opts = append(opts, otlpgrpc.WithDialOption(grpc.WithContextDialer(otelDialer))) + } + + sampler := sdktrace.NeverSample() + if npConfig.SamplingRatePerMillion != nil && *npConfig.SamplingRatePerMillion > 0 { + sampler = sdktrace.TraceIDRatioBased(float64(*npConfig.SamplingRatePerMillion) / float64(1000000)) + } + + resourceOpts := []resource.Option{ + resource.WithAttributes( + semconv.ServiceNameKey.String(apiserverService), + semconv.ServiceInstanceIDKey.String(c.APIServerID), + ), + } + tp := traces.NewProvider(context.Background(), sampler, resourceOpts, opts...) + c.TracerProvider = &tp + if c.LoopbackClientConfig != nil { + c.LoopbackClientConfig.Wrap(traces.WrapperFor(c.TracerProvider)) + } + return nil +} + +// Validate verifies flags passed to TracingOptions. 
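
Before the Validate implementation below: ApplyTo above converts SamplingRatePerMillion into a trace-ID ratio sampler. A small sketch of that conversion, assuming the same OpenTelemetry SDK vendored here (the rate value is an arbitrary example):

package main

import (
	"fmt"

	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	// 10000 samples per million is a ratio of 0.01, i.e. roughly 1% of traces.
	samplingRatePerMillion := int32(10000)
	sampler := sdktrace.TraceIDRatioBased(float64(samplingRatePerMillion) / float64(1000000))
	fmt.Println(sampler.Description())
}
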
+func (o *TracingOptions) Validate() (errs []error) { + if o == nil || o.ConfigFile == "" { + return + } + + if exists, err := path.Exists(path.CheckFollowSymlink, o.ConfigFile); !exists { + errs = append(errs, fmt.Errorf("tracing-config-file %s does not exist", o.ConfigFile)) + } else if err != nil { + errs = append(errs, fmt.Errorf("error checking if tracing-config-file %s exists: %v", o.ConfigFile, err)) + } + return +} diff --git a/vendor/k8s.io/apiserver/pkg/server/resourceconfig/doc.go b/vendor/k8s.io/apiserver/pkg/server/resourceconfig/doc.go new file mode 100644 index 000000000..0dae21535 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/resourceconfig/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package resourceconfig contains the resource config related helper functions. +package resourceconfig // import "k8s.io/apiserver/pkg/server/resourceconfig" diff --git a/vendor/k8s.io/apiserver/pkg/server/resourceconfig/helpers.go b/vendor/k8s.io/apiserver/pkg/server/resourceconfig/helpers.go new file mode 100644 index 000000000..bfcce54b8 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/resourceconfig/helpers.go @@ -0,0 +1,201 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resourceconfig + +import ( + "fmt" + "regexp" + "strconv" + "strings" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + serverstore "k8s.io/apiserver/pkg/server/storage" + cliflag "k8s.io/component-base/cli/flag" + "k8s.io/klog/v2" +) + +// GroupVersionRegistry provides access to registered group versions. +type GroupVersionRegistry interface { + // IsGroupRegistered returns true if given group is registered. + IsGroupRegistered(group string) bool + // IsVersionRegistered returns true if given version is registered. + IsVersionRegistered(v schema.GroupVersion) bool + // PrioritizedVersionsAllGroups returns all registered group versions. + PrioritizedVersionsAllGroups() []schema.GroupVersion +} + +// MergeResourceEncodingConfigs merges the given defaultResourceConfig with specific GroupVersionResource overrides. 
+func MergeResourceEncodingConfigs( + defaultResourceEncoding *serverstore.DefaultResourceEncodingConfig, + resourceEncodingOverrides []schema.GroupVersionResource, +) *serverstore.DefaultResourceEncodingConfig { + resourceEncodingConfig := defaultResourceEncoding + for _, gvr := range resourceEncodingOverrides { + resourceEncodingConfig.SetResourceEncoding(gvr.GroupResource(), gvr.GroupVersion(), + schema.GroupVersion{Group: gvr.Group, Version: runtime.APIVersionInternal}) + } + return resourceEncodingConfig +} + +// Recognized values for the --runtime-config parameter to enable/disable groups of APIs +const ( + APIAll = "api/all" + APIGA = "api/ga" + APIBeta = "api/beta" + APIAlpha = "api/alpha" +) + +var ( + gaPattern = regexp.MustCompile(`^v\d+$`) + betaPattern = regexp.MustCompile(`^v\d+beta\d+$`) + alphaPattern = regexp.MustCompile(`^v\d+alpha\d+$`) + + matchers = map[string]func(gv schema.GroupVersion) bool{ + // allows users to address all api versions + APIAll: func(gv schema.GroupVersion) bool { return true }, + // allows users to address all api versions in the form v[0-9]+ + APIGA: func(gv schema.GroupVersion) bool { return gaPattern.MatchString(gv.Version) }, + // allows users to address all beta api versions + APIBeta: func(gv schema.GroupVersion) bool { return betaPattern.MatchString(gv.Version) }, + // allows users to address all alpha api versions + APIAlpha: func(gv schema.GroupVersion) bool { return alphaPattern.MatchString(gv.Version) }, + } + + matcherOrder = []string{APIAll, APIGA, APIBeta, APIAlpha} +) + +// MergeAPIResourceConfigs merges the given defaultAPIResourceConfig with the given resourceConfigOverrides. +// Exclude the groups not registered in registry, and check if version is +// not registered in group, then it will fail. +func MergeAPIResourceConfigs( + defaultAPIResourceConfig *serverstore.ResourceConfig, + resourceConfigOverrides cliflag.ConfigurationMap, + registry GroupVersionRegistry, +) (*serverstore.ResourceConfig, error) { + resourceConfig := defaultAPIResourceConfig + overrides := resourceConfigOverrides + + for _, flag := range matcherOrder { + if value, ok := overrides[flag]; ok { + if value == "false" { + resourceConfig.DisableMatchingVersions(matchers[flag]) + } else if value == "true" { + resourceConfig.EnableMatchingVersions(matchers[flag]) + } else { + return nil, fmt.Errorf("invalid value %v=%v", flag, value) + } + } + } + + // "={true|false} allows users to enable/disable API. + // This takes preference over api/all, if specified. + // Iterate through all group/version overrides specified in runtimeConfig. + for key := range overrides { + // Have already handled them above. Can skip them here. + if _, ok := matchers[key]; ok { + continue + } + + tokens := strings.Split(key, "/") + if len(tokens) < 2 { + continue + } + groupVersionString := tokens[0] + "/" + tokens[1] + groupVersion, err := schema.ParseGroupVersion(groupVersionString) + if err != nil { + return nil, fmt.Errorf("invalid key %s", key) + } + + // individual resource enablement/disablement is only supported in the extensions/v1beta1 API group for legacy reasons. + // all other API groups are expected to contain coherent sets of resources that are enabled/disabled together. 
+ if len(tokens) > 2 && (groupVersion != schema.GroupVersion{Group: "extensions", Version: "v1beta1"}) { + klog.Warningf("ignoring invalid key %s, individual resource enablement/disablement is not supported in %s, and will prevent starting in future releases", key, groupVersion.String()) + continue + } + + // Exclude group not registered into the registry. + if !registry.IsGroupRegistered(groupVersion.Group) { + continue + } + + // Verify that the groupVersion is registered into registry. + if !registry.IsVersionRegistered(groupVersion) { + return nil, fmt.Errorf("group version %s that has not been registered", groupVersion.String()) + } + enabled, err := getRuntimeConfigValue(overrides, key, false) + if err != nil { + return nil, err + } + if enabled { + // enable the groupVersion for "group/version=true" and "group/version/resource=true" + resourceConfig.EnableVersions(groupVersion) + } else if len(tokens) == 2 { + // disable the groupVersion only for "group/version=false", not "group/version/resource=false" + resourceConfig.DisableVersions(groupVersion) + } + + if len(tokens) < 3 { + continue + } + groupVersionResource := groupVersion.WithResource(tokens[2]) + if enabled { + resourceConfig.EnableResources(groupVersionResource) + } else { + resourceConfig.DisableResources(groupVersionResource) + } + } + + return resourceConfig, nil +} + +func getRuntimeConfigValue(overrides cliflag.ConfigurationMap, apiKey string, defaultValue bool) (bool, error) { + flagValue, ok := overrides[apiKey] + if ok { + if flagValue == "" { + return true, nil + } + boolValue, err := strconv.ParseBool(flagValue) + if err != nil { + return false, fmt.Errorf("invalid value of %s: %s, err: %v", apiKey, flagValue, err) + } + return boolValue, nil + } + return defaultValue, nil +} + +// ParseGroups takes in resourceConfig and returns parsed groups. +func ParseGroups(resourceConfig cliflag.ConfigurationMap) ([]string, error) { + groups := []string{} + for key := range resourceConfig { + if _, ok := matchers[key]; ok { + continue + } + tokens := strings.Split(key, "/") + if len(tokens) != 2 && len(tokens) != 3 { + return groups, fmt.Errorf("runtime-config invalid key %s", key) + } + groupVersionString := tokens[0] + "/" + tokens[1] + groupVersion, err := schema.ParseGroupVersion(groupVersionString) + if err != nil { + return nil, fmt.Errorf("runtime-config invalid key %s", key) + } + groups = append(groups, groupVersion.Group) + } + + return groups, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go b/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go new file mode 100644 index 000000000..03efddb5b --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go @@ -0,0 +1,1472 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
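
The api/ga, api/beta, and api/alpha matchers above classify group versions purely by the shape of the version name. A self-contained demonstration of the same three patterns against sample versions:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	ga := regexp.MustCompile(`^v\d+$`)
	beta := regexp.MustCompile(`^v\d+beta\d+$`)
	alpha := regexp.MustCompile(`^v\d+alpha\d+$`)

	for _, v := range []string{"v1", "v2beta1", "v1alpha3"} {
		fmt.Printf("%-9s ga=%-5v beta=%-5v alpha=%v\n",
			v, ga.MatchString(v), beta.MatchString(v), alpha.MatchString(v))
	}
}
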
+*/
+
+package cacher
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"reflect"
+	"sync"
+	"time"
+
+	"k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/conversion"
+	"k8s.io/apimachinery/pkg/fields"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/util/clock"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/apimachinery/pkg/watch"
+	"k8s.io/apiserver/pkg/features"
+	"k8s.io/apiserver/pkg/storage"
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	utilflowcontrol "k8s.io/apiserver/pkg/util/flowcontrol"
+	"k8s.io/client-go/tools/cache"
+	"k8s.io/klog/v2"
+	utiltrace "k8s.io/utils/trace"
+)
+
+var (
+	emptyFunc = func() {}
+)
+
+const (
+	// storageWatchListPageSize is the cacher's request chunk size of
+	// initial and resync watch lists to storage.
+	storageWatchListPageSize = int64(10000)
+	// defaultBookmarkFrequency defines how frequently watch bookmarks should be sent
+	// in addition to sending a bookmark right before the watch deadline.
+	//
+	// NOTE: Update `eventFreshDuration` when changing this value.
+	defaultBookmarkFrequency = time.Minute
+)
+
+// Config contains the configuration for a given Cacher.
+type Config struct {
+	// An underlying storage.Interface.
+	Storage storage.Interface
+
+	// An underlying storage.Versioner.
+	Versioner storage.Versioner
+
+	// The Cacher will be caching objects of a given Type and assumes that they
+	// are all stored under the ResourcePrefix directory in the underlying database.
+	ResourcePrefix string
+
+	// KeyFunc is used to get a key in the underlying storage for a given object.
+	KeyFunc func(runtime.Object) (string, error)
+
+	// GetAttrsFunc is used to get object labels and fields.
+	GetAttrsFunc func(runtime.Object) (label labels.Set, field fields.Set, err error)
+
+	// IndexerFuncs is used for reducing the number of watchers that
+	// need to process an incoming event.
+	IndexerFuncs storage.IndexerFuncs
+
+	// Indexers is used to accelerate the list operation, falling back to the regular
+	// list operation if no indexer is found.
+	Indexers *cache.Indexers
+
+	// NewFunc is a function that creates a new empty object of the type stored in this cache.
+	NewFunc func() runtime.Object
+
+	// NewListFunc is a function that creates a new empty list object for
+	// storing objects of the stored type.
+ NewListFunc func() runtime.Object + + Codec runtime.Codec + + Clock clock.Clock +} + +type watchersMap map[int]*cacheWatcher + +func (wm watchersMap) addWatcher(w *cacheWatcher, number int) { + wm[number] = w +} + +func (wm watchersMap) deleteWatcher(number int, done func(*cacheWatcher)) { + if watcher, ok := wm[number]; ok { + delete(wm, number) + done(watcher) + } +} + +func (wm watchersMap) terminateAll(done func(*cacheWatcher)) { + for key, watcher := range wm { + delete(wm, key) + done(watcher) + } +} + +type indexedWatchers struct { + allWatchers watchersMap + valueWatchers map[string]watchersMap +} + +func (i *indexedWatchers) addWatcher(w *cacheWatcher, number int, value string, supported bool) { + if supported { + if _, ok := i.valueWatchers[value]; !ok { + i.valueWatchers[value] = watchersMap{} + } + i.valueWatchers[value].addWatcher(w, number) + } else { + i.allWatchers.addWatcher(w, number) + } +} + +func (i *indexedWatchers) deleteWatcher(number int, value string, supported bool, done func(*cacheWatcher)) { + if supported { + i.valueWatchers[value].deleteWatcher(number, done) + if len(i.valueWatchers[value]) == 0 { + delete(i.valueWatchers, value) + } + } else { + i.allWatchers.deleteWatcher(number, done) + } +} + +func (i *indexedWatchers) terminateAll(objectType reflect.Type, done func(*cacheWatcher)) { + if len(i.allWatchers) > 0 || len(i.valueWatchers) > 0 { + klog.Warningf("Terminating all watchers from cacher %v", objectType) + } + i.allWatchers.terminateAll(done) + for _, watchers := range i.valueWatchers { + watchers.terminateAll(done) + } + i.valueWatchers = map[string]watchersMap{} +} + +// As we don't need a high precision here, we keep all watchers timeout within a +// second in a bucket, and pop up them once at the timeout. To be more specific, +// if you set fire time at X, you can get the bookmark within (X-1,X+1) period. +type watcherBookmarkTimeBuckets struct { + lock sync.Mutex + // the key of watcherBuckets is the number of seconds since createTime + watchersBuckets map[int64][]*cacheWatcher + createTime time.Time + startBucketID int64 + clock clock.Clock + bookmarkFrequency time.Duration +} + +func newTimeBucketWatchers(clock clock.Clock, bookmarkFrequency time.Duration) *watcherBookmarkTimeBuckets { + return &watcherBookmarkTimeBuckets{ + watchersBuckets: make(map[int64][]*cacheWatcher), + createTime: clock.Now(), + startBucketID: 0, + clock: clock, + bookmarkFrequency: bookmarkFrequency, + } +} + +// adds a watcher to the bucket, if the deadline is before the start, it will be +// added to the first one. 
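+// For example, with createTime = T0 and a next bookmark due at T0+65s, the
+// watcher lands in bucket int64(65*time.Second/time.Second) = 65, and
+// popExpiredWatchers returns it once clock.Since(createTime) first reaches
+// 65s, i.e. within about a second of the requested fire time.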
+func (t *watcherBookmarkTimeBuckets) addWatcher(w *cacheWatcher) bool { + nextTime, ok := w.nextBookmarkTime(t.clock.Now(), t.bookmarkFrequency) + if !ok { + return false + } + bucketID := int64(nextTime.Sub(t.createTime) / time.Second) + t.lock.Lock() + defer t.lock.Unlock() + if bucketID < t.startBucketID { + bucketID = t.startBucketID + } + watchers, _ := t.watchersBuckets[bucketID] + t.watchersBuckets[bucketID] = append(watchers, w) + return true +} + +func (t *watcherBookmarkTimeBuckets) popExpiredWatchers() [][]*cacheWatcher { + currentBucketID := int64(t.clock.Since(t.createTime) / time.Second) + // There should be one or two elements in almost all cases + expiredWatchers := make([][]*cacheWatcher, 0, 2) + t.lock.Lock() + defer t.lock.Unlock() + for ; t.startBucketID <= currentBucketID; t.startBucketID++ { + if watchers, ok := t.watchersBuckets[t.startBucketID]; ok { + delete(t.watchersBuckets, t.startBucketID) + expiredWatchers = append(expiredWatchers, watchers) + } + } + return expiredWatchers +} + +type filterWithAttrsFunc func(key string, l labels.Set, f fields.Set) bool + +type indexedTriggerFunc struct { + indexName string + indexerFunc storage.IndexerFunc +} + +// Cacher is responsible for serving WATCH and LIST requests for a given +// resource from its internal cache and updating its cache in the background +// based on the underlying storage contents. +// Cacher implements storage.Interface (although most of the calls are just +// delegated to the underlying storage). +type Cacher struct { + // HighWaterMarks for performance debugging. + // Important: Since HighWaterMark is using sync/atomic, it has to be at the top of the struct due to a bug on 32-bit platforms + // See: https://golang.org/pkg/sync/atomic/ for more information + incomingHWM storage.HighWaterMark + // Incoming events that should be dispatched to watchers. + incoming chan watchCacheEvent + + sync.RWMutex + + // Before accessing the cacher's cache, wait for the ready to be ok. + // This is necessary to prevent users from accessing structures that are + // uninitialized or are being repopulated right now. + // ready needs to be set to false when the cacher is paused or stopped. + // ready needs to be set to true when the cacher is ready to use after + // initialization. + ready *ready + + // Underlying storage.Interface. + storage storage.Interface + + // Expected type of objects in the underlying cache. + objectType reflect.Type + + // "sliding window" of recent changes of objects and the current state. + watchCache *watchCache + reflector *cache.Reflector + + // Versioner is used to handle resource versions. + versioner storage.Versioner + + // newFunc is a function that creates new empty object storing a object of type Type. + newFunc func() runtime.Object + + // indexedTrigger is used for optimizing amount of watchers that needs to process + // an incoming event. + indexedTrigger *indexedTriggerFunc + // watchers is mapping from the value of trigger function that a + // watcher is interested into the watchers + watcherIdx int + watchers indexedWatchers + + // Defines a time budget that can be spend on waiting for not-ready watchers + // while dispatching event before shutting them down. + dispatchTimeoutBudget timeBudget + + // Handling graceful termination. + stopLock sync.RWMutex + stopped bool + stopCh chan struct{} + stopWg sync.WaitGroup + + clock clock.Clock + // timer is used to avoid unnecessary allocations in underlying watchers. 
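+	// A single reusable timer serves all blocked-watcher sends in
+	// dispatchEvent; it is Reset for each dispatch and drained when it
+	// did not fire.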
+ timer *time.Timer + + // dispatching determines whether there is currently dispatching of + // any event in flight. + dispatching bool + // watchersBuffer is a list of watchers potentially interested in currently + // dispatched event. + watchersBuffer []*cacheWatcher + // blockedWatchers is a list of watchers whose buffer is currently full. + blockedWatchers []*cacheWatcher + // watchersToStop is a list of watchers that were supposed to be stopped + // during current dispatching, but stopping was deferred to the end of + // dispatching that event to avoid race with closing channels in watchers. + watchersToStop []*cacheWatcher + // Maintain a timeout queue to send the bookmark event before the watcher times out. + bookmarkWatchers *watcherBookmarkTimeBuckets +} + +// NewCacherFromConfig creates a new Cacher responsible for servicing WATCH and LIST requests from +// its internal cache and updating its cache in the background based on the +// given configuration. +func NewCacherFromConfig(config Config) (*Cacher, error) { + stopCh := make(chan struct{}) + obj := config.NewFunc() + // Give this error when it is constructed rather than when you get the + // first watch item, because it's much easier to track down that way. + if err := runtime.CheckCodec(config.Codec, obj); err != nil { + return nil, fmt.Errorf("storage codec doesn't seem to match given type: %v", err) + } + + var indexedTrigger *indexedTriggerFunc + if config.IndexerFuncs != nil { + // For now, we don't support multiple trigger functions defined + // for a given resource. + if len(config.IndexerFuncs) > 1 { + return nil, fmt.Errorf("cacher %s doesn't support more than one IndexerFunc: ", reflect.TypeOf(obj).String()) + } + for key, value := range config.IndexerFuncs { + if value != nil { + indexedTrigger = &indexedTriggerFunc{ + indexName: key, + indexerFunc: value, + } + } + } + } + + if config.Clock == nil { + config.Clock = clock.RealClock{} + } + objType := reflect.TypeOf(obj) + cacher := &Cacher{ + ready: newReady(), + storage: config.Storage, + objectType: objType, + versioner: config.Versioner, + newFunc: config.NewFunc, + indexedTrigger: indexedTrigger, + watcherIdx: 0, + watchers: indexedWatchers{ + allWatchers: make(map[int]*cacheWatcher), + valueWatchers: make(map[string]watchersMap), + }, + // TODO: Figure out the correct value for the buffer size. + incoming: make(chan watchCacheEvent, 100), + dispatchTimeoutBudget: newTimeBudget(stopCh), + // We need to (potentially) stop both: + // - wait.Until go-routine + // - reflector.ListAndWatch + // and there are no guarantees on the order that they will stop. + // So we will be simply closing the channel, and synchronizing on the WaitGroup. + stopCh: stopCh, + clock: config.Clock, + timer: time.NewTimer(time.Duration(0)), + bookmarkWatchers: newTimeBucketWatchers(config.Clock, defaultBookmarkFrequency), + } + + // Ensure that timer is stopped. + if !cacher.timer.Stop() { + // Consume triggered (but not yet received) timer event + // so that future reuse does not get a spurious timeout. 
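+		// (Per the time.Timer contract, Stop reports false when the timer
+		// has already fired, so the channel holds exactly one stale tick here.)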
+ <-cacher.timer.C + } + + watchCache := newWatchCache( + config.KeyFunc, cacher.processEvent, config.GetAttrsFunc, config.Versioner, config.Indexers, config.Clock, objType) + listerWatcher := NewCacherListerWatcher(config.Storage, config.ResourcePrefix, config.NewListFunc) + reflectorName := "storage/cacher.go:" + config.ResourcePrefix + + reflector := cache.NewNamedReflector(reflectorName, listerWatcher, obj, watchCache, 0) + // Configure reflector's pager to for an appropriate pagination chunk size for fetching data from + // storage. The pager falls back to full list if paginated list calls fail due to an "Expired" error. + reflector.WatchListPageSize = storageWatchListPageSize + + cacher.watchCache = watchCache + cacher.reflector = reflector + + go cacher.dispatchEvents() + + cacher.stopWg.Add(1) + go func() { + defer cacher.stopWg.Done() + defer cacher.terminateAllWatchers() + wait.Until( + func() { + if !cacher.isStopped() { + cacher.startCaching(stopCh) + } + }, time.Second, stopCh, + ) + }() + + return cacher, nil +} + +func (c *Cacher) startCaching(stopChannel <-chan struct{}) { + // The 'usable' lock is always 'RLock'able when it is safe to use the cache. + // It is safe to use the cache after a successful list until a disconnection. + // We start with usable (write) locked. The below OnReplace function will + // unlock it after a successful list. The below defer will then re-lock + // it when this function exits (always due to disconnection), only if + // we actually got a successful list. This cycle will repeat as needed. + successfulList := false + c.watchCache.SetOnReplace(func() { + successfulList = true + c.ready.set(true) + klog.V(1).Infof("cacher (%v): initialized", c.objectType.String()) + }) + defer func() { + if successfulList { + c.ready.set(false) + } + }() + + c.terminateAllWatchers() + // Note that since onReplace may be not called due to errors, we explicitly + // need to retry it on errors under lock. + // Also note that startCaching is called in a loop, so there's no need + // to have another loop here. + if err := c.reflector.ListAndWatch(stopChannel); err != nil { + klog.Errorf("cacher (%v): unexpected ListAndWatch error: %v; reinitializing...", c.objectType.String(), err) + } +} + +// Versioner implements storage.Interface. +func (c *Cacher) Versioner() storage.Versioner { + return c.storage.Versioner() +} + +// Create implements storage.Interface. +func (c *Cacher) Create(ctx context.Context, key string, obj, out runtime.Object, ttl uint64) error { + return c.storage.Create(ctx, key, obj, out, ttl) +} + +// Delete implements storage.Interface. +func (c *Cacher) Delete( + ctx context.Context, key string, out runtime.Object, preconditions *storage.Preconditions, + validateDeletion storage.ValidateObjectFunc, _ runtime.Object) error { + // Ignore the suggestion and try to pass down the current version of the object + // read from cache. + if elem, exists, err := c.watchCache.GetByKey(key); err != nil { + klog.Errorf("GetByKey returned error: %v", err) + } else if exists { + // DeepCopy the object since we modify resource version when serializing the + // current object. + currObj := elem.(*storeElement).Object.DeepCopyObject() + return c.storage.Delete(ctx, key, out, preconditions, validateDeletion, currObj) + } + // If we couldn't get the object, fallback to no-suggestion. + return c.storage.Delete(ctx, key, out, preconditions, validateDeletion, nil) +} + +// Watch implements storage.Interface. 
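+//
+// A minimal usage sketch (illustrative only; the key and options shown are
+// assumptions, not taken from this file):
+//
+//	w, err := cacher.Watch(ctx, "/pods/ns1", storage.ListOptions{
+//		ResourceVersion: "1234",
+//		Predicate:       storage.Everything,
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	defer w.Stop()
+//	for ev := range w.ResultChan() {
+//		_ = ev // consume watch events
+//	}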
+func (c *Cacher) Watch(ctx context.Context, key string, opts storage.ListOptions) (watch.Interface, error) { + pred := opts.Predicate + watchRV, err := c.versioner.ParseResourceVersion(opts.ResourceVersion) + if err != nil { + return nil, err + } + + c.ready.wait() + + triggerValue, triggerSupported := "", false + if c.indexedTrigger != nil { + for _, field := range pred.IndexFields { + if field == c.indexedTrigger.indexName { + if value, ok := pred.Field.RequiresExactMatch(field); ok { + triggerValue, triggerSupported = value, true + } + } + } + } + + // If there is indexedTrigger defined, but triggerSupported is false, + // we can't narrow the amount of events significantly at this point. + // + // That said, currently indexedTrigger is defined only for couple resources: + // Pods, Nodes, Secrets and ConfigMaps and there is only a constant + // number of watchers for which triggerSupported is false (excluding those + // issued explicitly by users). + // Thus, to reduce the risk of those watchers blocking all watchers of a + // given resource in the system, we increase the sizes of buffers for them. + chanSize := 10 + if c.indexedTrigger != nil && !triggerSupported { + // TODO: We should tune this value and ideally make it dependent on the + // number of objects of a given type and/or their churn. + chanSize = 1000 + } + + // Determine watch timeout('0' means deadline is not set, ignore checking) + deadline, _ := ctx.Deadline() + + identifier := fmt.Sprintf("key: %q, labels: %q, fields: %q", key, pred.Label, pred.Field) + + // Create a watcher here to reduce memory allocations under lock, + // given that memory allocation may trigger GC and block the thread. + // Also note that emptyFunc is a placeholder, until we will be able + // to compute watcher.forget function (which has to happen under lock). + watcher := newCacheWatcher(chanSize, filterWithAttrsFunction(key, pred), emptyFunc, c.versioner, deadline, pred.AllowWatchBookmarks, c.objectType, identifier) + + // We explicitly use thread unsafe version and do locking ourself to ensure that + // no new events will be processed in the meantime. The watchCache will be unlocked + // on return from this function. + // Note that we cannot do it under Cacher lock, to avoid a deadlock, since the + // underlying watchCache is calling processEvent under its lock. + c.watchCache.RLock() + defer c.watchCache.RUnlock() + initEvents, err := c.watchCache.GetAllEventsSinceThreadUnsafe(watchRV) + if err != nil { + // To match the uncached watch implementation, once we have passed authn/authz/admission, + // and successfully parsed a resource version, other errors must fail with a watch event of type ERROR, + // rather than a directly returned error. + return newErrWatcher(err), nil + } + + // With some events already sent, update resourceVersion so that + // events that were buffered and not yet processed won't be delivered + // to this watcher second time causing going back in time. + if len(initEvents) > 0 { + watchRV = initEvents[len(initEvents)-1].ResourceVersion + } + + func() { + c.Lock() + defer c.Unlock() + // Update watcher.forget function once we can compute it. + watcher.forget = forgetWatcher(c, c.watcherIdx, triggerValue, triggerSupported) + c.watchers.addWatcher(watcher, c.watcherIdx, triggerValue, triggerSupported) + + // Add it to the queue only when the client support watch bookmarks. 
+ if watcher.allowWatchBookmarks { + c.bookmarkWatchers.addWatcher(watcher) + } + c.watcherIdx++ + }() + + go watcher.process(ctx, initEvents, watchRV) + return watcher, nil +} + +// WatchList implements storage.Interface. +func (c *Cacher) WatchList(ctx context.Context, key string, opts storage.ListOptions) (watch.Interface, error) { + return c.Watch(ctx, key, opts) +} + +// Get implements storage.Interface. +func (c *Cacher) Get(ctx context.Context, key string, opts storage.GetOptions, objPtr runtime.Object) error { + if opts.ResourceVersion == "" { + // If resourceVersion is not specified, serve it from underlying + // storage (for backward compatibility). + return c.storage.Get(ctx, key, opts, objPtr) + } + + // If resourceVersion is specified, serve it from cache. + // It's guaranteed that the returned value is at least that + // fresh as the given resourceVersion. + getRV, err := c.versioner.ParseResourceVersion(opts.ResourceVersion) + if err != nil { + return err + } + + if getRV == 0 && !c.ready.check() { + // If Cacher is not yet initialized and we don't require any specific + // minimal resource version, simply forward the request to storage. + return c.storage.Get(ctx, key, opts, objPtr) + } + + // Do not create a trace - it's not for free and there are tons + // of Get requests. We can add it if it will be really needed. + c.ready.wait() + + objVal, err := conversion.EnforcePtr(objPtr) + if err != nil { + return err + } + + obj, exists, readResourceVersion, err := c.watchCache.WaitUntilFreshAndGet(getRV, key, nil) + if err != nil { + return err + } + + if exists { + elem, ok := obj.(*storeElement) + if !ok { + return fmt.Errorf("non *storeElement returned from storage: %v", obj) + } + objVal.Set(reflect.ValueOf(elem.Object).Elem()) + } else { + objVal.Set(reflect.Zero(objVal.Type())) + if !opts.IgnoreNotFound { + return storage.NewKeyNotFoundError(key, int64(readResourceVersion)) + } + } + return nil +} + +func shouldDelegateList(opts storage.ListOptions) bool { + resourceVersion := opts.ResourceVersion + pred := opts.Predicate + pagingEnabled := utilfeature.DefaultFeatureGate.Enabled(features.APIListChunking) + hasContinuation := pagingEnabled && len(pred.Continue) > 0 + hasLimit := pagingEnabled && pred.Limit > 0 && resourceVersion != "0" + + // If resourceVersion is not specified, serve it from underlying + // storage (for backward compatibility). If a continuation is + // requested, serve it from the underlying storage as well. + // Limits are only sent to storage when resourceVersion is non-zero + // since the watch cache isn't able to perform continuations, and + // limits are ignored when resource version is zero + return resourceVersion == "" || hasContinuation || hasLimit || opts.ResourceVersionMatch == metav1.ResourceVersionMatchExact +} + +// GetToList implements storage.Interface. +func (c *Cacher) GetToList(ctx context.Context, key string, opts storage.ListOptions, listObj runtime.Object) error { + resourceVersion := opts.ResourceVersion + pred := opts.Predicate + if shouldDelegateList(opts) { + return c.storage.GetToList(ctx, key, opts, listObj) + } + + // If resourceVersion is specified, serve it from cache. + // It's guaranteed that the returned value is at least that + // fresh as the given resourceVersion. 
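+	// (Concretely, the WaitUntilFreshAndGet call below blocks until the
+	// watch cache has observed a resourceVersion >= listRV.)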
+ listRV, err := c.versioner.ParseResourceVersion(resourceVersion) + if err != nil { + return err + } + + if listRV == 0 && !c.ready.check() { + // If Cacher is not yet initialized and we don't require any specific + // minimal resource version, simply forward the request to storage. + return c.storage.GetToList(ctx, key, opts, listObj) + } + + trace := utiltrace.New("cacher list", utiltrace.Field{"type", c.objectType.String()}) + defer trace.LogIfLong(500 * time.Millisecond) + + c.ready.wait() + trace.Step("Ready") + + // List elements with at least 'listRV' from cache. + listPtr, err := meta.GetItemsPtr(listObj) + if err != nil { + return err + } + listVal, err := conversion.EnforcePtr(listPtr) + if err != nil { + return err + } + if listVal.Kind() != reflect.Slice { + return fmt.Errorf("need a pointer to slice, got %v", listVal.Kind()) + } + filter := filterWithAttrsFunction(key, pred) + + obj, exists, readResourceVersion, err := c.watchCache.WaitUntilFreshAndGet(listRV, key, trace) + if err != nil { + return err + } + trace.Step("Got from cache") + + if exists { + elem, ok := obj.(*storeElement) + if !ok { + return fmt.Errorf("non *storeElement returned from storage: %v", obj) + } + if filter(elem.Key, elem.Labels, elem.Fields) { + listVal.Set(reflect.Append(listVal, reflect.ValueOf(elem.Object).Elem())) + } + } + if c.versioner != nil { + if err := c.versioner.UpdateList(listObj, readResourceVersion, "", nil); err != nil { + return err + } + } + return nil +} + +// List implements storage.Interface. +func (c *Cacher) List(ctx context.Context, key string, opts storage.ListOptions, listObj runtime.Object) error { + resourceVersion := opts.ResourceVersion + pred := opts.Predicate + if shouldDelegateList(opts) { + return c.storage.List(ctx, key, opts, listObj) + } + + // If resourceVersion is specified, serve it from cache. + // It's guaranteed that the returned value is at least that + // fresh as the given resourceVersion. + listRV, err := c.versioner.ParseResourceVersion(resourceVersion) + if err != nil { + return err + } + + if listRV == 0 && !c.ready.check() { + // If Cacher is not yet initialized and we don't require any specific + // minimal resource version, simply forward the request to storage. + return c.storage.List(ctx, key, opts, listObj) + } + + trace := utiltrace.New("cacher list", utiltrace.Field{"type", c.objectType.String()}) + defer trace.LogIfLong(500 * time.Millisecond) + + c.ready.wait() + trace.Step("Ready") + + // List elements with at least 'listRV' from cache. + listPtr, err := meta.GetItemsPtr(listObj) + if err != nil { + return err + } + listVal, err := conversion.EnforcePtr(listPtr) + if err != nil { + return err + } + if listVal.Kind() != reflect.Slice { + return fmt.Errorf("need a pointer to slice, got %v", listVal.Kind()) + } + filter := filterWithAttrsFunction(key, pred) + + objs, readResourceVersion, err := c.watchCache.WaitUntilFreshAndList(listRV, pred.MatcherIndex(), trace) + if err != nil { + return err + } + trace.Step("Listed items from cache", utiltrace.Field{"count", len(objs)}) + if len(objs) > listVal.Cap() && pred.Label.Empty() && pred.Field.Empty() { + // Resize the slice appropriately, since we already know that none + // of the elements will be filtered out. 
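+		// (reflect.MakeSlice allocates a zero-length slice with capacity
+		// len(objs), so the Appends in the loop below never reallocate.)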
+ listVal.Set(reflect.MakeSlice(reflect.SliceOf(c.objectType.Elem()), 0, len(objs))) + trace.Step("Resized result") + } + for _, obj := range objs { + elem, ok := obj.(*storeElement) + if !ok { + return fmt.Errorf("non *storeElement returned from storage: %v", obj) + } + if filter(elem.Key, elem.Labels, elem.Fields) { + listVal.Set(reflect.Append(listVal, reflect.ValueOf(elem.Object).Elem())) + } + } + trace.Step("Filtered items", utiltrace.Field{"count", listVal.Len()}) + if c.versioner != nil { + if err := c.versioner.UpdateList(listObj, readResourceVersion, "", nil); err != nil { + return err + } + } + return nil +} + +// GuaranteedUpdate implements storage.Interface. +func (c *Cacher) GuaranteedUpdate( + ctx context.Context, key string, ptrToType runtime.Object, ignoreNotFound bool, + preconditions *storage.Preconditions, tryUpdate storage.UpdateFunc, _ runtime.Object) error { + // Ignore the suggestion and try to pass down the current version of the object + // read from cache. + if elem, exists, err := c.watchCache.GetByKey(key); err != nil { + klog.Errorf("GetByKey returned error: %v", err) + } else if exists { + // DeepCopy the object since we modify resource version when serializing the + // current object. + currObj := elem.(*storeElement).Object.DeepCopyObject() + return c.storage.GuaranteedUpdate(ctx, key, ptrToType, ignoreNotFound, preconditions, tryUpdate, currObj) + } + // If we couldn't get the object, fallback to no-suggestion. + return c.storage.GuaranteedUpdate(ctx, key, ptrToType, ignoreNotFound, preconditions, tryUpdate, nil) +} + +// Count implements storage.Interface. +func (c *Cacher) Count(pathPrefix string) (int64, error) { + return c.storage.Count(pathPrefix) +} + +// baseObjectThreadUnsafe omits locking for cachingObject. +func baseObjectThreadUnsafe(object runtime.Object) runtime.Object { + if co, ok := object.(*cachingObject); ok { + return co.object + } + return object +} + +func (c *Cacher) triggerValuesThreadUnsafe(event *watchCacheEvent) ([]string, bool) { + if c.indexedTrigger == nil { + return nil, false + } + + result := make([]string, 0, 2) + result = append(result, c.indexedTrigger.indexerFunc(baseObjectThreadUnsafe(event.Object))) + if event.PrevObject == nil { + return result, true + } + prevTriggerValue := c.indexedTrigger.indexerFunc(baseObjectThreadUnsafe(event.PrevObject)) + if result[0] != prevTriggerValue { + result = append(result, prevTriggerValue) + } + return result, true +} + +func (c *Cacher) processEvent(event *watchCacheEvent) { + if curLen := int64(len(c.incoming)); c.incomingHWM.Update(curLen) { + // Monitor if this gets backed up, and how much. + klog.V(1).Infof("cacher (%v): %v objects queued in incoming channel.", c.objectType.String(), curLen) + } + c.incoming <- *event +} + +func (c *Cacher) dispatchEvents() { + // Jitter to help level out any aggregate load. + bookmarkTimer := c.clock.NewTimer(wait.Jitter(time.Second, 0.25)) + defer bookmarkTimer.Stop() + + lastProcessedResourceVersion := uint64(0) + for { + select { + case event, ok := <-c.incoming: + if !ok { + return + } + // Don't dispatch bookmarks coming from the storage layer. + // They can be very frequent (even to the level of subseconds) + // to allow efficient watch resumption on kube-apiserver restarts, + // and propagating them down may overload the whole system. + // + // TODO: If at some point we decide the performance and scalability + // footprint is acceptable, this is the place to hook them in. 
+ // However, we then need to check if this was called as a result + // of a bookmark event or regular Add/Update/Delete operation by + // checking if resourceVersion here has changed. + if event.Type != watch.Bookmark { + c.dispatchEvent(&event) + } + lastProcessedResourceVersion = event.ResourceVersion + case <-bookmarkTimer.C(): + bookmarkTimer.Reset(wait.Jitter(time.Second, 0.25)) + // Never send a bookmark event if we did not see an event here, this is fine + // because we don't provide any guarantees on sending bookmarks. + if lastProcessedResourceVersion == 0 { + // pop expired watchers in case there has been no update + c.bookmarkWatchers.popExpiredWatchers() + continue + } + bookmarkEvent := &watchCacheEvent{ + Type: watch.Bookmark, + Object: c.newFunc(), + ResourceVersion: lastProcessedResourceVersion, + } + if err := c.versioner.UpdateObject(bookmarkEvent.Object, bookmarkEvent.ResourceVersion); err != nil { + klog.Errorf("failure to set resourceVersion to %d on bookmark event %+v", bookmarkEvent.ResourceVersion, bookmarkEvent.Object) + continue + } + c.dispatchEvent(bookmarkEvent) + case <-c.stopCh: + return + } + } +} + +func setCachingObjects(event *watchCacheEvent, versioner storage.Versioner) { + switch event.Type { + case watch.Added, watch.Modified: + if object, err := newCachingObject(event.Object); err == nil { + event.Object = object + } else { + klog.Errorf("couldn't create cachingObject from: %#v", event.Object) + } + // Don't wrap PrevObject for update event (for create events it is nil). + // We only encode those to deliver DELETE watch events, so if + // event.Object is not nil it can be used only for watchers for which + // selector was satisfied for its previous version and is no longer + // satisfied for the current version. + // This is rare enough that it doesn't justify making deep-copy of the + // object (done by newCachingObject) every time. + case watch.Deleted: + // Don't wrap Object for delete events - these are not to deliver any + // events. Only wrap PrevObject. + if object, err := newCachingObject(event.PrevObject); err == nil { + // Update resource version of the underlying object. + // event.PrevObject is used to deliver DELETE watch events and + // for them, we set resourceVersion to instead of + // the resourceVersion of the last modification of the object. + updateResourceVersionIfNeeded(object.object, versioner, event.ResourceVersion) + event.PrevObject = object + } else { + klog.Errorf("couldn't create cachingObject from: %#v", event.Object) + } + } +} + +func (c *Cacher) dispatchEvent(event *watchCacheEvent) { + c.startDispatching(event) + defer c.finishDispatching() + // Watchers stopped after startDispatching will be delayed to finishDispatching, + + // Since add() can block, we explicitly add when cacher is unlocked. + // Dispatching event in nonblocking way first, which make faster watchers + // not be blocked by slower ones. + if event.Type == watch.Bookmark { + for _, watcher := range c.watchersBuffer { + watcher.nonblockingAdd(event) + } + } else { + // Set up caching of object serializations only for dispatching this event. + // + // Storing serializations in memory would result in increased memory usage, + // but it would help for caching encodings for watches started from old + // versions. However, we still don't have a convincing data that the gain + // from it justifies increased memory usage, so for now we drop the cached + // serializations after dispatching this event. 
+ // + // Given the deep-copies that are done to create cachingObjects, + // we try to cache serializations only if there are at least 3 watchers. + if len(c.watchersBuffer) >= 3 { + // Make a shallow copy to allow overwriting Object and PrevObject. + wcEvent := *event + setCachingObjects(&wcEvent, c.versioner) + event = &wcEvent + } + + c.blockedWatchers = c.blockedWatchers[:0] + for _, watcher := range c.watchersBuffer { + if !watcher.nonblockingAdd(event) { + c.blockedWatchers = append(c.blockedWatchers, watcher) + } + } + + if len(c.blockedWatchers) > 0 { + // dispatchEvent is called very often, so arrange + // to reuse timers instead of constantly allocating. + startTime := time.Now() + timeout := c.dispatchTimeoutBudget.takeAvailable() + c.timer.Reset(timeout) + + // Make sure every watcher will try to send event without blocking first, + // even if the timer has already expired. + timer := c.timer + for _, watcher := range c.blockedWatchers { + if !watcher.add(event, timer) { + // fired, clean the timer by set it to nil. + timer = nil + } + } + + // Stop the timer if it is not fired + if timer != nil && !timer.Stop() { + // Consume triggered (but not yet received) timer event + // so that future reuse does not get a spurious timeout. + <-timer.C + } + + c.dispatchTimeoutBudget.returnUnused(timeout - time.Since(startTime)) + } + } +} + +func (c *Cacher) startDispatchingBookmarkEvents() { + // Pop already expired watchers. However, explicitly ignore stopped ones, + // as we don't delete watcher from bookmarkWatchers when it is stopped. + for _, watchers := range c.bookmarkWatchers.popExpiredWatchers() { + for _, watcher := range watchers { + // c.Lock() is held here. + // watcher.stopThreadUnsafe() is protected by c.Lock() + if watcher.stopped { + continue + } + c.watchersBuffer = append(c.watchersBuffer, watcher) + // Requeue the watcher for the next bookmark if needed. + c.bookmarkWatchers.addWatcher(watcher) + } + } +} + +// startDispatching chooses watchers potentially interested in a given event +// a marks dispatching as true. +func (c *Cacher) startDispatching(event *watchCacheEvent) { + // It is safe to call triggerValuesThreadUnsafe here, because at this + // point only this thread can access this event (we create a separate + // watchCacheEvent for every dispatch). + triggerValues, supported := c.triggerValuesThreadUnsafe(event) + + c.Lock() + defer c.Unlock() + + c.dispatching = true + // We are reusing the slice to avoid memory reallocations in every + // dispatchEvent() call. That may prevent Go GC from freeing items + // from previous phases that are sitting behind the current length + // of the slice, but there is only a limited number of those and the + // gain from avoiding memory allocations is much bigger. + c.watchersBuffer = c.watchersBuffer[:0] + + if event.Type == watch.Bookmark { + c.startDispatchingBookmarkEvents() + // return here to reduce following code indentation and diff + return + } + + // Iterate over "allWatchers" no matter what the trigger function is. + for _, watcher := range c.watchers.allWatchers { + c.watchersBuffer = append(c.watchersBuffer, watcher) + } + if supported { + // Iterate over watchers interested in the given values of the trigger. + for _, triggerValue := range triggerValues { + for _, watcher := range c.watchers.valueWatchers[triggerValue] { + c.watchersBuffer = append(c.watchersBuffer, watcher) + } + } + } else { + // supported equal to false generally means that trigger function + // is not defined (or not aware of any indexes). 
In this case, + // watchers filters should generally also don't generate any + // trigger values, but can cause problems in case of some + // misconfiguration. Thus we paranoidly leave this branch. + + // Iterate over watchers interested in exact values for all values. + for _, watchers := range c.watchers.valueWatchers { + for _, watcher := range watchers { + c.watchersBuffer = append(c.watchersBuffer, watcher) + } + } + } +} + +// finishDispatching stops all the watchers that were supposed to be +// stopped in the meantime, but it was deferred to avoid closing input +// channels of watchers, as add() may still have writing to it. +// It also marks dispatching as false. +func (c *Cacher) finishDispatching() { + c.Lock() + defer c.Unlock() + c.dispatching = false + for _, watcher := range c.watchersToStop { + watcher.stopThreadUnsafe() + } + c.watchersToStop = c.watchersToStop[:0] +} + +func (c *Cacher) terminateAllWatchers() { + c.Lock() + defer c.Unlock() + c.watchers.terminateAll(c.objectType, c.stopWatcherThreadUnsafe) +} + +func (c *Cacher) stopWatcherThreadUnsafe(watcher *cacheWatcher) { + if c.dispatching { + c.watchersToStop = append(c.watchersToStop, watcher) + } else { + watcher.stopThreadUnsafe() + } +} + +func (c *Cacher) isStopped() bool { + c.stopLock.RLock() + defer c.stopLock.RUnlock() + return c.stopped +} + +// Stop implements the graceful termination. +func (c *Cacher) Stop() { + c.stopLock.Lock() + if c.stopped { + // avoid stopping twice (note: cachers are shared with subresources) + c.stopLock.Unlock() + return + } + c.stopped = true + c.stopLock.Unlock() + close(c.stopCh) + c.stopWg.Wait() +} + +func forgetWatcher(c *Cacher, index int, triggerValue string, triggerSupported bool) func() { + return func() { + c.Lock() + defer c.Unlock() + + // It's possible that the watcher is already not in the structure (e.g. in case of + // simultaneous Stop() and terminateAllWatchers(), but it is safe to call stopThreadUnsafe() + // on a watcher multiple times. + c.watchers.deleteWatcher(index, triggerValue, triggerSupported, c.stopWatcherThreadUnsafe) + } +} + +func filterWithAttrsFunction(key string, p storage.SelectionPredicate) filterWithAttrsFunc { + filterFunc := func(objKey string, label labels.Set, field fields.Set) bool { + if !hasPathPrefix(objKey, key) { + return false + } + return p.MatchesObjectAttributes(label, field) + } + return filterFunc +} + +// LastSyncResourceVersion returns resource version to which the underlying cache is synced. +func (c *Cacher) LastSyncResourceVersion() (uint64, error) { + c.ready.wait() + + resourceVersion := c.reflector.LastSyncResourceVersion() + return c.versioner.ParseResourceVersion(resourceVersion) +} + +// cacherListerWatcher opaques storage.Interface to expose cache.ListerWatcher. +type cacherListerWatcher struct { + storage storage.Interface + resourcePrefix string + newListFunc func() runtime.Object +} + +// NewCacherListerWatcher returns a storage.Interface backed ListerWatcher. +func NewCacherListerWatcher(storage storage.Interface, resourcePrefix string, newListFunc func() runtime.Object) cache.ListerWatcher { + return &cacherListerWatcher{ + storage: storage, + resourcePrefix: resourcePrefix, + newListFunc: newListFunc, + } +} + +// Implements cache.ListerWatcher interface. 
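+//
+// Within this file it is consumed by the reflector constructed in
+// NewCacherFromConfig, roughly:
+//
+//	lw := NewCacherListerWatcher(config.Storage, config.ResourcePrefix, config.NewListFunc)
+//	reflector := cache.NewNamedReflector(reflectorName, lw, obj, watchCache, 0)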
+func (lw *cacherListerWatcher) List(options metav1.ListOptions) (runtime.Object, error) { + list := lw.newListFunc() + pred := storage.SelectionPredicate{ + Label: labels.Everything(), + Field: fields.Everything(), + Limit: options.Limit, + Continue: options.Continue, + } + + if err := lw.storage.List(context.TODO(), lw.resourcePrefix, storage.ListOptions{ResourceVersionMatch: options.ResourceVersionMatch, Predicate: pred}, list); err != nil { + return nil, err + } + return list, nil +} + +// Implements cache.ListerWatcher interface. +func (lw *cacherListerWatcher) Watch(options metav1.ListOptions) (watch.Interface, error) { + opts := storage.ListOptions{ + ResourceVersion: options.ResourceVersion, + Predicate: storage.Everything, + } + if utilfeature.DefaultFeatureGate.Enabled(features.EfficientWatchResumption) { + opts.ProgressNotify = true + } + return lw.storage.WatchList(context.TODO(), lw.resourcePrefix, opts) +} + +// errWatcher implements watch.Interface to return a single error +type errWatcher struct { + result chan watch.Event +} + +func newErrWatcher(err error) *errWatcher { + // Create an error event + errEvent := watch.Event{Type: watch.Error} + switch err := err.(type) { + case runtime.Object: + errEvent.Object = err + case *errors.StatusError: + errEvent.Object = &err.ErrStatus + default: + errEvent.Object = &metav1.Status{ + Status: metav1.StatusFailure, + Message: err.Error(), + Reason: metav1.StatusReasonInternalError, + Code: http.StatusInternalServerError, + } + } + + // Create a watcher with room for a single event, populate it, and close the channel + watcher := &errWatcher{result: make(chan watch.Event, 1)} + watcher.result <- errEvent + close(watcher.result) + + return watcher +} + +// Implements watch.Interface. +func (c *errWatcher) ResultChan() <-chan watch.Event { + return c.result +} + +// Implements watch.Interface. +func (c *errWatcher) Stop() { + // no-op +} + +// cacheWatcher implements watch.Interface +// this is not thread-safe +type cacheWatcher struct { + input chan *watchCacheEvent + result chan watch.Event + done chan struct{} + filter filterWithAttrsFunc + stopped bool + forget func() + versioner storage.Versioner + // The watcher will be closed by server after the deadline, + // save it here to send bookmark events before that. + deadline time.Time + allowWatchBookmarks bool + // Object type of the cache watcher interests + objectType reflect.Type + + // human readable identifier that helps assigning cacheWatcher + // instance with request + identifier string +} + +func newCacheWatcher(chanSize int, filter filterWithAttrsFunc, forget func(), versioner storage.Versioner, deadline time.Time, allowWatchBookmarks bool, objectType reflect.Type, identifier string) *cacheWatcher { + return &cacheWatcher{ + input: make(chan *watchCacheEvent, chanSize), + result: make(chan watch.Event, chanSize), + done: make(chan struct{}), + filter: filter, + stopped: false, + forget: forget, + versioner: versioner, + deadline: deadline, + allowWatchBookmarks: allowWatchBookmarks, + objectType: objectType, + identifier: identifier, + } +} + +// Implements watch.Interface. +func (c *cacheWatcher) ResultChan() <-chan watch.Event { + return c.result +} + +// Implements watch.Interface. 
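+// Stop is safe to call multiple times: forget() routes through
+// stopThreadUnsafe, which checks c.stopped under the Cacher lock.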
+func (c *cacheWatcher) Stop() {
+	c.forget()
+}
+
+// we rely on the fact that stopThreadUnsafe is actually protected by Cacher.Lock()
+func (c *cacheWatcher) stopThreadUnsafe() {
+	if !c.stopped {
+		c.stopped = true
+		close(c.done)
+		close(c.input)
+	}
+}
+
+func (c *cacheWatcher) nonblockingAdd(event *watchCacheEvent) bool {
+	select {
+	case c.input <- event:
+		return true
+	default:
+		return false
+	}
+}
+
+// Nil timer means that add will not block (if it can't send event immediately, it will break the watcher)
+func (c *cacheWatcher) add(event *watchCacheEvent, timer *time.Timer) bool {
+	// Try to send the event immediately, without blocking.
+	if c.nonblockingAdd(event) {
+		return true
+	}
+
+	closeFunc := func() {
+		// This means that we couldn't send event to that watcher.
+		// Since we don't want to block on it infinitely,
+		// we simply terminate it.
+		klog.V(1).Infof("Forcing %v watcher close due to unresponsiveness: %v. len(c.input) = %v, len(c.result) = %v", c.objectType.String(), c.identifier, len(c.input), len(c.result))
+		terminatedWatchersCounter.WithLabelValues(c.objectType.String()).Inc()
+		c.forget()
+	}
+
+	if timer == nil {
+		closeFunc()
+		return false
+	}
+
+	// OK, block sending, but only until timer fires.
+	select {
+	case c.input <- event:
+		return true
+	case <-timer.C:
+		closeFunc()
+		return false
+	}
+}
+
+func (c *cacheWatcher) nextBookmarkTime(now time.Time, bookmarkFrequency time.Duration) (time.Time, bool) {
+	// We try to send bookmarks:
+	// (a) roughly every minute
+	// (b) right before the watcher timeout - for now we simply set it 2s before
+	//     the deadline
+	// The former gives us periodicity if the watch breaks due to unexpected
+	// conditions, the latter ensures that on timeout the watcher is as close to
+	// now as possible - this covers 99% of cases.
+	heartbeatTime := now.Add(bookmarkFrequency)
+	if c.deadline.IsZero() {
+		// Timeout is set by our client libraries (e.g. reflector) as well as defaulted by
+		// apiserver if properly configured. So this shouldn't happen in practice.
+		return heartbeatTime, true
+	}
+	if pretimeoutTime := c.deadline.Add(-2 * time.Second); pretimeoutTime.Before(heartbeatTime) {
+		heartbeatTime = pretimeoutTime
+	}
+
+	if heartbeatTime.Before(now) {
+		return time.Time{}, false
+	}
+	return heartbeatTime, true
+}
+
+func getEventObject(object runtime.Object) runtime.Object {
+	if _, ok := object.(runtime.CacheableObject); ok {
+		// It is safe to return without deep-copy, because the underlying
+		// object was already deep-copied during construction.
+		return object
+	}
+	return object.DeepCopyObject()
+}
+
+func updateResourceVersionIfNeeded(object runtime.Object, versioner storage.Versioner, resourceVersion uint64) {
+	if _, ok := object.(*cachingObject); ok {
+		// We assume that for cachingObject resourceVersion was already propagated before.
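+		// (For delete events, setCachingObjects stamps the wrapped object's
+		// resourceVersion before the event is dispatched.)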
+ return + } + if err := versioner.UpdateObject(object, resourceVersion); err != nil { + utilruntime.HandleError(fmt.Errorf("failure to version api object (%d) %#v: %v", resourceVersion, object, err)) + } +} + +func (c *cacheWatcher) convertToWatchEvent(event *watchCacheEvent) *watch.Event { + if event.Type == watch.Bookmark { + return &watch.Event{Type: watch.Bookmark, Object: event.Object.DeepCopyObject()} + } + + curObjPasses := event.Type != watch.Deleted && c.filter(event.Key, event.ObjLabels, event.ObjFields) + oldObjPasses := false + if event.PrevObject != nil { + oldObjPasses = c.filter(event.Key, event.PrevObjLabels, event.PrevObjFields) + } + if !curObjPasses && !oldObjPasses { + // Watcher is not interested in that object. + return nil + } + + switch { + case curObjPasses && !oldObjPasses: + return &watch.Event{Type: watch.Added, Object: getEventObject(event.Object)} + case curObjPasses && oldObjPasses: + return &watch.Event{Type: watch.Modified, Object: getEventObject(event.Object)} + case !curObjPasses && oldObjPasses: + // return a delete event with the previous object content, but with the event's resource version + oldObj := getEventObject(event.PrevObject) + updateResourceVersionIfNeeded(oldObj, c.versioner, event.ResourceVersion) + return &watch.Event{Type: watch.Deleted, Object: oldObj} + } + + return nil +} + +// NOTE: sendWatchCacheEvent is assumed to not modify !!! +func (c *cacheWatcher) sendWatchCacheEvent(event *watchCacheEvent) { + watchEvent := c.convertToWatchEvent(event) + if watchEvent == nil { + // Watcher is not interested in that object. + return + } + + // We need to ensure that if we put event X to the c.result, all + // previous events were already put into it before, no matter whether + // c.done is close or not. + // Thus we cannot simply select from c.done and c.result and this + // would give us non-determinism. + // At the same time, we don't want to block infinitely on putting + // to c.result, when c.done is already closed. + + // This ensures that with c.done already close, we at most once go + // into the next select after this. With that, no matter which + // statement we choose there, we will deliver only consecutive + // events. + select { + case <-c.done: + return + default: + } + + select { + case c.result <- *watchEvent: + case <-c.done: + } +} + +func (c *cacheWatcher) process(ctx context.Context, initEvents []*watchCacheEvent, resourceVersion uint64) { + defer utilruntime.HandleCrash() + + // Check how long we are processing initEvents. + // As long as these are not processed, we are not processing + // any incoming events, so if it takes long, we may actually + // block all watchers for some time. + // TODO: From the logs it seems that there happens processing + // times even up to 1s which is very long. However, this doesn't + // depend that much on the number of initEvents. E.g. from the + // 2000-node Kubemark run we have logs like this, e.g.: + // ... processing 13862 initEvents took 66.808689ms + // ... processing 14040 initEvents took 993.532539ms + // We should understand what is blocking us in those cases (e.g. + // is it lack of CPU, network, or sth else) and potentially + // consider increase size of result buffer in those cases. 
+	const initProcessThreshold = 500 * time.Millisecond
+	startTime := time.Now()
+	for _, event := range initEvents {
+		c.sendWatchCacheEvent(event)
+	}
+	objType := c.objectType.String()
+	if len(initEvents) > 0 {
+		initCounter.WithLabelValues(objType).Add(float64(len(initEvents)))
+	}
+	processingTime := time.Since(startTime)
+	if processingTime > initProcessThreshold {
+		klog.V(2).Infof("processing %d initEvents of %s (%s) took %v", len(initEvents), objType, c.identifier, processingTime)
+	}
+
+	// At this point we already start processing incoming watch events.
+	// However, the init events can still be processed because their serialization
+	// and sending to the client happen asynchronously.
+	// TODO: As described in the KEP, we would like to estimate that by delaying
+	// the initialization signal proportionally to the number of events to
+	// process, but we're leaving this to the tuning phase.
+	utilflowcontrol.WatchInitialized(ctx)
+
+	defer close(c.result)
+	defer c.Stop()
+	for {
+		select {
+		case event, ok := <-c.input:
+			if !ok {
+				return
+			}
+			// only send events newer than resourceVersion
+			if event.ResourceVersion > resourceVersion {
+				c.sendWatchCacheEvent(event)
+			}
+		case <-ctx.Done():
+			return
+		}
+	}
+}
+
+type ready struct {
+	ok bool
+	c  *sync.Cond
+}
+
+func newReady() *ready {
+	return &ready{c: sync.NewCond(&sync.RWMutex{})}
+}
+
+func (r *ready) wait() {
+	r.c.L.Lock()
+	for !r.ok {
+		r.c.Wait()
+	}
+	r.c.L.Unlock()
+}
+
+// TODO: Make check() function more sophisticated, in particular
+// allow it to behave as "waitWithTimeout".
+func (r *ready) check() bool {
+	rwMutex := r.c.L.(*sync.RWMutex)
+	rwMutex.RLock()
+	defer rwMutex.RUnlock()
+	return r.ok
+}
+
+func (r *ready) set(ok bool) {
+	r.c.L.Lock()
+	defer r.c.L.Unlock()
+	r.ok = ok
+	r.c.Broadcast()
+}
diff --git a/vendor/k8s.io/apiserver/pkg/storage/cacher/caching_object.go b/vendor/k8s.io/apiserver/pkg/storage/cacher/caching_object.go
new file mode 100644
index 000000000..752a28714
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/storage/cacher/caching_object.go
@@ -0,0 +1,397 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cacher
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"reflect"
+	"runtime/debug"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/klog/v2"
+)
+
+var _ runtime.CacheableObject = &cachingObject{}
+
+// metaRuntimeInterface implements runtime.Object and
+// metav1.Object interfaces.
+type metaRuntimeInterface interface {
+	runtime.Object
+	metav1.Object
+}
+
+// serializationResult captures a result of serialization.
+type serializationResult struct {
+	// once should be used to ensure serialization is computed once.
+	once sync.Once
+
+	// raw is serialized object.
+	raw []byte
+	// err is error from serialization.
+	err error
+}
+
+// serializationsCache is a type for caching serialization results.
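+// It is treated as copy-on-write: readers load the map from an atomic.Value
+// without locking, while writers build a fresh map under o.lock and swap it
+// in atomically (see getSerializationResult below).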
+type serializationsCache map[runtime.Identifier]*serializationResult
+
+// cachingObject is an object that is able to cache its serializations
+// so that each of those is computed exactly once.
+//
+// cachingObject implements the metav1.Object interface (accessors for
+// all metadata fields). However, setters for all fields except for
+// SelfLink (which is set late in the path) are ignored.
+type cachingObject struct {
+	lock sync.RWMutex
+
+	// Object for which serializations are cached.
+	object metaRuntimeInterface
+
+	// serializations is a cache containing the object's serializations.
+	// The value stored in atomic.Value is of type serializationsCache.
+	// The atomic.Value type is used to allow fast-path.
+	serializations atomic.Value
+}
+
+// newCachingObject performs a deep copy of the given object and wraps it
+// into a cachingObject.
+// An error is returned if it's not possible to cast the object to
+// metav1.Object type.
+func newCachingObject(object runtime.Object) (*cachingObject, error) {
+	if obj, ok := object.(metaRuntimeInterface); ok {
+		result := &cachingObject{object: obj.DeepCopyObject().(metaRuntimeInterface)}
+		result.serializations.Store(make(serializationsCache))
+		return result, nil
+	}
+	return nil, fmt.Errorf("can't cast object to metav1.Object: %#v", object)
+}
+
+func (o *cachingObject) getSerializationResult(id runtime.Identifier) *serializationResult {
+	// Fast-path for getting from cache.
+	serializations := o.serializations.Load().(serializationsCache)
+	if result, exists := serializations[id]; exists {
+		return result
+	}
+
+	// Slow-path (that may require insert).
+	o.lock.Lock()
+	defer o.lock.Unlock()
+
+	serializations = o.serializations.Load().(serializationsCache)
+	// Check if it was inserted in the meantime.
+	if result, exists := serializations[id]; exists {
+		return result
+	}
+
+	// Insert an entry for the given identifier. This requires a copy of the existing map.
+	newSerializations := make(serializationsCache)
+	for k, v := range serializations {
+		newSerializations[k] = v
+	}
+	result := &serializationResult{}
+	newSerializations[id] = result
+	o.serializations.Store(newSerializations)
+	return result
+}
+
+// CacheEncode implements runtime.CacheableObject interface.
+// It serializes the object and writes the result to the given io.Writer,
+// first trying the already cached result and falling back to the given
+// encode function on a cache miss.
+// It assumes that for a given identifier, the encode function always encodes
+// each input object into the same output format.
+func (o *cachingObject) CacheEncode(id runtime.Identifier, encode func(runtime.Object, io.Writer) error, w io.Writer) error {
+	result := o.getSerializationResult(id)
+	result.once.Do(func() {
+		buffer := bytes.NewBuffer(nil)
+		result.err = encode(o.GetObject(), buffer)
+		result.raw = buffer.Bytes()
+	})
+	// Once invoked, fields of serialization will not change.
+	if result.err != nil {
+		return result.err
+	}
+	_, err := w.Write(result.raw)
+	return err
+}
+
+// GetObject implements runtime.CacheableObject interface.
+// It returns a deep copy of the wrapped object, returning ownership of it
+// to the caller according to the contract of the interface.
+func (o *cachingObject) GetObject() runtime.Object {
+	o.lock.RLock()
+	defer o.lock.RUnlock()
+	return o.object.DeepCopyObject().(metaRuntimeInterface)
+}
+
+// GetObjectKind implements runtime.Object interface.
+func (o *cachingObject) GetObjectKind() schema.ObjectKind { + o.lock.RLock() + defer o.lock.RUnlock() + return o.object.GetObjectKind() +} + +// DeepCopyObject implements runtime.Object interface. +func (o *cachingObject) DeepCopyObject() runtime.Object { + // DeepCopyObject on cachingObject is not expected to be called anywhere. + // However, to be on the safe-side, we implement it, though given the + // cache is only an optimization we ignore copying it. + result := &cachingObject{} + result.serializations.Store(make(serializationsCache)) + + o.lock.RLock() + defer o.lock.RUnlock() + result.object = o.object.DeepCopyObject().(metaRuntimeInterface) + return result +} + +var ( + invalidationCacheTimestampLock sync.Mutex + invalidationCacheTimestamp time.Time +) + +// shouldLogCacheInvalidation allows for logging cache-invalidation +// at most once per second (to avoid spamming logs in case of issues). +func shouldLogCacheInvalidation(now time.Time) bool { + invalidationCacheTimestampLock.Lock() + defer invalidationCacheTimestampLock.Unlock() + if invalidationCacheTimestamp.Add(time.Second).Before(now) { + invalidationCacheTimestamp = now + return true + } + return false +} + +func (o *cachingObject) invalidateCacheLocked() { + if cache, ok := o.serializations.Load().(serializationsCache); ok && len(cache) == 0 { + return + } + // We don't expect cache invalidation to happen - so we want + // to log the stacktrace to allow debugging if that will happen. + // OTOH, we don't want to spam logs with it. + // So we try to log it at most once per second. + if shouldLogCacheInvalidation(time.Now()) { + klog.Warningf("Unexpected cache invalidation for %#v\n%s", o.object, string(debug.Stack())) + } + o.serializations.Store(make(serializationsCache)) +} + +// The following functions implement metav1.Object interface: +// - getters simply delegate for the underlying object +// - setters check if operations isn't noop and if so, +// invalidate the cache and delegate for the underlying object + +func (o *cachingObject) conditionalSet(isNoop func() bool, set func()) { + if fastPath := func() bool { + o.lock.RLock() + defer o.lock.RUnlock() + return isNoop() + }(); fastPath { + return + } + o.lock.Lock() + defer o.lock.Unlock() + if isNoop() { + return + } + o.invalidateCacheLocked() + set() +} + +func (o *cachingObject) GetNamespace() string { + o.lock.RLock() + defer o.lock.RUnlock() + return o.object.GetNamespace() +} +func (o *cachingObject) SetNamespace(namespace string) { + o.conditionalSet( + func() bool { return o.object.GetNamespace() == namespace }, + func() { o.object.SetNamespace(namespace) }, + ) +} +func (o *cachingObject) GetName() string { + o.lock.RLock() + defer o.lock.RUnlock() + return o.object.GetName() +} +func (o *cachingObject) SetName(name string) { + o.conditionalSet( + func() bool { return o.object.GetName() == name }, + func() { o.object.SetName(name) }, + ) +} +func (o *cachingObject) GetGenerateName() string { + o.lock.RLock() + defer o.lock.RUnlock() + return o.object.GetGenerateName() +} +func (o *cachingObject) SetGenerateName(name string) { + o.conditionalSet( + func() bool { return o.object.GetGenerateName() == name }, + func() { o.object.SetGenerateName(name) }, + ) +} +func (o *cachingObject) GetUID() types.UID { + o.lock.RLock() + defer o.lock.RUnlock() + return o.object.GetUID() +} +func (o *cachingObject) SetUID(uid types.UID) { + o.conditionalSet( + func() bool { return o.object.GetUID() == uid }, + func() { o.object.SetUID(uid) }, + ) +} +func (o *cachingObject) 
GetResourceVersion() string { + o.lock.RLock() + defer o.lock.RUnlock() + return o.object.GetResourceVersion() +} +func (o *cachingObject) SetResourceVersion(version string) { + o.conditionalSet( + func() bool { return o.object.GetResourceVersion() == version }, + func() { o.object.SetResourceVersion(version) }, + ) +} +func (o *cachingObject) GetGeneration() int64 { + o.lock.RLock() + defer o.lock.RUnlock() + return o.object.GetGeneration() +} +func (o *cachingObject) SetGeneration(generation int64) { + o.conditionalSet( + func() bool { return o.object.GetGeneration() == generation }, + func() { o.object.SetGeneration(generation) }, + ) +} +func (o *cachingObject) GetSelfLink() string { + o.lock.RLock() + defer o.lock.RUnlock() + return o.object.GetSelfLink() +} +func (o *cachingObject) SetSelfLink(selfLink string) { + o.conditionalSet( + func() bool { return o.object.GetSelfLink() == selfLink }, + func() { o.object.SetSelfLink(selfLink) }, + ) +} +func (o *cachingObject) GetCreationTimestamp() metav1.Time { + o.lock.RLock() + defer o.lock.RUnlock() + return o.object.GetCreationTimestamp() +} +func (o *cachingObject) SetCreationTimestamp(timestamp metav1.Time) { + o.conditionalSet( + func() bool { return o.object.GetCreationTimestamp() == timestamp }, + func() { o.object.SetCreationTimestamp(timestamp) }, + ) +} +func (o *cachingObject) GetDeletionTimestamp() *metav1.Time { + o.lock.RLock() + defer o.lock.RUnlock() + return o.object.GetDeletionTimestamp() +} +func (o *cachingObject) SetDeletionTimestamp(timestamp *metav1.Time) { + o.conditionalSet( + func() bool { return o.object.GetDeletionTimestamp() == timestamp }, + func() { o.object.SetDeletionTimestamp(timestamp) }, + ) +} +func (o *cachingObject) GetDeletionGracePeriodSeconds() *int64 { + o.lock.RLock() + defer o.lock.RUnlock() + return o.object.GetDeletionGracePeriodSeconds() +} +func (o *cachingObject) SetDeletionGracePeriodSeconds(gracePeriodSeconds *int64) { + o.conditionalSet( + func() bool { return o.object.GetDeletionGracePeriodSeconds() == gracePeriodSeconds }, + func() { o.object.SetDeletionGracePeriodSeconds(gracePeriodSeconds) }, + ) +} +func (o *cachingObject) GetLabels() map[string]string { + o.lock.RLock() + defer o.lock.RUnlock() + return o.object.GetLabels() +} +func (o *cachingObject) SetLabels(labels map[string]string) { + o.conditionalSet( + func() bool { return reflect.DeepEqual(o.object.GetLabels(), labels) }, + func() { o.object.SetLabels(labels) }, + ) +} +func (o *cachingObject) GetAnnotations() map[string]string { + o.lock.RLock() + defer o.lock.RUnlock() + return o.object.GetAnnotations() +} +func (o *cachingObject) SetAnnotations(annotations map[string]string) { + o.conditionalSet( + func() bool { return reflect.DeepEqual(o.object.GetAnnotations(), annotations) }, + func() { o.object.SetAnnotations(annotations) }, + ) +} +func (o *cachingObject) GetFinalizers() []string { + o.lock.RLock() + defer o.lock.RUnlock() + return o.object.GetFinalizers() +} +func (o *cachingObject) SetFinalizers(finalizers []string) { + o.conditionalSet( + func() bool { return reflect.DeepEqual(o.object.GetFinalizers(), finalizers) }, + func() { o.object.SetFinalizers(finalizers) }, + ) +} +func (o *cachingObject) GetOwnerReferences() []metav1.OwnerReference { + o.lock.RLock() + defer o.lock.RUnlock() + return o.object.GetOwnerReferences() +} +func (o *cachingObject) SetOwnerReferences(references []metav1.OwnerReference) { + o.conditionalSet( + func() bool { return reflect.DeepEqual(o.object.GetOwnerReferences(), references) }, + 
func() { o.object.SetOwnerReferences(references) }, + ) +} +func (o *cachingObject) GetClusterName() string { + o.lock.RLock() + defer o.lock.RUnlock() + return o.object.GetClusterName() +} +func (o *cachingObject) SetClusterName(clusterName string) { + o.conditionalSet( + func() bool { return o.object.GetClusterName() == clusterName }, + func() { o.object.SetClusterName(clusterName) }, + ) +} +func (o *cachingObject) GetManagedFields() []metav1.ManagedFieldsEntry { + o.lock.RLock() + defer o.lock.RUnlock() + return o.object.GetManagedFields() +} +func (o *cachingObject) SetManagedFields(managedFields []metav1.ManagedFieldsEntry) { + o.conditionalSet( + func() bool { return reflect.DeepEqual(o.object.GetManagedFields(), managedFields) }, + func() { o.object.SetManagedFields(managedFields) }, + ) +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/cacher/metrics.go b/vendor/k8s.io/apiserver/pkg/storage/cacher/metrics.go new file mode 100644 index 000000000..4d41889f2 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/cacher/metrics.go @@ -0,0 +1,95 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cacher + +import ( + "k8s.io/component-base/metrics" + "k8s.io/component-base/metrics/legacyregistry" +) + +/* + * By default, all the following metrics are defined as falling under + * ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/1209-metrics-stability/kubernetes-control-plane-metrics-stability.md#stability-classes) + * + * Promoting the stability level of the metric is a responsibility of the component owner, since it + * involves explicitly acknowledging support for the metric across multiple releases, in accordance with + * the metric stability policy. 
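// Editor's note: the getters and setters above all funnel writes through
// conditionalSet, which skips mutation (and the write lock) when the update
// would be a no-op. Below is a minimal, self-contained sketch of that
// pattern; the type and field names are illustrative assumptions, not the
// vendored implementation.

package main

import (
	"fmt"
	"sync"
)

type cachedMeta struct {
	lock sync.RWMutex
	name string
}

// conditionalSet first checks under a read lock whether the new value equals
// the stored one; only a real change takes the write lock and mutates.
func (c *cachedMeta) conditionalSet(isNoop func() bool, set func()) {
	noop := func() bool {
		c.lock.RLock()
		defer c.lock.RUnlock()
		return isNoop()
	}()
	if noop {
		return
	}
	c.lock.Lock()
	defer c.lock.Unlock()
	if isNoop() { // re-check: another writer may have applied the same value
		return
	}
	set()
}

func (c *cachedMeta) SetName(name string) {
	c.conditionalSet(
		func() bool { return c.name == name },
		func() { c.name = name },
	)
}

func main() {
	c := &cachedMeta{name: "a"}
	c.SetName("a") // no-op: read lock only
	c.SetName("b") // real update under the write lock
	fmt.Println(c.name) // b
}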
+ */ +var ( + initCounter = metrics.NewCounterVec( + &metrics.CounterOpts{ + Name: "apiserver_init_events_total", + Help: "Counter of init events processed in watchcache broken by resource type.", + StabilityLevel: metrics.ALPHA, + }, + []string{"resource"}, + ) + + terminatedWatchersCounter = metrics.NewCounterVec( + &metrics.CounterOpts{ + Name: "apiserver_terminated_watchers_total", + Help: "Counter of watchers closed due to unresponsiveness broken by resource type.", + StabilityLevel: metrics.ALPHA, + }, + []string{"resource"}, + ) + + watchCacheCapacityIncreaseTotal = metrics.NewCounterVec( + &metrics.CounterOpts{ + Name: "watch_cache_capacity_increase_total", + Help: "Total number of watch cache capacity increase events broken by resource type.", + StabilityLevel: metrics.ALPHA, + }, + []string{"resource"}, + ) + + watchCacheCapacityDecreaseTotal = metrics.NewCounterVec( + &metrics.CounterOpts{ + Name: "watch_cache_capacity_decrease_total", + Help: "Total number of watch cache capacity decrease events broken by resource type.", + StabilityLevel: metrics.ALPHA, + }, + []string{"resource"}, + ) + + watchCacheCapacity = metrics.NewGaugeVec( + &metrics.GaugeOpts{ + Name: "watch_cache_capacity", + Help: "Total capacity of watch cache broken by resource type.", + StabilityLevel: metrics.ALPHA, + }, + []string{"resource"}, + ) +) + +func init() { + legacyregistry.MustRegister(initCounter) + legacyregistry.MustRegister(terminatedWatchersCounter) + legacyregistry.MustRegister(watchCacheCapacityIncreaseTotal) + legacyregistry.MustRegister(watchCacheCapacityDecreaseTotal) + legacyregistry.MustRegister(watchCacheCapacity) +} + +// recordsWatchCacheCapacityChange records watchCache capacity resize (increase or decrease) operations. +func recordsWatchCacheCapacityChange(objType string, old, new int) { + watchCacheCapacity.WithLabelValues(objType).Set(float64(new)) + if old < new { + watchCacheCapacityIncreaseTotal.WithLabelValues(objType).Inc() + return + } + watchCacheCapacityDecreaseTotal.WithLabelValues(objType).Inc() +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/cacher/time_budget.go b/vendor/k8s.io/apiserver/pkg/storage/cacher/time_budget.go new file mode 100644 index 000000000..2eb0fed32 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/cacher/time_budget.go @@ -0,0 +1,100 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cacher + +import ( + "sync" + "time" +) + +const ( + refreshPerSecond = 50 * time.Millisecond + maxBudget = 100 * time.Millisecond +) + +// timeBudget implements a budget of time that you can use and that is +// periodically refreshed. The pattern to use it is: +// budget := newTimeBudget(...) +// ... +// timeout := budget.takeAvailable() +// // Now you can spend at most timeout on doing stuff +// ...
+// // If you didn't use all timeout, return what you didn't use +// budget.returnUnused(unused) +// +// NOTE: It's not recommended to be used concurrently from multiple threads - +// if the first user takes the whole timeout, the second one will get 0 timeout +// even though the first one may return something later. +type timeBudget interface { + takeAvailable() time.Duration + returnUnused(unused time.Duration) +} + +type timeBudgetImpl struct { + sync.Mutex + budget time.Duration + + refresh time.Duration + maxBudget time.Duration +} + +func newTimeBudget(stopCh <-chan struct{}) timeBudget { + result := &timeBudgetImpl{ + budget: time.Duration(0), + refresh: refreshPerSecond, + maxBudget: maxBudget, + } + go result.periodicallyRefresh(stopCh) + return result +} + +func (t *timeBudgetImpl) periodicallyRefresh(stopCh <-chan struct{}) { + ticker := time.NewTicker(time.Second) + defer ticker.Stop() + for { + select { + case <-ticker.C: + t.Lock() + if t.budget = t.budget + t.refresh; t.budget > t.maxBudget { + t.budget = t.maxBudget + } + t.Unlock() + case <-stopCh: + return + } + } +} + +func (t *timeBudgetImpl) takeAvailable() time.Duration { + t.Lock() + defer t.Unlock() + result := t.budget + t.budget = time.Duration(0) + return result +} + +func (t *timeBudgetImpl) returnUnused(unused time.Duration) { + t.Lock() + defer t.Unlock() + if unused < 0 { + // We used more than allowed. + return + } + if t.budget = t.budget + unused; t.budget > t.maxBudget { + t.budget = t.maxBudget + } +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/cacher/util.go b/vendor/k8s.io/apiserver/pkg/storage/cacher/util.go new file mode 100644 index 000000000..7943a93dc --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/cacher/util.go @@ -0,0 +1,60 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
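// Editor's note: a minimal usage sketch of the take/return pattern that
// timeBudget above defines. timeBudget is unexported, so this stand-in
// mirrors its contract (takeAvailable/returnUnused) rather than importing
// the vendored type; the timed work is an illustrative placeholder.

package main

import (
	"fmt"
	"time"
)

type budget struct{ avail time.Duration }

func (b *budget) takeAvailable() time.Duration {
	d := b.avail
	b.avail = 0 // hand the whole budget to the caller
	return d
}

func (b *budget) returnUnused(unused time.Duration) {
	if unused > 0 {
		b.avail += unused // give back whatever the caller did not spend
	}
}

func main() {
	b := &budget{avail: 100 * time.Millisecond}
	timeout := b.takeAvailable() // spend at most `timeout` doing work
	start := time.Now()
	time.Sleep(10 * time.Millisecond) // placeholder for bounded work
	b.returnUnused(timeout - time.Since(start))
	fmt.Println("budget left:", b.avail)
}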
+*/ + +package cacher + +import ( + "strings" +) + +// hasPathPrefix returns true if the string matches pathPrefix exactly, or if it is prefixed with pathPrefix at a path segment boundary +func hasPathPrefix(s, pathPrefix string) bool { + // Short circuit if s doesn't contain the prefix at all + if !strings.HasPrefix(s, pathPrefix) { + return false + } + + pathPrefixLength := len(pathPrefix) + + if len(s) == pathPrefixLength { + // Exact match + return true + } + if strings.HasSuffix(pathPrefix, "/") { + // pathPrefix already ensured a path segment boundary + return true + } + if s[pathPrefixLength:pathPrefixLength+1] == "/" { + // The next character in s is a path segment boundary + // Check this instead of normalizing pathPrefix to avoid allocating on every call + return true + } + return false +} + +func max(a, b int) int { + if a > b { + return a + } + return b +} + +func min(a, b int) int { + if a < b { + return a + } + return b +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache.go b/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache.go new file mode 100644 index 000000000..b3c925e18 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache.go @@ -0,0 +1,633 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cacher + +import ( + "fmt" + "reflect" + "sort" + "sync" + "time" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/apiserver/pkg/storage" + "k8s.io/client-go/tools/cache" + "k8s.io/klog/v2" + utiltrace "k8s.io/utils/trace" +) + +const ( + // blockTimeout determines how long we're willing to block the request + // to wait for a given resource version to be propagated to cache, + // before terminating request and returning Timeout error with retry + // after suggestion. + blockTimeout = 3 * time.Second + + // resourceVersionTooHighRetrySeconds is the seconds before an operation should be retried by the client + // after receiving a 'too high resource version' error. + resourceVersionTooHighRetrySeconds = 1 + + // eventFreshDuration is the time duration of events we want to keep. + // We set it to `defaultBookmarkFrequency` plus epsilon to maximize + // chances that last bookmark was sent within kept history, at the + // same time, minimizing the needed memory usage. + eventFreshDuration = 75 * time.Second + + // defaultLowerBoundCapacity is a default value for event cache capacity's lower bound. + // TODO: Figure out to what value we can decrease it. + defaultLowerBoundCapacity = 100 + + // defaultUpperBoundCapacity should be able to keep eventFreshDuration of history. + defaultUpperBoundCapacity = 100 * 1024 +) + +// watchCacheEvent is a single "watch event" that is sent to users of +// watchCache.
In addition to a typical "watch.Event", it contains +// the previous value of the object to enable proper filtering in the +// upper layers. +type watchCacheEvent struct { + Type watch.EventType + Object runtime.Object + ObjLabels labels.Set + ObjFields fields.Set + PrevObject runtime.Object + PrevObjLabels labels.Set + PrevObjFields fields.Set + Key string + ResourceVersion uint64 + RecordTime time.Time +} + +// Computing a key of an object is generally non-trivial (it performs +// e.g. validation underneath). The same is true for computing object fields +// and labels. To avoid computing them multiple times (to serve the event +// in different List/Watch requests), in the underlying store we are +// keeping structs (key, object, labels, fields). +type storeElement struct { + Key string + Object runtime.Object + Labels labels.Set + Fields fields.Set +} + +func storeElementKey(obj interface{}) (string, error) { + elem, ok := obj.(*storeElement) + if !ok { + return "", fmt.Errorf("not a storeElement: %v", obj) + } + return elem.Key, nil +} + +func storeElementObject(obj interface{}) (runtime.Object, error) { + elem, ok := obj.(*storeElement) + if !ok { + return nil, fmt.Errorf("not a storeElement: %v", obj) + } + return elem.Object, nil +} + +func storeElementIndexFunc(objIndexFunc cache.IndexFunc) cache.IndexFunc { + return func(obj interface{}) (strings []string, e error) { + seo, err := storeElementObject(obj) + if err != nil { + return nil, err + } + return objIndexFunc(seo) + } +} + +func storeElementIndexers(indexers *cache.Indexers) cache.Indexers { + if indexers == nil { + return cache.Indexers{} + } + ret := cache.Indexers{} + for indexName, indexFunc := range *indexers { + ret[indexName] = storeElementIndexFunc(indexFunc) + } + return ret +} + +// watchCache implements a Store interface. +// However, it depends on the elements implementing the runtime.Object interface. +// +// watchCache is a "sliding window" (with a limited capacity) of objects +// observed from a watch. +type watchCache struct { + sync.RWMutex + + // Condition on which lists are waiting for the fresh enough + // resource version. + cond *sync.Cond + + // Maximum size of history window. + capacity int + + // upper bound of capacity since event cache has a dynamic size. + upperBoundCapacity int + + // lower bound of capacity since event cache has a dynamic size. + lowerBoundCapacity int + + // keyFunc is used to get a key in the underlying storage for a given object. + keyFunc func(runtime.Object) (string, error) + + // getAttrsFunc is used to get labels and fields of an object. + getAttrsFunc func(runtime.Object) (labels.Set, fields.Set, error) + + // cache is used as a cyclic buffer - its first element (with the smallest + // resourceVersion) is defined by startIndex, its last element is defined + // by endIndex (if cache is full it will be startIndex + capacity). + // Both startIndex and endIndex can be greater than buffer capacity - + // you should always apply modulo capacity to get an index in cache array. + cache []*watchCacheEvent + startIndex int + endIndex int + + // store will effectively support LIST operation from the "end of cache + // history" i.e. from the moment just after the newest cached watched event. + // It is necessary to effectively allow clients to start watching from "now". + // NOTE: We assume that it is thread-safe. + store cache.Indexer + + // ResourceVersion up to which the watchCache is propagated. + resourceVersion uint64 + + // ResourceVersion of the last list result (populated via Replace() method).
+ listResourceVersion uint64 + + // This handler is run at the end of every successful Replace() method. + onReplace func() + + // This handler is run at the end of every Add/Update/Delete method + // and additionally gets the previous value of the object. + eventHandler func(*watchCacheEvent) + + // for testing timeouts. + clock clock.Clock + + // An underlying storage.Versioner. + versioner storage.Versioner + + // cacher's objectType. + objectType reflect.Type +} + +func newWatchCache( + keyFunc func(runtime.Object) (string, error), + eventHandler func(*watchCacheEvent), + getAttrsFunc func(runtime.Object) (labels.Set, fields.Set, error), + versioner storage.Versioner, + indexers *cache.Indexers, + clock clock.Clock, + objectType reflect.Type) *watchCache { + wc := &watchCache{ + capacity: defaultLowerBoundCapacity, + keyFunc: keyFunc, + getAttrsFunc: getAttrsFunc, + cache: make([]*watchCacheEvent, defaultLowerBoundCapacity), + lowerBoundCapacity: defaultLowerBoundCapacity, + upperBoundCapacity: defaultUpperBoundCapacity, + startIndex: 0, + endIndex: 0, + store: cache.NewIndexer(storeElementKey, storeElementIndexers(indexers)), + resourceVersion: 0, + listResourceVersion: 0, + eventHandler: eventHandler, + clock: clock, + versioner: versioner, + objectType: objectType, + } + objType := objectType.String() + watchCacheCapacity.WithLabelValues(objType).Set(float64(wc.capacity)) + wc.cond = sync.NewCond(wc.RLocker()) + return wc +} + +// Add takes runtime.Object as an argument. +func (w *watchCache) Add(obj interface{}) error { + object, resourceVersion, err := w.objectToVersionedRuntimeObject(obj) + if err != nil { + return err + } + event := watch.Event{Type: watch.Added, Object: object} + + f := func(elem *storeElement) error { return w.store.Add(elem) } + return w.processEvent(event, resourceVersion, f) +} + +// Update takes runtime.Object as an argument. +func (w *watchCache) Update(obj interface{}) error { + object, resourceVersion, err := w.objectToVersionedRuntimeObject(obj) + if err != nil { + return err + } + event := watch.Event{Type: watch.Modified, Object: object} + + f := func(elem *storeElement) error { return w.store.Update(elem) } + return w.processEvent(event, resourceVersion, f) +} + +// Delete takes runtime.Object as an argument. +func (w *watchCache) Delete(obj interface{}) error { + object, resourceVersion, err := w.objectToVersionedRuntimeObject(obj) + if err != nil { + return err + } + event := watch.Event{Type: watch.Deleted, Object: object} + + f := func(elem *storeElement) error { return w.store.Delete(elem) } + return w.processEvent(event, resourceVersion, f) +} + +func (w *watchCache) objectToVersionedRuntimeObject(obj interface{}) (runtime.Object, uint64, error) { + object, ok := obj.(runtime.Object) + if !ok { + return nil, 0, fmt.Errorf("obj does not implement runtime.Object interface: %v", obj) + } + resourceVersion, err := w.versioner.ObjectResourceVersion(object) + if err != nil { + return nil, 0, err + } + return object, resourceVersion, nil +} + +// processEvent is safe as long as there is at most one call to it in flight +// at any point in time. 
+func (w *watchCache) processEvent(event watch.Event, resourceVersion uint64, updateFunc func(*storeElement) error) error { + key, err := w.keyFunc(event.Object) + if err != nil { + return fmt.Errorf("couldn't compute key: %v", err) + } + elem := &storeElement{Key: key, Object: event.Object} + elem.Labels, elem.Fields, err = w.getAttrsFunc(event.Object) + if err != nil { + return err + } + + wcEvent := &watchCacheEvent{ + Type: event.Type, + Object: elem.Object, + ObjLabels: elem.Labels, + ObjFields: elem.Fields, + Key: key, + ResourceVersion: resourceVersion, + RecordTime: w.clock.Now(), + } + + if err := func() error { + // TODO: We should consider moving this lock below after the watchCacheEvent + // is created. In such a situation, the only problematic scenario is Replace() + // happening after getting the object from the store and before acquiring a lock. + // Maybe introduce another lock for this purpose. + w.Lock() + defer w.Unlock() + + previous, exists, err := w.store.Get(elem) + if err != nil { + return err + } + if exists { + previousElem := previous.(*storeElement) + wcEvent.PrevObject = previousElem.Object + wcEvent.PrevObjLabels = previousElem.Labels + wcEvent.PrevObjFields = previousElem.Fields + } + + w.updateCache(wcEvent) + w.resourceVersion = resourceVersion + defer w.cond.Broadcast() + + return updateFunc(elem) + }(); err != nil { + return err + } + + // Avoid calling event handler under lock. + // This is safe as long as there is at most one call to Add/Update/Delete and + // UpdateResourceVersion in flight at any point in time, which is true now, + // because reflector calls them synchronously from its main thread. + if w.eventHandler != nil { + w.eventHandler(wcEvent) + } + return nil +} + +// Assumes that lock is already held for write. +func (w *watchCache) updateCache(event *watchCacheEvent) { + w.resizeCacheLocked(event.RecordTime) + if w.isCacheFullLocked() { + // Cache is full - remove the oldest element. + w.startIndex++ + } + w.cache[w.endIndex%w.capacity] = event + w.endIndex++ +} + +// resizeCacheLocked resizes the cache if necessary: +// - increases capacity by 2x if cache is full and all cached events occurred within the last eventFreshDuration. +// - decreases capacity by 2x when the most recent quarter of events occurred outside of eventFreshDuration (protects watchCache from flapping). +func (w *watchCache) resizeCacheLocked(eventTime time.Time) { + if w.isCacheFullLocked() && eventTime.Sub(w.cache[w.startIndex%w.capacity].RecordTime) < eventFreshDuration { + capacity := min(w.capacity*2, w.upperBoundCapacity) + if capacity > w.capacity { + w.doCacheResizeLocked(capacity) + } + return + } + if w.isCacheFullLocked() && eventTime.Sub(w.cache[(w.endIndex-w.capacity/4)%w.capacity].RecordTime) > eventFreshDuration { + capacity := max(w.capacity/2, w.lowerBoundCapacity) + if capacity < w.capacity { + w.doCacheResizeLocked(capacity) + } + return + } +} + +// isCacheFullLocked is used to judge whether the event cache is full. +// Assumes that lock is already held for write. +func (w *watchCache) isCacheFullLocked() bool { + return w.endIndex == w.startIndex+w.capacity +} + +// doCacheResizeLocked resizes watchCache's event array to a different capacity. +// Assumes that lock is already held for write. +func (w *watchCache) doCacheResizeLocked(capacity int) { + newCache := make([]*watchCacheEvent, capacity) + if capacity < w.capacity { + // adjust startIndex if the cache capacity shrinks.
+ w.startIndex = w.endIndex - capacity + } + for i := w.startIndex; i < w.endIndex; i++ { + newCache[i%capacity] = w.cache[i%w.capacity] + } + w.cache = newCache + recordsWatchCacheCapacityChange(w.objectType.String(), w.capacity, capacity) + w.capacity = capacity +} + +func (w *watchCache) UpdateResourceVersion(resourceVersion string) { + rv, err := w.versioner.ParseResourceVersion(resourceVersion) + if err != nil { + klog.Errorf("Couldn't parse resourceVersion: %v", err) + return + } + + func() { + w.Lock() + defer w.Unlock() + w.resourceVersion = rv + }() + + // Avoid calling event handler under lock. + // This is safe as long as there is at most one call to Add/Update/Delete and + // UpdateResourceVersion in flight at any point in time, which is true now, + // because reflector calls them synchronously from its main thread. + if w.eventHandler != nil { + wcEvent := &watchCacheEvent{ + Type: watch.Bookmark, + ResourceVersion: rv, + } + w.eventHandler(wcEvent) + } +} + +// List returns list of pointers to objects. +func (w *watchCache) List() []interface{} { + return w.store.List() +} + +// waitUntilFreshAndBlock waits until cache is at least as fresh as the given resourceVersion. +// NOTE: This function acquires the lock and doesn't release it. +// You HAVE TO explicitly call w.RUnlock() after this function. +func (w *watchCache) waitUntilFreshAndBlock(resourceVersion uint64, trace *utiltrace.Trace) error { + startTime := w.clock.Now() + go func() { + // Wake us up when the time limit has expired. The docs + // promise that time.After (well, NewTimer, which it calls) + // will wait *at least* the duration given. Since this go + // routine starts sometime after we record the start time, and + // it will wake up the loop below sometime after the broadcast, + // we don't need to worry about waking it up before the time + // has expired accidentally. + <-w.clock.After(blockTimeout) + w.cond.Broadcast() + }() + + w.RLock() + if trace != nil { + trace.Step("watchCache locked acquired") + } + for w.resourceVersion < resourceVersion { + if w.clock.Since(startTime) >= blockTimeout { + // Request that the client retry after 'resourceVersionTooHighRetrySeconds' seconds. + return storage.NewTooLargeResourceVersionError(resourceVersion, w.resourceVersion, resourceVersionTooHighRetrySeconds) + } + w.cond.Wait() + } + if trace != nil { + trace.Step("watchCache fresh enough") + } + return nil +} + +// WaitUntilFreshAndList returns list of pointers to objects. +func (w *watchCache) WaitUntilFreshAndList(resourceVersion uint64, matchValues []storage.MatchValue, trace *utiltrace.Trace) ([]interface{}, uint64, error) { + err := w.waitUntilFreshAndBlock(resourceVersion, trace) + defer w.RUnlock() + if err != nil { + return nil, 0, err + } + + // This isn't the place where we do "final filtering" - only some "prefiltering" is happening here. So the only + // requirement here is to NOT miss anything that should be returned. We can return as many non-matching items as we + // want - they will be filtered out later. The fact that we return fewer items is only a further performance improvement. + // TODO: if multiple indexes match, return the one with the fewest items, so as to do as much filtering as possible. + for _, matchValue := range matchValues { + if result, err := w.store.ByIndex(matchValue.IndexName, matchValue.Value); err == nil { + return result, w.resourceVersion, nil + } + } + return w.store.List(), w.resourceVersion, nil +} + +// WaitUntilFreshAndGet returns a pointer to the object.
+func (w *watchCache) WaitUntilFreshAndGet(resourceVersion uint64, key string, trace *utiltrace.Trace) (interface{}, bool, uint64, error) { + err := w.waitUntilFreshAndBlock(resourceVersion, trace) + defer w.RUnlock() + if err != nil { + return nil, false, 0, err + } + value, exists, err := w.store.GetByKey(key) + return value, exists, w.resourceVersion, err +} + +func (w *watchCache) ListKeys() []string { + return w.store.ListKeys() +} + +// Get takes runtime.Object as a parameter. However, it returns a +// pointer to the corresponding storeElement. +func (w *watchCache) Get(obj interface{}) (interface{}, bool, error) { + object, ok := obj.(runtime.Object) + if !ok { + return nil, false, fmt.Errorf("obj does not implement runtime.Object interface: %v", obj) + } + key, err := w.keyFunc(object) + if err != nil { + return nil, false, fmt.Errorf("couldn't compute key: %v", err) + } + + return w.store.Get(&storeElement{Key: key, Object: object}) +} + +// GetByKey returns a pointer to the storeElement for the given key. +func (w *watchCache) GetByKey(key string) (interface{}, bool, error) { + return w.store.GetByKey(key) +} + +// Replace takes a slice of runtime.Object as a parameter. +func (w *watchCache) Replace(objs []interface{}, resourceVersion string) error { + version, err := w.versioner.ParseResourceVersion(resourceVersion) + if err != nil { + return err + } + + toReplace := make([]interface{}, 0, len(objs)) + for _, obj := range objs { + object, ok := obj.(runtime.Object) + if !ok { + return fmt.Errorf("didn't get runtime.Object for replace: %#v", obj) + } + key, err := w.keyFunc(object) + if err != nil { + return fmt.Errorf("couldn't compute key: %v", err) + } + objLabels, objFields, err := w.getAttrsFunc(object) + if err != nil { + return err + } + toReplace = append(toReplace, &storeElement{ + Key: key, + Object: object, + Labels: objLabels, + Fields: objFields, + }) + } + + w.Lock() + defer w.Unlock() + + w.startIndex = 0 + w.endIndex = 0 + if err := w.store.Replace(toReplace, resourceVersion); err != nil { + return err + } + w.listResourceVersion = version + w.resourceVersion = version + if w.onReplace != nil { + w.onReplace() + } + w.cond.Broadcast() + klog.V(3).Infof("Replace watchCache (rev: %v) ", resourceVersion) + return nil +} + +func (w *watchCache) SetOnReplace(onReplace func()) { + w.Lock() + defer w.Unlock() + w.onReplace = onReplace +} + +func (w *watchCache) GetAllEventsSinceThreadUnsafe(resourceVersion uint64) ([]*watchCacheEvent, error) { + size := w.endIndex - w.startIndex + var oldest uint64 + switch { + case w.listResourceVersion > 0 && w.startIndex == 0: + // If no event was removed from the buffer since last relist, the oldest watch + // event we can deliver is one greater than the resource version of the list. + oldest = w.listResourceVersion + 1 + case size > 0: + // If the previous condition is not satisfied: either some event was already + // removed from the buffer or we've never completed a list (the latter can + // only happen in unit tests that populate the buffer without performing + // list/replace operations), the oldest watch event we can deliver is the first + // one in the buffer. + oldest = w.cache[w.startIndex%w.capacity].ResourceVersion + default: + return nil, fmt.Errorf("watch cache isn't correctly initialized") + } + + if resourceVersion == 0 { + // resourceVersion = 0 means that we don't require any specific starting point + // and we would like to start watching from ~now.
+ // However, to keep backward compatibility, we additionally need to return the + // current state and only then start watching from that point. + // + // TODO: In v2 api, we should stop returning the current state - #13969. + allItems := w.store.List() + result := make([]*watchCacheEvent, len(allItems)) + for i, item := range allItems { + elem, ok := item.(*storeElement) + if !ok { + return nil, fmt.Errorf("not a storeElement: %v", item) + } + objLabels, objFields, err := w.getAttrsFunc(elem.Object) + if err != nil { + return nil, err + } + result[i] = &watchCacheEvent{ + Type: watch.Added, + Object: elem.Object, + ObjLabels: objLabels, + ObjFields: objFields, + Key: elem.Key, + ResourceVersion: w.resourceVersion, + } + } + return result, nil + } + if resourceVersion < oldest-1 { + return nil, errors.NewResourceExpired(fmt.Sprintf("too old resource version: %d (%d)", resourceVersion, oldest-1)) + } + + // Binary search the smallest index at which resourceVersion is greater than the given one. + f := func(i int) bool { + return w.cache[(w.startIndex+i)%w.capacity].ResourceVersion > resourceVersion + } + first := sort.Search(size, f) + result := make([]*watchCacheEvent, size-first) + for i := 0; i < size-first; i++ { + result[i] = w.cache[(w.startIndex+first+i)%w.capacity] + } + return result, nil +} + +func (w *watchCache) GetAllEventsSince(resourceVersion uint64) ([]*watchCacheEvent, error) { + w.RLock() + defer w.RUnlock() + return w.GetAllEventsSinceThreadUnsafe(resourceVersion) +} + +func (w *watchCache) Resync() error { + // Nothing to do + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/errors/doc.go b/vendor/k8s.io/apiserver/pkg/storage/errors/doc.go new file mode 100644 index 000000000..e251b6168 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/errors/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package storage provides conversion of storage errors to API errors. +package storage // import "k8s.io/apiserver/pkg/storage/errors" diff --git a/vendor/k8s.io/apiserver/pkg/storage/errors/storage.go b/vendor/k8s.io/apiserver/pkg/storage/errors/storage.go new file mode 100644 index 000000000..fd3b35ed0 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/errors/storage.go @@ -0,0 +1,116 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
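// Editor's note: watch_cache.go above addresses its event buffer with
// ever-growing startIndex/endIndex reduced modulo capacity, and finds events
// newer than a resourceVersion with sort.Search. The stand-in below distills
// just that indexing; it stores bare uint64 versions instead of full
// watchCacheEvents, and the `ring` type is an illustrative assumption.

package main

import (
	"fmt"
	"sort"
)

type ring struct {
	buf        []uint64
	start, end int // absolute indexes; apply % len(buf) to address the slice
}

func (r *ring) add(rv uint64) {
	if r.end == r.start+len(r.buf) { // full: drop the oldest event
		r.start++
	}
	r.buf[r.end%len(r.buf)] = rv
	r.end++
}

// since returns all buffered versions strictly greater than rv, mirroring
// the binary search in GetAllEventsSinceThreadUnsafe.
func (r *ring) since(rv uint64) []uint64 {
	size := r.end - r.start
	first := sort.Search(size, func(i int) bool {
		return r.buf[(r.start+i)%len(r.buf)] > rv
	})
	out := make([]uint64, 0, size-first)
	for i := first; i < size; i++ {
		out = append(out, r.buf[(r.start+i)%len(r.buf)])
	}
	return out
}

func main() {
	r := &ring{buf: make([]uint64, 4)}
	for rv := uint64(1); rv <= 6; rv++ { // overflows capacity, evicting 1 and 2
		r.add(rv)
	}
	fmt.Println(r.since(4)) // [5 6]
}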
+*/ + +package storage + +import ( + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/storage" +) + +// InterpretListError converts a generic error on a retrieval +// operation into the appropriate API error. +func InterpretListError(err error, qualifiedResource schema.GroupResource) error { + switch { + case storage.IsNotFound(err): + return errors.NewNotFound(qualifiedResource, "") + case storage.IsUnreachable(err): + return errors.NewServerTimeout(qualifiedResource, "list", 2) // TODO: make configurable or handled at a higher level + case storage.IsInternalError(err): + return errors.NewInternalError(err) + default: + return err + } +} + +// InterpretGetError converts a generic error on a retrieval +// operation into the appropriate API error. +func InterpretGetError(err error, qualifiedResource schema.GroupResource, name string) error { + switch { + case storage.IsNotFound(err): + return errors.NewNotFound(qualifiedResource, name) + case storage.IsUnreachable(err): + return errors.NewServerTimeout(qualifiedResource, "get", 2) // TODO: make configurable or handled at a higher level + case storage.IsInternalError(err): + return errors.NewInternalError(err) + default: + return err + } +} + +// InterpretCreateError converts a generic error on a create +// operation into the appropriate API error. +func InterpretCreateError(err error, qualifiedResource schema.GroupResource, name string) error { + switch { + case storage.IsNodeExist(err): + return errors.NewAlreadyExists(qualifiedResource, name) + case storage.IsUnreachable(err): + return errors.NewServerTimeout(qualifiedResource, "create", 2) // TODO: make configurable or handled at a higher level + case storage.IsInternalError(err): + return errors.NewInternalError(err) + default: + return err + } +} + +// InterpretUpdateError converts a generic error on an update +// operation into the appropriate API error. +func InterpretUpdateError(err error, qualifiedResource schema.GroupResource, name string) error { + switch { + case storage.IsConflict(err), storage.IsNodeExist(err), storage.IsInvalidObj(err): + return errors.NewConflict(qualifiedResource, name, err) + case storage.IsUnreachable(err): + return errors.NewServerTimeout(qualifiedResource, "update", 2) // TODO: make configurable or handled at a higher level + case storage.IsNotFound(err): + return errors.NewNotFound(qualifiedResource, name) + case storage.IsInternalError(err): + return errors.NewInternalError(err) + default: + return err + } +} + +// InterpretDeleteError converts a generic error on a delete +// operation into the appropriate API error. +func InterpretDeleteError(err error, qualifiedResource schema.GroupResource, name string) error { + switch { + case storage.IsNotFound(err): + return errors.NewNotFound(qualifiedResource, name) + case storage.IsUnreachable(err): + return errors.NewServerTimeout(qualifiedResource, "delete", 2) // TODO: make configurable or handled at a higher level + case storage.IsConflict(err), storage.IsNodeExist(err), storage.IsInvalidObj(err): + return errors.NewConflict(qualifiedResource, name, err) + case storage.IsInternalError(err): + return errors.NewInternalError(err) + default: + return err + } +} + +// InterpretWatchError converts a generic error on a watch +// operation into the appropriate API error. 
+func InterpretWatchError(err error, resource schema.GroupResource, name string) error { + switch { + case storage.IsInvalidError(err): + invalidError, _ := err.(storage.InvalidError) + return errors.NewInvalid(schema.GroupKind{Group: resource.Group, Kind: resource.Resource}, name, invalidError.Errs) + case storage.IsInternalError(err): + return errors.NewInternalError(err) + default: + return err + } +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/aes/aes.go b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/aes/aes.go new file mode 100644 index 000000000..daa82f711 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/aes/aes.go @@ -0,0 +1,152 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package aes transforms values for storage at rest using AES-GCM. +package aes + +import ( + "bytes" + "crypto/aes" + "crypto/cipher" + "crypto/rand" + "errors" + "fmt" + "io" + + "k8s.io/apiserver/pkg/storage/value" +) + +// gcm implements AEAD encryption of the provided values given a cipher.Block algorithm. +// The authenticated data provided as part of the value.Context method must match when the same +// value is set to and loaded from storage. In order to ensure that values cannot be copied by +// an attacker from a location under their control, use characteristics of the storage location +// (such as the etcd key) as part of the authenticated data. +// +// Because this mode requires a generated IV and IV reuse is a known weakness of AES-GCM, keys +// must be rotated before a birthday attack becomes feasible. NIST SP 800-38D +// (http://csrc.nist.gov/publications/nistpubs/800-38D/SP-800-38D.pdf) recommends using the same +// key with random 96-bit nonces (the default nonce length) no more than 2^32 times, and +// therefore transformers using this implementation *must* ensure they allow for frequent key +// rotation. Future work should include investigation of AES-GCM-SIV as an alternative to +// random nonces. +type gcm struct { + block cipher.Block +} + +// NewGCMTransformer takes the given block cipher and performs encryption and decryption on the given +// data. 
+func NewGCMTransformer(block cipher.Block) value.Transformer { + return &gcm{block: block} +} + +func (t *gcm) TransformFromStorage(data []byte, context value.Context) ([]byte, bool, error) { + aead, err := cipher.NewGCM(t.block) + if err != nil { + return nil, false, err + } + nonceSize := aead.NonceSize() + if len(data) < nonceSize { + return nil, false, fmt.Errorf("the stored data was shorter than the required size") + } + result, err := aead.Open(nil, data[:nonceSize], data[nonceSize:], context.AuthenticatedData()) + return result, false, err +} + +func (t *gcm) TransformToStorage(data []byte, context value.Context) ([]byte, error) { + aead, err := cipher.NewGCM(t.block) + if err != nil { + return nil, err + } + nonceSize := aead.NonceSize() + result := make([]byte, nonceSize+aead.Overhead()+len(data)) + n, err := rand.Read(result[:nonceSize]) + if err != nil { + return nil, err + } + if n != nonceSize { + return nil, fmt.Errorf("unable to read sufficient random bytes") + } + cipherText := aead.Seal(result[nonceSize:nonceSize], result[:nonceSize], data, context.AuthenticatedData()) + return result[:nonceSize+len(cipherText)], nil +} + +// cbc implements encryption at rest of the provided values given a cipher.Block algorithm. +type cbc struct { + block cipher.Block +} + +// NewCBCTransformer takes the given block cipher and performs encryption and decryption on the given +// data. +func NewCBCTransformer(block cipher.Block) value.Transformer { + return &cbc{block: block} +} + +var ( + errInvalidBlockSize = fmt.Errorf("the stored data is not a multiple of the block size") + errInvalidPKCS7Data = errors.New("invalid PKCS7 data (empty or not padded)") + errInvalidPKCS7Padding = errors.New("invalid padding on input") +) + +func (t *cbc) TransformFromStorage(data []byte, context value.Context) ([]byte, bool, error) { + blockSize := aes.BlockSize + if len(data) < blockSize { + return nil, false, fmt.Errorf("the stored data was shorter than the required size") + } + iv := data[:blockSize] + data = data[blockSize:] + + if len(data)%blockSize != 0 { + return nil, false, errInvalidBlockSize + } + + result := make([]byte, len(data)) + copy(result, data) + mode := cipher.NewCBCDecrypter(t.block, iv) + mode.CryptBlocks(result, result) + + // remove and verify PKCS#7 padding for CBC + c := result[len(result)-1] + paddingSize := int(c) + size := len(result) - paddingSize + if paddingSize == 0 || paddingSize > len(result) { + return nil, false, errInvalidPKCS7Data + } + for i := 0; i < paddingSize; i++ { + if result[size+i] != c { + return nil, false, errInvalidPKCS7Padding + } + } + + return result[:size], false, nil +} + +func (t *cbc) TransformToStorage(data []byte, context value.Context) ([]byte, error) { + blockSize := aes.BlockSize + paddingSize := blockSize - (len(data) % blockSize) + result := make([]byte, blockSize+len(data)+paddingSize) + iv := result[:blockSize] + if _, err := io.ReadFull(rand.Reader, iv); err != nil { + return nil, fmt.Errorf("unable to read sufficient random bytes") + } + copy(result[blockSize:], data) + + // add PKCS#7 padding for CBC + copy(result[blockSize+len(data):], bytes.Repeat([]byte{byte(paddingSize)}, paddingSize)) + + mode := cipher.NewCBCEncrypter(t.block, iv) + mode.CryptBlocks(result[blockSize:], result[blockSize:]) + return result, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/envelope.go b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/envelope.go new file mode 100644 index 000000000..357ea0582 --- /dev/null 
+++ b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/envelope.go @@ -0,0 +1,196 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package envelope transforms values for storage at rest using an Envelope provider +package envelope + +import ( + "crypto/aes" + "crypto/cipher" + "crypto/rand" + "encoding/base64" + "fmt" + "time" + + "k8s.io/apiserver/pkg/storage/value" + "k8s.io/utils/lru" + + "golang.org/x/crypto/cryptobyte" +) + +func init() { + value.RegisterMetrics() + registerMetrics() +} + +// Service allows encrypting and decrypting data using an external Key Management Service. +type Service interface { + // Decrypt a given byte array to obtain the original data as bytes. + Decrypt(data []byte) ([]byte, error) + // Encrypt bytes to a ciphertext. + Encrypt(data []byte) ([]byte, error) +} + +type envelopeTransformer struct { + envelopeService Service + + // transformers is a thread-safe LRU cache which caches decrypted DEKs indexed by their encrypted form. + transformers *lru.Cache + + // baseTransformerFunc creates a new transformer for encrypting the data with the DEK. + baseTransformerFunc func(cipher.Block) value.Transformer + + cacheSize int + cacheEnabled bool +} + +// NewEnvelopeTransformer returns a transformer which implements a KEK-DEK based envelope encryption scheme. +// It uses envelopeService to encrypt and decrypt DEKs. Respective DEKs (in encrypted form) are prepended to +// the data items they encrypt. A cache (of size cacheSize) is maintained to store the most recently +// used decrypted DEKs in memory. +func NewEnvelopeTransformer(envelopeService Service, cacheSize int, baseTransformerFunc func(cipher.Block) value.Transformer) (value.Transformer, error) { + var ( + cache *lru.Cache + ) + + if cacheSize > 0 { + cache = lru.New(cacheSize) + } + return &envelopeTransformer{ + envelopeService: envelopeService, + transformers: cache, + baseTransformerFunc: baseTransformerFunc, + cacheEnabled: cacheSize > 0, + cacheSize: cacheSize, + }, nil +} + +// TransformFromStorage decrypts data encrypted by this transformer using envelope encryption. +func (t *envelopeTransformer) TransformFromStorage(data []byte, context value.Context) ([]byte, bool, error) { + recordArrival(fromStorageLabel, time.Now()) + + // Read the 16 bit length-of-DEK encoded at the start of the encrypted DEK. 16 bits can + // represent a maximum key length of 65536 bytes. We are using a 256 bit key, whose + // length cannot fit in 8 bits (1 byte). Thus, we use 16 bits (2 bytes) to store the length. + var encKey cryptobyte.String + s := cryptobyte.String(data) + if ok := s.ReadUint16LengthPrefixed(&encKey); !ok { + return nil, false, fmt.Errorf("invalid data encountered by envelope transformer: failed to read uint16 length prefixed data") + } + + encData := []byte(s) + + // Look up the decrypted DEK from cache or Envelope.
+ transformer := t.getTransformer(encKey) + if transformer == nil { + if t.cacheEnabled { + value.RecordCacheMiss() + } + key, err := t.envelopeService.Decrypt(encKey) + if err != nil { + // Do NOT wrap this err using fmt.Errorf() or similar functions + // because this gRPC status error has useful error code when + // recording the metric. + return nil, false, err + } + + transformer, err = t.addTransformer(encKey, key) + if err != nil { + return nil, false, err + } + } + + return transformer.TransformFromStorage(encData, context) +} + +// TransformToStorage encrypts data to be written to disk using envelope encryption. +func (t *envelopeTransformer) TransformToStorage(data []byte, context value.Context) ([]byte, error) { + recordArrival(toStorageLabel, time.Now()) + newKey, err := generateKey(32) + if err != nil { + return nil, err + } + + encKey, err := t.envelopeService.Encrypt(newKey) + if err != nil { + // Do NOT wrap this err using fmt.Errorf() or similar functions + // because this gRPC status error has useful error code when + // recording the metric. + return nil, err + } + + transformer, err := t.addTransformer(encKey, newKey) + if err != nil { + return nil, err + } + + result, err := transformer.TransformToStorage(data, context) + if err != nil { + return nil, err + } + // Append the length of the encrypted DEK as the first 2 bytes. + b := cryptobyte.NewBuilder(nil) + b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { + b.AddBytes([]byte(encKey)) + }) + b.AddBytes(result) + + return b.Bytes() +} + +var _ value.Transformer = &envelopeTransformer{} + +// addTransformer inserts a new transformer to the Envelope cache of DEKs for future reads. +func (t *envelopeTransformer) addTransformer(encKey []byte, key []byte) (value.Transformer, error) { + block, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + transformer := t.baseTransformerFunc(block) + // Use base64 of encKey as the key into the cache because hashicorp/golang-lru + // cannot hash []uint8. + if t.cacheEnabled { + t.transformers.Add(base64.StdEncoding.EncodeToString(encKey), transformer) + dekCacheFillPercent.Set(float64(t.transformers.Len()) / float64(t.cacheSize)) + } + return transformer, nil +} + +// getTransformer fetches the transformer corresponding to encKey from cache, if it exists. +func (t *envelopeTransformer) getTransformer(encKey []byte) value.Transformer { + if !t.cacheEnabled { + return nil + } + + _transformer, found := t.transformers.Get(base64.StdEncoding.EncodeToString(encKey)) + if found { + return _transformer.(value.Transformer) + } + return nil +} + +// generateKey generates a random key using system randomness. +func generateKey(length int) (key []byte, err error) { + defer func(start time.Time) { + value.RecordDataKeyGeneration(start, err) + }(time.Now()) + key = make([]byte, length) + if _, err = rand.Read(key); err != nil { + return nil, err + } + + return key, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/grpc_service.go b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/grpc_service.go new file mode 100644 index 000000000..7aa5d232f --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/grpc_service.go @@ -0,0 +1,181 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
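// Editor's note: the envelope transformer above stores each value as a
// uint16 length prefix, the KMS-encrypted DEK, then the DEK-encrypted
// payload. The sketch below round-trips that framing with the vendored
// golang.org/x/crypto/cryptobyte package; frame/unframe are illustrative
// helper names, not part of the vendored API.

package main

import (
	"fmt"

	"golang.org/x/crypto/cryptobyte"
)

// frame prepends a 2-byte length prefix for encKey, then appends the payload.
func frame(encKey, payload []byte) ([]byte, error) {
	b := cryptobyte.NewBuilder(nil)
	b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
		b.AddBytes(encKey)
	})
	b.AddBytes(payload)
	return b.Bytes()
}

// unframe splits the encrypted DEK back out; whatever remains is the payload.
func unframe(data []byte) (encKey, payload []byte, err error) {
	var key cryptobyte.String
	s := cryptobyte.String(data)
	if !s.ReadUint16LengthPrefixed(&key) {
		return nil, nil, fmt.Errorf("missing uint16 length prefix")
	}
	return []byte(key), []byte(s), nil
}

func main() {
	out, _ := frame([]byte("encrypted-dek"), []byte("ciphertext"))
	k, p, _ := unframe(out)
	fmt.Printf("dek=%q payload=%q\n", k, p)
}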
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package envelope transforms values for storage at rest using an Envelope provider +package envelope + +import ( + "context" + "fmt" + "net" + "net/url" + "strings" + "sync" + "time" + + "k8s.io/klog/v2" + + "google.golang.org/grpc" + + kmsapi "k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1" +) + +const ( + // Currently only unix domain sockets are supported. + unixProtocol = "unix" + + // Current version for the protocol interface definition. + kmsapiVersion = "v1beta1" + + versionErrorf = "KMS provider api version %s is not supported, only %s is supported now" +) + +// The gRPC implementation for envelope.Service. +type gRPCService struct { + kmsClient kmsapi.KeyManagementServiceClient + connection *grpc.ClientConn + callTimeout time.Duration + mux sync.RWMutex + versionChecked bool +} + +// NewGRPCService returns an envelope.Service which uses gRPC to communicate with the remote KMS provider. +func NewGRPCService(endpoint string, callTimeout time.Duration) (Service, error) { + klog.V(4).Infof("Configure KMS provider with endpoint: %s", endpoint) + + addr, err := parseEndpoint(endpoint) + if err != nil { + return nil, err + } + + s := &gRPCService{callTimeout: callTimeout} + s.connection, err = grpc.Dial( + addr, + grpc.WithInsecure(), + grpc.WithUnaryInterceptor(s.interceptor), + grpc.WithDefaultCallOptions(grpc.WaitForReady(true)), + grpc.WithContextDialer( + func(context.Context, string) (net.Conn, error) { + // Ignoring addr and timeout arguments: + // addr - comes from the closure + c, err := net.DialUnix(unixProtocol, nil, &net.UnixAddr{Name: addr}) + if err != nil { + klog.Errorf("failed to create connection to unix socket: %s, error: %v", addr, err) + } else { + klog.V(4).Infof("Successfully dialed Unix socket %v", addr) + } + return c, err + })) + + if err != nil { + return nil, fmt.Errorf("failed to create connection to %s, error: %v", endpoint, err) + } + + s.kmsClient = kmsapi.NewKeyManagementServiceClient(s.connection) + return s, nil +} + +// Parse the endpoint to extract scheme, host or path. +func parseEndpoint(endpoint string) (string, error) { + if len(endpoint) == 0 { + return "", fmt.Errorf("remote KMS provider can't use empty string as endpoint") + } + + u, err := url.Parse(endpoint) + if err != nil { + return "", fmt.Errorf("invalid endpoint %q for remote KMS provider, error: %v", endpoint, err) + } + + if u.Scheme != unixProtocol { + return "", fmt.Errorf("unsupported scheme %q for remote KMS provider", u.Scheme) + } + + // Linux abstract namespace socket - no physical file required + // Warning: Linux Abstract sockets have no concept of ACLs (unlike traditional file-based sockets). + // However, Linux Abstract sockets are subject to the Linux networking namespace, so they will only be accessible to + // containers within the same pod (unless host networking is used).
+ if strings.HasPrefix(u.Path, "/@") { + return strings.TrimPrefix(u.Path, "/"), nil + } + + return u.Path, nil +} + +func (g *gRPCService) checkAPIVersion(ctx context.Context) error { + g.mux.Lock() + defer g.mux.Unlock() + + if g.versionChecked { + return nil + } + + request := &kmsapi.VersionRequest{Version: kmsapiVersion} + response, err := g.kmsClient.Version(ctx, request) + if err != nil { + return fmt.Errorf("failed to get version from remote KMS provider: %v", err) + } + if response.Version != kmsapiVersion { + return fmt.Errorf(versionErrorf, response.Version, kmsapiVersion) + } + g.versionChecked = true + + klog.V(4).Infof("Version of KMS provider is %s", response.Version) + return nil +} + +// Decrypt a given ciphertext to obtain the original byte data. +func (g *gRPCService) Decrypt(cipher []byte) ([]byte, error) { + ctx, cancel := context.WithTimeout(context.Background(), g.callTimeout) + defer cancel() + + request := &kmsapi.DecryptRequest{Cipher: cipher, Version: kmsapiVersion} + response, err := g.kmsClient.Decrypt(ctx, request) + if err != nil { + return nil, err + } + return response.Plain, nil +} + +// Encrypt bytes to a ciphertext. +func (g *gRPCService) Encrypt(plain []byte) ([]byte, error) { + ctx, cancel := context.WithTimeout(context.Background(), g.callTimeout) + defer cancel() + + request := &kmsapi.EncryptRequest{Plain: plain, Version: kmsapiVersion} + response, err := g.kmsClient.Encrypt(ctx, request) + if err != nil { + return nil, err + } + return response.Cipher, nil +} + +func (g *gRPCService) interceptor( + ctx context.Context, + method string, + req interface{}, + reply interface{}, + cc *grpc.ClientConn, + invoker grpc.UnaryInvoker, + opts ...grpc.CallOption, +) error { + if !kmsapi.IsVersionCheckMethod(method) { + if err := g.checkAPIVersion(ctx); err != nil { + return err + } + } + + return invoker(ctx, method, req, reply, cc, opts...) +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/metrics.go b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/metrics.go new file mode 100644 index 000000000..e5499f1e1 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/metrics.go @@ -0,0 +1,102 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
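// Editor's note: a small illustration of the endpoint handling implemented
// by parseEndpoint above: only the unix scheme is accepted, and a "/@"
// prefix denotes a Linux abstract-namespace socket whose leading "/" is
// stripped. The parse helper below is a simplified stand-in, not the
// vendored function.

package main

import (
	"fmt"
	"net/url"
	"strings"
)

func parse(endpoint string) (string, error) {
	u, err := url.Parse(endpoint)
	if err != nil {
		return "", err
	}
	if u.Scheme != "unix" {
		return "", fmt.Errorf("unsupported scheme %q", u.Scheme)
	}
	if strings.HasPrefix(u.Path, "/@") {
		return strings.TrimPrefix(u.Path, "/"), nil // abstract socket: "@name"
	}
	return u.Path, nil
}

func main() {
	for _, e := range []string{"unix:///tmp/kms.sock", "unix:///@kms"} {
		addr, err := parse(e)
		fmt.Println(addr, err) // "/tmp/kms.sock", then "@kms"
	}
}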
+*/ + +package envelope + +import ( + "sync" + "time" + + "k8s.io/component-base/metrics" + "k8s.io/component-base/metrics/legacyregistry" +) + +const ( + namespace = "apiserver" + subsystem = "envelope_encryption" + fromStorageLabel = "from_storage" + toStorageLabel = "to_storage" +) + +/* + * By default, all the following metrics are defined as falling under + * ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/1209-metrics-stability/kubernetes-control-plane-metrics-stability.md#stability-classes) + * + * Promoting the stability level of the metric is a responsibility of the component owner, since it + * involves explicitly acknowledging support for the metric across multiple releases, in accordance with + * the metric stability policy. + */ +var ( + lockLastFromStorage sync.Mutex + lockLastToStorage sync.Mutex + + lastFromStorage time.Time + lastToStorage time.Time + + dekCacheFillPercent = metrics.NewGauge( + &metrics.GaugeOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "dek_cache_fill_percent", + Help: "Percent of the cache slots currently occupied by cached DEKs.", + StabilityLevel: metrics.ALPHA, + }, + ) + + dekCacheInterArrivals = metrics.NewHistogramVec( + &metrics.HistogramOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "dek_cache_inter_arrival_time_seconds", + Help: "Time (in seconds) of inter arrival of transformation requests.", + StabilityLevel: metrics.ALPHA, + Buckets: metrics.ExponentialBuckets(60, 2, 10), + }, + []string{"transformation_type"}, + ) +) + +var registerMetricsFunc sync.Once + +func registerMetrics() { + registerMetricsFunc.Do(func() { + legacyregistry.MustRegister(dekCacheFillPercent) + legacyregistry.MustRegister(dekCacheInterArrivals) + }) +} + +func recordArrival(transformationType string, start time.Time) { + switch transformationType { + case fromStorageLabel: + lockLastFromStorage.Lock() + defer lockLastFromStorage.Unlock() + + if lastFromStorage.IsZero() { + lastFromStorage = start + } + dekCacheInterArrivals.WithLabelValues(transformationType).Observe(start.Sub(lastFromStorage).Seconds()) + lastFromStorage = start + case toStorageLabel: + lockLastToStorage.Lock() + defer lockLastToStorage.Unlock() + + if lastToStorage.IsZero() { + lastToStorage = start + } + dekCacheInterArrivals.WithLabelValues(transformationType).Observe(start.Sub(lastToStorage).Seconds()) + lastToStorage = start + } +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1/service.pb.go b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1/service.pb.go new file mode 100644 index 000000000..0d71bb2ba --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1/service.pb.go @@ -0,0 +1,502 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: service.proto + +package v1beta1 + +import ( + context "context" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type VersionRequest struct { + // Version of the KMS plugin API. + Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VersionRequest) Reset() { *m = VersionRequest{} } +func (m *VersionRequest) String() string { return proto.CompactTextString(m) } +func (*VersionRequest) ProtoMessage() {} +func (*VersionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a0b84a42fa06f626, []int{0} +} +func (m *VersionRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VersionRequest.Unmarshal(m, b) +} +func (m *VersionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VersionRequest.Marshal(b, m, deterministic) +} +func (m *VersionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_VersionRequest.Merge(m, src) +} +func (m *VersionRequest) XXX_Size() int { + return xxx_messageInfo_VersionRequest.Size(m) +} +func (m *VersionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_VersionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_VersionRequest proto.InternalMessageInfo + +func (m *VersionRequest) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +type VersionResponse struct { + // Version of the KMS plugin API. + Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + // Name of the KMS provider. + RuntimeName string `protobuf:"bytes,2,opt,name=runtime_name,json=runtimeName,proto3" json:"runtime_name,omitempty"` + // Version of the KMS provider. The string must be semver-compatible. 
+ RuntimeVersion string `protobuf:"bytes,3,opt,name=runtime_version,json=runtimeVersion,proto3" json:"runtime_version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VersionResponse) Reset() { *m = VersionResponse{} } +func (m *VersionResponse) String() string { return proto.CompactTextString(m) } +func (*VersionResponse) ProtoMessage() {} +func (*VersionResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_a0b84a42fa06f626, []int{1} +} +func (m *VersionResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VersionResponse.Unmarshal(m, b) +} +func (m *VersionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VersionResponse.Marshal(b, m, deterministic) +} +func (m *VersionResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_VersionResponse.Merge(m, src) +} +func (m *VersionResponse) XXX_Size() int { + return xxx_messageInfo_VersionResponse.Size(m) +} +func (m *VersionResponse) XXX_DiscardUnknown() { + xxx_messageInfo_VersionResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_VersionResponse proto.InternalMessageInfo + +func (m *VersionResponse) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *VersionResponse) GetRuntimeName() string { + if m != nil { + return m.RuntimeName + } + return "" +} + +func (m *VersionResponse) GetRuntimeVersion() string { + if m != nil { + return m.RuntimeVersion + } + return "" +} + +type DecryptRequest struct { + // Version of the KMS plugin API. + Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + // The data to be decrypted. + Cipher []byte `protobuf:"bytes,2,opt,name=cipher,proto3" json:"cipher,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DecryptRequest) Reset() { *m = DecryptRequest{} } +func (m *DecryptRequest) String() string { return proto.CompactTextString(m) } +func (*DecryptRequest) ProtoMessage() {} +func (*DecryptRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a0b84a42fa06f626, []int{2} +} +func (m *DecryptRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DecryptRequest.Unmarshal(m, b) +} +func (m *DecryptRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DecryptRequest.Marshal(b, m, deterministic) +} +func (m *DecryptRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DecryptRequest.Merge(m, src) +} +func (m *DecryptRequest) XXX_Size() int { + return xxx_messageInfo_DecryptRequest.Size(m) +} +func (m *DecryptRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DecryptRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DecryptRequest proto.InternalMessageInfo + +func (m *DecryptRequest) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *DecryptRequest) GetCipher() []byte { + if m != nil { + return m.Cipher + } + return nil +} + +type DecryptResponse struct { + // The decrypted data. 
+ Plain []byte `protobuf:"bytes,1,opt,name=plain,proto3" json:"plain,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DecryptResponse) Reset() { *m = DecryptResponse{} } +func (m *DecryptResponse) String() string { return proto.CompactTextString(m) } +func (*DecryptResponse) ProtoMessage() {} +func (*DecryptResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_a0b84a42fa06f626, []int{3} +} +func (m *DecryptResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DecryptResponse.Unmarshal(m, b) +} +func (m *DecryptResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DecryptResponse.Marshal(b, m, deterministic) +} +func (m *DecryptResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DecryptResponse.Merge(m, src) +} +func (m *DecryptResponse) XXX_Size() int { + return xxx_messageInfo_DecryptResponse.Size(m) +} +func (m *DecryptResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DecryptResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_DecryptResponse proto.InternalMessageInfo + +func (m *DecryptResponse) GetPlain() []byte { + if m != nil { + return m.Plain + } + return nil +} + +type EncryptRequest struct { + // Version of the KMS plugin API. + Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + // The data to be encrypted. + Plain []byte `protobuf:"bytes,2,opt,name=plain,proto3" json:"plain,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EncryptRequest) Reset() { *m = EncryptRequest{} } +func (m *EncryptRequest) String() string { return proto.CompactTextString(m) } +func (*EncryptRequest) ProtoMessage() {} +func (*EncryptRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a0b84a42fa06f626, []int{4} +} +func (m *EncryptRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EncryptRequest.Unmarshal(m, b) +} +func (m *EncryptRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EncryptRequest.Marshal(b, m, deterministic) +} +func (m *EncryptRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_EncryptRequest.Merge(m, src) +} +func (m *EncryptRequest) XXX_Size() int { + return xxx_messageInfo_EncryptRequest.Size(m) +} +func (m *EncryptRequest) XXX_DiscardUnknown() { + xxx_messageInfo_EncryptRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_EncryptRequest proto.InternalMessageInfo + +func (m *EncryptRequest) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *EncryptRequest) GetPlain() []byte { + if m != nil { + return m.Plain + } + return nil +} + +type EncryptResponse struct { + // The encrypted data. 
+ Cipher []byte `protobuf:"bytes,1,opt,name=cipher,proto3" json:"cipher,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EncryptResponse) Reset() { *m = EncryptResponse{} } +func (m *EncryptResponse) String() string { return proto.CompactTextString(m) } +func (*EncryptResponse) ProtoMessage() {} +func (*EncryptResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_a0b84a42fa06f626, []int{5} +} +func (m *EncryptResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EncryptResponse.Unmarshal(m, b) +} +func (m *EncryptResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EncryptResponse.Marshal(b, m, deterministic) +} +func (m *EncryptResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_EncryptResponse.Merge(m, src) +} +func (m *EncryptResponse) XXX_Size() int { + return xxx_messageInfo_EncryptResponse.Size(m) +} +func (m *EncryptResponse) XXX_DiscardUnknown() { + xxx_messageInfo_EncryptResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_EncryptResponse proto.InternalMessageInfo + +func (m *EncryptResponse) GetCipher() []byte { + if m != nil { + return m.Cipher + } + return nil +} + +func init() { + proto.RegisterType((*VersionRequest)(nil), "v1beta1.VersionRequest") + proto.RegisterType((*VersionResponse)(nil), "v1beta1.VersionResponse") + proto.RegisterType((*DecryptRequest)(nil), "v1beta1.DecryptRequest") + proto.RegisterType((*DecryptResponse)(nil), "v1beta1.DecryptResponse") + proto.RegisterType((*EncryptRequest)(nil), "v1beta1.EncryptRequest") + proto.RegisterType((*EncryptResponse)(nil), "v1beta1.EncryptResponse") +} + +func init() { proto.RegisterFile("service.proto", fileDescriptor_a0b84a42fa06f626) } + +var fileDescriptor_a0b84a42fa06f626 = []byte{ + // 287 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x52, 0xcd, 0x4a, 0xc4, 0x30, + 0x10, 0xde, 0xae, 0xb8, 0xc5, 0xb1, 0xb6, 0x10, 0x16, 0x2d, 0x9e, 0x34, 0x97, 0x55, 0x0f, 0x85, + 0xd5, 0xbb, 0x88, 0xe8, 0x49, 0xf4, 0x50, 0xc1, 0xab, 0x64, 0xcb, 0xa0, 0x05, 0x9b, 0xc6, 0x24, + 0x5b, 0xd9, 0x17, 0xf5, 0x79, 0xc4, 0x66, 0x5a, 0xd3, 0x15, 0x71, 0x8f, 0x33, 0x99, 0xef, 0x6f, + 0x26, 0xb0, 0x67, 0x50, 0x37, 0x65, 0x81, 0x99, 0xd2, 0xb5, 0xad, 0x59, 0xd8, 0xcc, 0x17, 0x68, + 0xc5, 0x9c, 0x9f, 0x41, 0xfc, 0x84, 0xda, 0x94, 0xb5, 0xcc, 0xf1, 0x7d, 0x89, 0xc6, 0xb2, 0x14, + 0xc2, 0xc6, 0x75, 0xd2, 0xe0, 0x28, 0x38, 0xd9, 0xc9, 0xbb, 0x92, 0x7f, 0x40, 0xd2, 0xcf, 0x1a, + 0x55, 0x4b, 0x83, 0x7f, 0x0f, 0xb3, 0x63, 0x88, 0xf4, 0x52, 0xda, 0xb2, 0xc2, 0x67, 0x29, 0x2a, + 0x4c, 0xc7, 0xed, 0xf3, 0x2e, 0xf5, 0x1e, 0x44, 0x85, 0x6c, 0x06, 0x49, 0x37, 0xd2, 0x91, 0x6c, + 0xb5, 0x53, 0x31, 0xb5, 0x49, 0x8d, 0x5f, 0x43, 0x7c, 0x83, 0x85, 0x5e, 0x29, 0xfb, 0xaf, 0x49, + 0xb6, 0x0f, 0x93, 0xa2, 0x54, 0xaf, 0xa8, 0x5b, 0xc5, 0x28, 0xa7, 0x8a, 0xcf, 0x20, 0xe9, 0x39, + 0xc8, 0xfc, 0x14, 0xb6, 0xd5, 0x9b, 0x28, 0x1d, 0x45, 0x94, 0xbb, 0x82, 0x5f, 0x41, 0x7c, 0x2b, + 0x37, 0x14, 0xeb, 0x19, 0xc6, 0x3e, 0xc3, 0x29, 0x24, 0x3d, 0x03, 0x49, 0xfd, 0xb8, 0x0a, 0x7c, + 0x57, 0xe7, 0x9f, 0x01, 0x4c, 0xef, 0x70, 0x75, 0x2f, 0xa4, 0x78, 0xc1, 0x0a, 0xa5, 0x7d, 0x74, + 0x67, 0x62, 0x97, 0x10, 0x52, 0x7a, 0x76, 0x90, 0xd1, 0xb1, 0xb2, 0xe1, 0xa5, 0x0e, 0xd3, 0xdf, + 0x0f, 0x4e, 0x8e, 0x8f, 0xbe, 0xf1, 0x14, 0xd7, 0xc3, 0x0f, 0x97, 0xe8, 0xe1, 0xd7, 0x36, 0xe3, + 0xf0, 0x94, 0xc1, 0xc3, 0x0f, 0xf7, 0xe2, 0xe1, 0xd7, 0xe2, 0xf2, 0xd1, 0x62, 0xd2, 0xfe, 
0xb3, + 0x8b, 0xaf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x33, 0x8d, 0x09, 0xe1, 0x78, 0x02, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// KeyManagementServiceClient is the client API for KeyManagementService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type KeyManagementServiceClient interface { + // Version returns the runtime name and runtime version of the KMS provider. + Version(ctx context.Context, in *VersionRequest, opts ...grpc.CallOption) (*VersionResponse, error) + // Execute decryption operation in KMS provider. + Decrypt(ctx context.Context, in *DecryptRequest, opts ...grpc.CallOption) (*DecryptResponse, error) + // Execute encryption operation in KMS provider. + Encrypt(ctx context.Context, in *EncryptRequest, opts ...grpc.CallOption) (*EncryptResponse, error) +} + +type keyManagementServiceClient struct { + cc *grpc.ClientConn +} + +func NewKeyManagementServiceClient(cc *grpc.ClientConn) KeyManagementServiceClient { + return &keyManagementServiceClient{cc} +} + +func (c *keyManagementServiceClient) Version(ctx context.Context, in *VersionRequest, opts ...grpc.CallOption) (*VersionResponse, error) { + out := new(VersionResponse) + err := c.cc.Invoke(ctx, "/v1beta1.KeyManagementService/Version", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) Decrypt(ctx context.Context, in *DecryptRequest, opts ...grpc.CallOption) (*DecryptResponse, error) { + out := new(DecryptResponse) + err := c.cc.Invoke(ctx, "/v1beta1.KeyManagementService/Decrypt", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) Encrypt(ctx context.Context, in *EncryptRequest, opts ...grpc.CallOption) (*EncryptResponse, error) { + out := new(EncryptResponse) + err := c.cc.Invoke(ctx, "/v1beta1.KeyManagementService/Encrypt", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// KeyManagementServiceServer is the server API for KeyManagementService service. +type KeyManagementServiceServer interface { + // Version returns the runtime name and runtime version of the KMS provider. + Version(context.Context, *VersionRequest) (*VersionResponse, error) + // Execute decryption operation in KMS provider. + Decrypt(context.Context, *DecryptRequest) (*DecryptResponse, error) + // Execute encryption operation in KMS provider. + Encrypt(context.Context, *EncryptRequest) (*EncryptResponse, error) +} + +// UnimplementedKeyManagementServiceServer can be embedded to have forward compatible implementations. 
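+//
+// For example, a hypothetical myServer struct can embed it so that methods
+// added to the service later fail with codes.Unimplemented at runtime
+// instead of breaking the build:
+//
+//	type myServer struct {
+//		UnimplementedKeyManagementServiceServer
+//	}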
+type UnimplementedKeyManagementServiceServer struct { +} + +func (*UnimplementedKeyManagementServiceServer) Version(ctx context.Context, req *VersionRequest) (*VersionResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Version not implemented") +} +func (*UnimplementedKeyManagementServiceServer) Decrypt(ctx context.Context, req *DecryptRequest) (*DecryptResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Decrypt not implemented") +} +func (*UnimplementedKeyManagementServiceServer) Encrypt(ctx context.Context, req *EncryptRequest) (*EncryptResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Encrypt not implemented") +} + +func RegisterKeyManagementServiceServer(s *grpc.Server, srv KeyManagementServiceServer) { + s.RegisterService(&_KeyManagementService_serviceDesc, srv) +} + +func _KeyManagementService_Version_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VersionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).Version(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/v1beta1.KeyManagementService/Version", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).Version(ctx, req.(*VersionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_Decrypt_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DecryptRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).Decrypt(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/v1beta1.KeyManagementService/Decrypt", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).Decrypt(ctx, req.(*DecryptRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_Encrypt_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(EncryptRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).Encrypt(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/v1beta1.KeyManagementService/Encrypt", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).Encrypt(ctx, req.(*EncryptRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _KeyManagementService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "v1beta1.KeyManagementService", + HandlerType: (*KeyManagementServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Version", + Handler: _KeyManagementService_Version_Handler, + }, + { + MethodName: "Decrypt", + Handler: _KeyManagementService_Decrypt_Handler, + }, + { + MethodName: "Encrypt", + Handler: _KeyManagementService_Encrypt_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "service.proto", +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1/service.proto b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1/service.proto new file mode 
100644 index 000000000..b6c2f31c7 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1/service.proto @@ -0,0 +1,70 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// To regenerate service.pb.go run hack/update-generated-kms.sh +syntax = "proto3"; + +package v1beta1; + +// This service defines the public APIs for remote KMS provider. +service KeyManagementService { + // Version returns the runtime name and runtime version of the KMS provider. + rpc Version(VersionRequest) returns (VersionResponse) {} + + // Execute decryption operation in KMS provider. + rpc Decrypt(DecryptRequest) returns (DecryptResponse) {} + // Execute encryption operation in KMS provider. + rpc Encrypt(EncryptRequest) returns (EncryptResponse) {} +} + +message VersionRequest { + // Version of the KMS plugin API. + string version = 1; +} + +message VersionResponse { + // Version of the KMS plugin API. + string version = 1; + // Name of the KMS provider. + string runtime_name = 2; + // Version of the KMS provider. The string must be semver-compatible. + string runtime_version = 3; +} + +message DecryptRequest { + // Version of the KMS plugin API. + string version = 1; + // The data to be decrypted. + bytes cipher = 2; +} + +message DecryptResponse { + // The decrypted data. + bytes plain = 1; +} + +message EncryptRequest { + // Version of the KMS plugin API. + string version = 1; + // The data to be encrypted. + bytes plain = 2; +} + +message EncryptResponse { + // The encrypted data. + bytes cipher = 1; +} + diff --git a/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1/v1beta1.go b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1/v1beta1.go new file mode 100644 index 000000000..842d0a2fd --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1/v1beta1.go @@ -0,0 +1,23 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1beta1 contains definition of kms-plugin's gRPC service. +package v1beta1 + +// IsVersionCheckMethod determines whether the supplied method is a version check against kms-plugin. 
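+// The envelope package's client interceptor consults it so that the version
+// check is skipped for the Version RPC itself, which would otherwise recurse
+// into another version check. The method argument is the full gRPC method
+// string, e.g. "/v1beta1.KeyManagementService/Version".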
+func IsVersionCheckMethod(method string) bool { + return method == "/v1beta1.KeyManagementService/Version" +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/identity/identity.go b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/identity/identity.go new file mode 100644 index 000000000..e322bd9b1 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/identity/identity.go @@ -0,0 +1,50 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package identity + +import ( + "bytes" + "fmt" + + "k8s.io/apiserver/pkg/storage/value" +) + +// identityTransformer performs no transformation on provided data, but validates +// that the data is not encrypted data during TransformFromStorage +type identityTransformer struct{} + +// NewEncryptCheckTransformer returns an identityTransformer which returns an error +// on attempts to read encrypted data +func NewEncryptCheckTransformer() value.Transformer { + return identityTransformer{} +} + +// TransformFromStorage returns the input bytes if the data is not encrypted +func (identityTransformer) TransformFromStorage(b []byte, context value.Context) ([]byte, bool, error) { + // identityTransformer has to return an error if the data is encoded using another transformer. + // JSON data starts with '{'. Protobuf data has a prefix 'k8s[\x00-\xFF]'. + // Prefix 'k8s:enc:' is reserved for encrypted data on disk. + if bytes.HasPrefix(b, []byte("k8s:enc:")) { + return []byte{}, false, fmt.Errorf("identity transformer tried to read encrypted data") + } + return b, false, nil +} + +// TransformToStorage implements the Transformer interface for identityTransformer +func (identityTransformer) TransformToStorage(b []byte, context value.Context) ([]byte, error) { + return b, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/secretbox/secretbox.go b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/secretbox/secretbox.go new file mode 100644 index 000000000..0eaa62824 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/secretbox/secretbox.go @@ -0,0 +1,69 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package secretbox transforms values for storage at rest using XSalsa20 and Poly1305. +package secretbox + +import ( + "crypto/rand" + "fmt" + + "golang.org/x/crypto/nacl/secretbox" + + "k8s.io/apiserver/pkg/storage/value" +) + +// secretbox implements at rest encryption of the provided values given a 32 byte secret key. 
+// Uses a standard 24 byte nonce (placed at the beginning of the cipher text) generated +// from crypto/rand. Does not perform authentication of the data at rest. +type secretboxTransformer struct { + key [32]byte +} + +const nonceSize = 24 + +// NewSecretboxTransformer takes the given key and performs encryption and decryption on the given +// data. +func NewSecretboxTransformer(key [32]byte) value.Transformer { + return &secretboxTransformer{key: key} +} + +func (t *secretboxTransformer) TransformFromStorage(data []byte, context value.Context) ([]byte, bool, error) { + if len(data) < (secretbox.Overhead + nonceSize) { + return nil, false, fmt.Errorf("the stored data was shorter than the required size") + } + var nonce [nonceSize]byte + copy(nonce[:], data[:nonceSize]) + data = data[nonceSize:] + out := make([]byte, 0, len(data)-secretbox.Overhead) + result, ok := secretbox.Open(out, data, &nonce, &t.key) + if !ok { + return nil, false, fmt.Errorf("output array was not large enough for encryption") + } + return result, false, nil +} + +func (t *secretboxTransformer) TransformToStorage(data []byte, context value.Context) ([]byte, error) { + var nonce [nonceSize]byte + n, err := rand.Read(nonce[:]) + if err != nil { + return nil, err + } + if n != nonceSize { + return nil, fmt.Errorf("unable to read sufficient random bytes") + } + return secretbox.Seal(nonce[:], data, &nonce, &t.key), nil +} diff --git a/vendor/k8s.io/apiserver/pkg/tracing/config.go b/vendor/k8s.io/apiserver/pkg/tracing/config.go new file mode 100644 index 000000000..33ec6ae10 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/tracing/config.go @@ -0,0 +1,119 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package tracing + +import ( + "fmt" + "io/ioutil" + "net/url" + "strings" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/apis/apiserver" + "k8s.io/apiserver/pkg/apis/apiserver/install" +) + +const ( + maxSamplingRatePerMillion = 1000000 +) + +var ( + cfgScheme = runtime.NewScheme() + codecs = serializer.NewCodecFactory(cfgScheme) +) + +func init() { + install.Install(cfgScheme) +} + +// ReadTracingConfiguration reads the tracing configuration from a file +func ReadTracingConfiguration(configFilePath string) (*apiserver.TracingConfiguration, error) { + if configFilePath == "" { + return nil, fmt.Errorf("tracing config file was empty") + } + data, err := ioutil.ReadFile(configFilePath) + if err != nil { + return nil, fmt.Errorf("unable to read tracing configuration from %q: %v", configFilePath, err) + } + internalConfig := &apiserver.TracingConfiguration{} + // this handles json/yaml/whatever, and decodes all registered version to the internal version + if err := runtime.DecodeInto(codecs.UniversalDecoder(), data, internalConfig); err != nil { + return nil, fmt.Errorf("unable to decode tracing configuration data: %v", err) + } + return internalConfig, nil +} + +// ValidateTracingConfiguration validates the tracing configuration +func ValidateTracingConfiguration(config *apiserver.TracingConfiguration) field.ErrorList { + allErrs := field.ErrorList{} + if config == nil { + // Tracing is disabled + return allErrs + } + if config.SamplingRatePerMillion != nil { + allErrs = append(allErrs, validateSamplingRate(*config.SamplingRatePerMillion, field.NewPath("samplingRatePerMillion"))...) + } + if config.Endpoint != nil { + allErrs = append(allErrs, validateEndpoint(*config.Endpoint, field.NewPath("endpoint"))...) + } + return allErrs +} + +func validateSamplingRate(rate int32, fldPath *field.Path) field.ErrorList { + errs := field.ErrorList{} + if rate < 0 { + errs = append(errs, field.Invalid( + fldPath, rate, + "sampling rate must be positive", + )) + } + if rate > maxSamplingRatePerMillion { + errs = append(errs, field.Invalid( + fldPath, rate, + "sampling rate per million must be less than or equal to one million", + )) + } + return errs +} + +func validateEndpoint(endpoint string, fldPath *field.Path) field.ErrorList { + errs := field.ErrorList{} + if !strings.Contains(endpoint, "//") { + endpoint = "dns://" + endpoint + } + url, err := url.Parse(endpoint) + if err != nil { + errs = append(errs, field.Invalid( + fldPath, endpoint, + err.Error(), + )) + return errs + } + switch url.Scheme { + case "dns": + case "unix": + case "unix-abstract": + default: + errs = append(errs, field.Invalid( + fldPath, endpoint, + fmt.Sprintf("unsupported scheme: %v. Options are none, dns, unix, or unix-abstract. See https://github.com/grpc/grpc/blob/master/doc/naming.md", url.Scheme), + )) + } + return errs +} diff --git a/vendor/k8s.io/apiserver/plugin/pkg/audit/buffered/buffered.go b/vendor/k8s.io/apiserver/plugin/pkg/audit/buffered/buffered.go new file mode 100644 index 000000000..a96d9bea3 --- /dev/null +++ b/vendor/k8s.io/apiserver/plugin/pkg/audit/buffered/buffered.go @@ -0,0 +1,290 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package buffered + +import ( + "fmt" + "sync" + "time" + + "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + auditinternal "k8s.io/apiserver/pkg/apis/audit" + "k8s.io/apiserver/pkg/audit" + "k8s.io/client-go/util/flowcontrol" +) + +// PluginName is the name reported in error metrics. +const PluginName = "buffered" + +// BatchConfig represents batching delegate audit backend configuration. +type BatchConfig struct { + // BufferSize defines a size of the buffering queue. + BufferSize int + // MaxBatchSize defines maximum size of a batch. + MaxBatchSize int + // MaxBatchWait indicates the maximum interval between two batches. + MaxBatchWait time.Duration + + // ThrottleEnable defines whether throttling will be applied to the batching process. + ThrottleEnable bool + // ThrottleQPS defines the allowed rate of batches per second sent to the delegate backend. + ThrottleQPS float32 + // ThrottleBurst defines the maximum number of requests sent to the delegate backend at the same moment in case + // the capacity defined by ThrottleQPS was not utilized. + ThrottleBurst int + + // Whether the delegate backend should be called asynchronously. + AsyncDelegate bool +} + +type bufferedBackend struct { + // The delegate backend that actually exports events. + delegateBackend audit.Backend + + // Channel to buffer events before sending to the delegate backend. + buffer chan *auditinternal.Event + // Maximum number of events in a batch sent to the delegate backend. + maxBatchSize int + // Amount of time to wait after sending a batch to the delegate backend before sending another one. + // + // Receiving maxBatchSize events will always trigger sending a batch, regardless of the amount of time passed. + maxBatchWait time.Duration + + // Whether the delegate backend should be called asynchronously. + asyncDelegate bool + + // Channel to signal that the batching routine has processed all remaining events and exited. + // Once `shutdownCh` is closed no new events will be sent to the delegate backend. + shutdownCh chan struct{} + + // WaitGroup to control the concurrency of sending batches to the delegate backend. + // Worker routine calls Add before sending a batch and + // then spawns a routine that calls Done after batch was processed by the delegate backend. + // This WaitGroup is used to wait for all sending routines to finish before shutting down audit backend. + wg sync.WaitGroup + + // Limits the number of batches sent to the delegate backend per second. + throttle flowcontrol.RateLimiter +} + +var _ audit.Backend = &bufferedBackend{} + +// NewBackend returns a buffered audit backend that wraps delegate backend. +// Buffered backend automatically runs and shuts down the delegate backend. 
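+//
+// A usage sketch (the values below are illustrative, not defaults):
+//
+//	backend := NewBackend(delegate, BatchConfig{
+//		BufferSize:   10000,
+//		MaxBatchSize: 400,
+//		MaxBatchWait: 30 * time.Second,
+//	})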
+func NewBackend(delegate audit.Backend, config BatchConfig) audit.Backend { + var throttle flowcontrol.RateLimiter + if config.ThrottleEnable { + throttle = flowcontrol.NewTokenBucketRateLimiter(config.ThrottleQPS, config.ThrottleBurst) + } + return &bufferedBackend{ + delegateBackend: delegate, + buffer: make(chan *auditinternal.Event, config.BufferSize), + maxBatchSize: config.MaxBatchSize, + maxBatchWait: config.MaxBatchWait, + asyncDelegate: config.AsyncDelegate, + shutdownCh: make(chan struct{}), + wg: sync.WaitGroup{}, + throttle: throttle, + } +} + +func (b *bufferedBackend) Run(stopCh <-chan struct{}) error { + go func() { + // Signal that the working routine has exited. + defer close(b.shutdownCh) + + b.processIncomingEvents(stopCh) + + // Handle the events that were received after the last buffer + // scraping and before this line. Since the buffer is closed, no new + // events will come through. + allEventsProcessed := false + timer := make(chan time.Time) + for !allEventsProcessed { + allEventsProcessed = func() bool { + // Recover from any panic in order to try to process all remaining events. + // Note, that in case of a panic, the return value will be false and + // the loop execution will continue. + defer runtime.HandleCrash() + + events := b.collectEvents(timer, wait.NeverStop) + b.processEvents(events) + return len(events) == 0 + }() + } + }() + return b.delegateBackend.Run(stopCh) +} + +// Shutdown blocks until stopCh passed to the Run method is closed and all +// events added prior to that moment are batched and sent to the delegate backend. +func (b *bufferedBackend) Shutdown() { + // Wait until the routine spawned in Run method exits. + <-b.shutdownCh + + // Wait until all sending routines exit. + // + // - When b.shutdownCh is closed, we know that the goroutine in Run has terminated. + // - This means that processIncomingEvents has terminated. + // - Which means that b.buffer is closed and cannot accept any new events anymore. + // - Because processEvents is called synchronously from the Run goroutine, the waitgroup has its final value. + // Hence wg.Wait will not miss any more outgoing batches. + b.wg.Wait() + + b.delegateBackend.Shutdown() +} + +// processIncomingEvents runs a loop that collects events from the buffer. When +// b.stopCh is closed, processIncomingEvents stops and closes the buffer. +func (b *bufferedBackend) processIncomingEvents(stopCh <-chan struct{}) { + defer close(b.buffer) + + var ( + maxWaitChan <-chan time.Time + maxWaitTimer *time.Timer + ) + // Only use max wait batching if batching is enabled. + if b.maxBatchSize > 1 { + maxWaitTimer = time.NewTimer(b.maxBatchWait) + maxWaitChan = maxWaitTimer.C + defer maxWaitTimer.Stop() + } + + for { + func() { + // Recover from any panics caused by this function so a panic in the + // goroutine can't bring down the main routine. + defer runtime.HandleCrash() + + if b.maxBatchSize > 1 { + maxWaitTimer.Reset(b.maxBatchWait) + } + b.processEvents(b.collectEvents(maxWaitChan, stopCh)) + }() + + select { + case <-stopCh: + return + default: + } + } +} + +// collectEvents attempts to collect some number of events in a batch. +// +// The following things can cause collectEvents to stop and return the list +// of events: +// +// * Maximum number of events for a batch. +// * Timer has passed. +// * Buffer channel is closed and empty. +// * stopCh is closed. 
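+//
+// Whichever condition fires first ends the batch; the implementation uses a
+// labeled break because a plain break inside select would only exit the
+// select statement, not the collection loop.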
+func (b *bufferedBackend) collectEvents(timer <-chan time.Time, stopCh <-chan struct{}) []*auditinternal.Event { + var events []*auditinternal.Event + +L: + for i := 0; i < b.maxBatchSize; i++ { + select { + case ev, ok := <-b.buffer: + // Buffer channel was closed and no new events will follow. + if !ok { + break L + } + events = append(events, ev) + case <-timer: + // Timer has expired. Send currently accumulated batch. + break L + case <-stopCh: + // Backend has been stopped. Send currently accumulated batch. + break L + } + } + + return events +} + +// processEvents process the batch events in a goroutine using delegateBackend's ProcessEvents. +func (b *bufferedBackend) processEvents(events []*auditinternal.Event) { + if len(events) == 0 { + return + } + + // TODO(audit): Should control the number of active goroutines + // if one goroutine takes 5 seconds to finish, the number of goroutines can be 5 * defaultBatchThrottleQPS + if b.throttle != nil { + b.throttle.Accept() + } + + if b.asyncDelegate { + b.wg.Add(1) + go func() { + defer b.wg.Done() + defer runtime.HandleCrash() + + // Execute the real processing in a goroutine to keep it from blocking. + // This lets the batching routine continue draining the queue immediately. + b.delegateBackend.ProcessEvents(events...) + }() + } else { + func() { + defer runtime.HandleCrash() + + // Execute the real processing in a goroutine to keep it from blocking. + // This lets the batching routine continue draining the queue immediately. + b.delegateBackend.ProcessEvents(events...) + }() + } +} + +func (b *bufferedBackend) ProcessEvents(ev ...*auditinternal.Event) bool { + // The following mechanism is in place to support the situation when audit + // events are still coming after the backend was stopped. + var sendErr error + var evIndex int + + // If the delegateBackend was shutdown and the buffer channel was closed, an + // attempt to add an event to it will result in panic that we should + // recover from. + defer func() { + if err := recover(); err != nil { + sendErr = fmt.Errorf("audit backend shut down") + } + if sendErr != nil { + audit.HandlePluginError(PluginName, sendErr, ev[evIndex:]...) + } + }() + + for i, e := range ev { + evIndex = i + // Per the audit.Backend interface these events are reused after being + // sent to the Sink. Deep copy and send the copy to the queue. + event := e.DeepCopy() + + select { + case b.buffer <- event: + default: + sendErr = fmt.Errorf("audit buffer queue blocked") + return true + } + } + return true +} + +func (b *bufferedBackend) String() string { + return fmt.Sprintf("%s<%s>", PluginName, b.delegateBackend) +} diff --git a/vendor/k8s.io/apiserver/plugin/pkg/audit/buffered/doc.go b/vendor/k8s.io/apiserver/plugin/pkg/audit/buffered/doc.go new file mode 100644 index 000000000..a82599e42 --- /dev/null +++ b/vendor/k8s.io/apiserver/plugin/pkg/audit/buffered/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package buffered provides an implementation for the audit.Backend interface +// that batches incoming audit events and sends batches to the delegate audit.Backend. +package buffered // import "k8s.io/apiserver/plugin/pkg/audit/buffered" diff --git a/vendor/k8s.io/apiserver/plugin/pkg/audit/log/backend.go b/vendor/k8s.io/apiserver/plugin/pkg/audit/log/backend.go new file mode 100644 index 000000000..2ef2cc6ec --- /dev/null +++ b/vendor/k8s.io/apiserver/plugin/pkg/audit/log/backend.go @@ -0,0 +1,104 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package log + +import ( + "fmt" + "io" + "strings" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + auditinternal "k8s.io/apiserver/pkg/apis/audit" + "k8s.io/apiserver/pkg/audit" +) + +const ( + // FormatLegacy saves event in 1-line text format. + FormatLegacy = "legacy" + // FormatJson saves event in structured json format. + FormatJson = "json" + + // PluginName is the name of this plugin, to be used in help and logs. + PluginName = "log" +) + +// AllowedFormats are the formats known by log backend. +var AllowedFormats = []string{ + FormatLegacy, + FormatJson, +} + +type backend struct { + out io.Writer + format string + encoder runtime.Encoder +} + +var _ audit.Backend = &backend{} + +func NewBackend(out io.Writer, format string, groupVersion schema.GroupVersion) audit.Backend { + return &backend{ + out: out, + format: format, + encoder: audit.Codecs.LegacyCodec(groupVersion), + } +} + +func (b *backend) ProcessEvents(events ...*auditinternal.Event) bool { + success := true + for _, ev := range events { + success = b.logEvent(ev) && success + } + return success +} + +func (b *backend) logEvent(ev *auditinternal.Event) bool { + line := "" + switch b.format { + case FormatLegacy: + line = audit.EventString(ev) + "\n" + case FormatJson: + bs, err := runtime.Encode(b.encoder, ev) + if err != nil { + audit.HandlePluginError(PluginName, err, ev) + return false + } + line = string(bs[:]) + default: + audit.HandlePluginError(PluginName, fmt.Errorf("log format %q is not in list of known formats (%s)", + b.format, strings.Join(AllowedFormats, ",")), ev) + return false + } + if _, err := fmt.Fprint(b.out, line); err != nil { + audit.HandlePluginError(PluginName, err, ev) + return false + } + return true +} + +func (b *backend) Run(stopCh <-chan struct{}) error { + return nil +} + +func (b *backend) Shutdown() { + // Nothing to do here. +} + +func (b *backend) String() string { + return PluginName +} diff --git a/vendor/k8s.io/apiserver/plugin/pkg/audit/truncate/doc.go b/vendor/k8s.io/apiserver/plugin/pkg/audit/truncate/doc.go new file mode 100644 index 000000000..9392ac314 --- /dev/null +++ b/vendor/k8s.io/apiserver/plugin/pkg/audit/truncate/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package truncate provides an implementation for the audit.Backend interface
+// that truncates audit events and sends them to the delegate audit.Backend.
+package truncate // import "k8s.io/apiserver/plugin/pkg/audit/truncate"
diff --git a/vendor/k8s.io/apiserver/plugin/pkg/audit/truncate/truncate.go b/vendor/k8s.io/apiserver/plugin/pkg/audit/truncate/truncate.go
new file mode 100644
index 000000000..de1c2d9f7
--- /dev/null
+++ b/vendor/k8s.io/apiserver/plugin/pkg/audit/truncate/truncate.go
@@ -0,0 +1,160 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package truncate
+
+import (
+	"fmt"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	utilerrors "k8s.io/apimachinery/pkg/util/errors"
+	auditinternal "k8s.io/apiserver/pkg/apis/audit"
+	"k8s.io/apiserver/pkg/audit"
+)
+
+const (
+	// PluginName is the name reported in error metrics.
+	PluginName = "truncate"
+
+	// annotationKey defines the name of the annotation used to indicate truncation.
+	annotationKey = "audit.k8s.io/truncated"
+	// annotationValue defines the value of the annotation used to indicate truncation.
+	annotationValue = "true"
+)
+
+// Config represents truncating backend configuration.
+type Config struct {
+	// MaxEventSize defines the max allowed size of an event. If the event is larger,
+	// truncating will be performed.
+	MaxEventSize int64
+
+	// MaxBatchSize defines the max allowed size of a batch of events passed to the backend.
+	// If the total size of the batch is larger than this number, the batch will be split. The
+	// actual size of the serialized request might be slightly higher, on the order of hundreds of bytes.
+	MaxBatchSize int64
+}
+
+type backend struct {
+	// The delegate backend that actually exports events.
+	delegateBackend audit.Backend
+
+	// Configuration used for truncation.
+	c Config
+
+	// Encoder used to calculate audit event sizes.
+	e runtime.Encoder
+}
+
+var _ audit.Backend = &backend{}
+
+// NewBackend returns a new truncating backend, using configuration passed in the parameters.
+// The truncate backend automatically runs and shuts down the delegate backend.
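+//
+// A usage sketch (sizes are illustrative; groupVersion is the audit API
+// version used when serializing events to measure them):
+//
+//	backend := NewBackend(delegate, Config{
+//		MaxEventSize: 100 * 1024,       // 100 KiB per event
+//		MaxBatchSize: 10 * 1024 * 1024, // 10 MiB per delegated batch
+//	}, groupVersion)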
+func NewBackend(delegateBackend audit.Backend, config Config, groupVersion schema.GroupVersion) audit.Backend {
+	return &backend{
+		delegateBackend: delegateBackend,
+		c:               config,
+		e:               audit.Codecs.LegacyCodec(groupVersion),
+	}
+}
+
+func (b *backend) ProcessEvents(events ...*auditinternal.Event) bool {
+	var errors []error
+	var impacted []*auditinternal.Event
+	var batch []*auditinternal.Event
+	var batchSize int64
+	success := true
+	for _, event := range events {
+		size, err := b.calcSize(event)
+		// If the event was correctly serialized, but the size is more than allowed
+		// and it makes sense to do trimming, i.e. there's a request and/or
+		// response present, try to strip away request and response.
+		if err == nil && size > b.c.MaxEventSize && event.Level.GreaterOrEqual(auditinternal.LevelRequest) {
+			event = truncate(event)
+			size, err = b.calcSize(event)
+		}
+		if err != nil {
+			errors = append(errors, err)
+			impacted = append(impacted, event)
+			continue
+		}
+		if size > b.c.MaxEventSize {
+			errors = append(errors, fmt.Errorf("event is too large even after truncating"))
+			impacted = append(impacted, event)
+			continue
+		}
+
+		if len(batch) > 0 && batchSize+size > b.c.MaxBatchSize {
+			success = b.delegateBackend.ProcessEvents(batch...) && success
+			batch = []*auditinternal.Event{}
+			batchSize = 0
+		}
+
+		batchSize += size
+		batch = append(batch, event)
+	}
+
+	if len(batch) > 0 {
+		success = b.delegateBackend.ProcessEvents(batch...) && success
+	}
+
+	if len(impacted) > 0 {
+		audit.HandlePluginError(PluginName, utilerrors.NewAggregate(errors), impacted...)
+	}
+	return success
+}
+
+// truncate removes request and response objects from the audit event,
+// to try and keep at least the metadata.
+func truncate(e *auditinternal.Event) *auditinternal.Event {
+	// Make a shallow copy to avoid copying response/request objects.
+	newEvent := &auditinternal.Event{}
+	*newEvent = *e
+
+	newEvent.RequestObject = nil
+	newEvent.ResponseObject = nil
+	audit.LogAnnotation(newEvent, annotationKey, annotationValue)
+	return newEvent
+}
+
+func (b *backend) Run(stopCh <-chan struct{}) error {
+	return b.delegateBackend.Run(stopCh)
+}
+
+func (b *backend) Shutdown() {
+	b.delegateBackend.Shutdown()
+}
+
+func (b *backend) calcSize(e *auditinternal.Event) (int64, error) {
+	s := &sizer{}
+	if err := b.e.Encode(e, s); err != nil {
+		return 0, err
+	}
+	return s.Size, nil
+}
+
+func (b *backend) String() string {
+	return fmt.Sprintf("%s<%s>", PluginName, b.delegateBackend)
+}
+
+type sizer struct {
+	Size int64
+}
+
+func (s *sizer) Write(p []byte) (n int, err error) {
+	s.Size += int64(len(p))
+	return len(p), nil
+}
diff --git a/vendor/k8s.io/apiserver/plugin/pkg/audit/webhook/webhook.go b/vendor/k8s.io/apiserver/plugin/pkg/audit/webhook/webhook.go
new file mode 100644
index 000000000..0a2aa7078
--- /dev/null
+++ b/vendor/k8s.io/apiserver/plugin/pkg/audit/webhook/webhook.go
@@ -0,0 +1,139 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +// Package webhook implements the audit.Backend interface using HTTP webhooks. +package webhook + +import ( + "context" + "fmt" + "time" + + "k8s.io/apimachinery/pkg/runtime/schema" + utilnet "k8s.io/apimachinery/pkg/util/net" + "k8s.io/apimachinery/pkg/util/wait" + auditinternal "k8s.io/apiserver/pkg/apis/audit" + "k8s.io/apiserver/pkg/apis/audit/install" + "k8s.io/apiserver/pkg/audit" + "k8s.io/apiserver/pkg/util/webhook" + "k8s.io/client-go/rest" + utiltrace "k8s.io/utils/trace" +) + +const ( + // PluginName is the name of this plugin, to be used in help and logs. + PluginName = "webhook" + + // DefaultInitialBackoffDelay is the default amount of time to wait before + // retrying sending audit events through a webhook. + DefaultInitialBackoffDelay = 10 * time.Second +) + +func init() { + install.Install(audit.Scheme) +} + +// retryOnError enforces the webhook client to retry requests +// on error regardless of its nature. +// The default implementation considers a very limited set of +// 'retriable' errors, assuming correct use of HTTP codes by +// external webhooks. +// That may easily lead to dropped audit events. In fact, there is +// hardly any error that could be a justified reason NOT to retry +// sending audit events if there is even a slight chance that the +// receiving service gets back to normal at some point. +func retryOnError(err error) bool { + if err != nil { + return true + } + return false +} + +func loadWebhook(configFile string, groupVersion schema.GroupVersion, retryBackoff wait.Backoff, customDial utilnet.DialFunc) (*webhook.GenericWebhook, error) { + w, err := webhook.NewGenericWebhook(audit.Scheme, audit.Codecs, configFile, + []schema.GroupVersion{groupVersion}, retryBackoff, customDial) + if err != nil { + return nil, err + } + + w.ShouldRetry = retryOnError + return w, nil +} + +type backend struct { + w *webhook.GenericWebhook + name string +} + +// NewDynamicBackend returns an audit backend configured from a REST client that +// sends events over HTTP to an external service. +func NewDynamicBackend(rc *rest.RESTClient, retryBackoff wait.Backoff) audit.Backend { + return &backend{ + w: &webhook.GenericWebhook{ + RestClient: rc, + RetryBackoff: retryBackoff, + ShouldRetry: retryOnError, + }, + name: fmt.Sprintf("dynamic_%s", PluginName), + } +} + +// NewBackend returns an audit backend that sends events over HTTP to an external service. +func NewBackend(kubeConfigFile string, groupVersion schema.GroupVersion, retryBackoff wait.Backoff, customDial utilnet.DialFunc) (audit.Backend, error) { + w, err := loadWebhook(kubeConfigFile, groupVersion, retryBackoff, customDial) + if err != nil { + return nil, err + } + return &backend{w: w, name: PluginName}, nil +} + +func (b *backend) Run(stopCh <-chan struct{}) error { + return nil +} + +func (b *backend) Shutdown() { + // nothing to do here +} + +func (b *backend) ProcessEvents(ev ...*auditinternal.Event) bool { + if err := b.processEvents(ev...); err != nil { + audit.HandlePluginError(b.String(), err, ev...) 
+ return false + } + return true +} + +func (b *backend) processEvents(ev ...*auditinternal.Event) error { + var list auditinternal.EventList + for _, e := range ev { + list.Items = append(list.Items, *e) + } + return b.w.WithExponentialBackoff(context.Background(), func() rest.Result { + trace := utiltrace.New("Call Audit Events webhook", + utiltrace.Field{"name", b.name}, + utiltrace.Field{"event-count", len(list.Items)}) + // Only log audit webhook traces that exceed a 25ms per object limit plus a 50ms + // request overhead allowance. The high per object limit used here is primarily to + // allow enough time for the serialization/deserialization of audit events, which + // contain nested request and response objects plus additional event fields. + defer trace.LogIfLong(time.Duration(50+25*len(list.Items)) * time.Millisecond) + return b.w.RestClient.Post().Body(&list).Do(context.TODO()) + }).Error() +} + +func (b *backend) String() string { + return b.name +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 5169b7d88..03ee2f4d7 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -362,6 +362,13 @@ go.uber.org/zap/internal/color go.uber.org/zap/internal/exit go.uber.org/zap/zapcore go.uber.org/zap/zapgrpc +# golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 +golang.org/x/crypto/cryptobyte +golang.org/x/crypto/cryptobyte/asn1 +golang.org/x/crypto/internal/subtle +golang.org/x/crypto/nacl/secretbox +golang.org/x/crypto/poly1305 +golang.org/x/crypto/salsa20/salsa # golang.org/x/mod v0.4.2 golang.org/x/mod/module golang.org/x/mod/semver @@ -384,6 +391,7 @@ golang.org/x/oauth2/internal # golang.org/x/sync v0.0.0-20210220032951-036812b2e83c golang.org/x/sync/singleflight # golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf +golang.org/x/sys/cpu golang.org/x/sys/execabs golang.org/x/sys/internal/unsafeheader golang.org/x/sys/plan9 @@ -535,6 +543,8 @@ google.golang.org/protobuf/types/known/timestamppb google.golang.org/protobuf/types/known/wrapperspb # gopkg.in/inf.v0 v0.9.1 gopkg.in/inf.v0 +# gopkg.in/natefinch/lumberjack.v2 v2.0.0 +gopkg.in/natefinch/lumberjack.v2 # gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 gopkg.in/tomb.v1 # gopkg.in/yaml.v2 v2.4.0 @@ -686,10 +696,14 @@ k8s.io/apiserver/pkg/apis/apiserver/v1 k8s.io/apiserver/pkg/apis/apiserver/v1alpha1 k8s.io/apiserver/pkg/apis/apiserver/v1beta1 k8s.io/apiserver/pkg/apis/audit +k8s.io/apiserver/pkg/apis/audit/install k8s.io/apiserver/pkg/apis/audit/v1 k8s.io/apiserver/pkg/apis/audit/v1alpha1 k8s.io/apiserver/pkg/apis/audit/v1beta1 k8s.io/apiserver/pkg/apis/audit/validation +k8s.io/apiserver/pkg/apis/config +k8s.io/apiserver/pkg/apis/config/v1 +k8s.io/apiserver/pkg/apis/config/validation k8s.io/apiserver/pkg/apis/flowcontrol/bootstrap k8s.io/apiserver/pkg/audit k8s.io/apiserver/pkg/audit/policy @@ -708,6 +722,7 @@ k8s.io/apiserver/pkg/authentication/token/tokenfile k8s.io/apiserver/pkg/authentication/user k8s.io/apiserver/pkg/authorization/authorizer k8s.io/apiserver/pkg/authorization/authorizerfactory +k8s.io/apiserver/pkg/authorization/path k8s.io/apiserver/pkg/authorization/union k8s.io/apiserver/pkg/endpoints k8s.io/apiserver/pkg/endpoints/deprecation @@ -727,6 +742,7 @@ k8s.io/apiserver/pkg/endpoints/warning k8s.io/apiserver/pkg/features k8s.io/apiserver/pkg/quota/v1 k8s.io/apiserver/pkg/registry/generic +k8s.io/apiserver/pkg/registry/generic/registry k8s.io/apiserver/pkg/registry/rest k8s.io/apiserver/pkg/server k8s.io/apiserver/pkg/server/dynamiccertificates @@ -736,16 +752,27 @@ 
k8s.io/apiserver/pkg/server/filters k8s.io/apiserver/pkg/server/healthz k8s.io/apiserver/pkg/server/httplog k8s.io/apiserver/pkg/server/mux +k8s.io/apiserver/pkg/server/options +k8s.io/apiserver/pkg/server/options/encryptionconfig +k8s.io/apiserver/pkg/server/resourceconfig k8s.io/apiserver/pkg/server/routes k8s.io/apiserver/pkg/server/storage k8s.io/apiserver/pkg/storage +k8s.io/apiserver/pkg/storage/cacher +k8s.io/apiserver/pkg/storage/errors k8s.io/apiserver/pkg/storage/etcd3 k8s.io/apiserver/pkg/storage/etcd3/metrics k8s.io/apiserver/pkg/storage/names k8s.io/apiserver/pkg/storage/storagebackend k8s.io/apiserver/pkg/storage/storagebackend/factory k8s.io/apiserver/pkg/storage/value +k8s.io/apiserver/pkg/storage/value/encrypt/aes +k8s.io/apiserver/pkg/storage/value/encrypt/envelope +k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1 +k8s.io/apiserver/pkg/storage/value/encrypt/identity +k8s.io/apiserver/pkg/storage/value/encrypt/secretbox k8s.io/apiserver/pkg/storageversion +k8s.io/apiserver/pkg/tracing k8s.io/apiserver/pkg/util/apihelpers k8s.io/apiserver/pkg/util/dryrun k8s.io/apiserver/pkg/util/feature @@ -766,6 +793,10 @@ k8s.io/apiserver/pkg/util/webhook k8s.io/apiserver/pkg/util/wsstream k8s.io/apiserver/pkg/util/x509metrics k8s.io/apiserver/pkg/warning +k8s.io/apiserver/plugin/pkg/audit/buffered +k8s.io/apiserver/plugin/pkg/audit/log +k8s.io/apiserver/plugin/pkg/audit/truncate +k8s.io/apiserver/plugin/pkg/audit/webhook k8s.io/apiserver/plugin/pkg/authenticator/token/webhook k8s.io/apiserver/plugin/pkg/authorizer/webhook # k8s.io/cli-runtime v0.22.2 @@ -1343,6 +1374,7 @@ sigs.k8s.io/kustomize/kyaml/yaml/walk ## explicit sigs.k8s.io/mcs-api/pkg/apis/v1alpha1 # sigs.k8s.io/structured-merge-diff/v4 v4.1.2 +## explicit sigs.k8s.io/structured-merge-diff/v4/fieldpath sigs.k8s.io/structured-merge-diff/v4/merge sigs.k8s.io/structured-merge-diff/v4/schema
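For reference, the vendored v1beta1 package above defines the complete client
and server surface for out-of-process KMS providers. The following is a minimal
sketch, not part of the patch, of a plugin server built against that API: the
unix socket path and the pass-through Encrypt/Decrypt bodies are placeholder
assumptions, and a real provider would call out to its KMS instead. The Version
response must report "v1beta1", since the apiserver's checkAPIVersion rejects
any other value.

package main

import (
	"context"
	"net"

	"google.golang.org/grpc"

	kmsapi "k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1"
)

type plugin struct{}

// Version reports the plugin API version plus provider metadata.
func (p *plugin) Version(ctx context.Context, req *kmsapi.VersionRequest) (*kmsapi.VersionResponse, error) {
	return &kmsapi.VersionResponse{Version: "v1beta1", RuntimeName: "example", RuntimeVersion: "0.1.0"}, nil
}

// Encrypt would wrap the data encryption key with the provider's key;
// this sketch simply passes the bytes through.
func (p *plugin) Encrypt(ctx context.Context, req *kmsapi.EncryptRequest) (*kmsapi.EncryptResponse, error) {
	return &kmsapi.EncryptResponse{Cipher: req.Plain}, nil
}

// Decrypt is the inverse of Encrypt; again a placeholder pass-through.
func (p *plugin) Decrypt(ctx context.Context, req *kmsapi.DecryptRequest) (*kmsapi.DecryptResponse, error) {
	return &kmsapi.DecryptResponse{Plain: req.Cipher}, nil
}

func main() {
	// Hypothetical socket path; the apiserver would be pointed at the same
	// endpoint through its encryption configuration.
	l, err := net.Listen("unix", "/tmp/kms.sock")
	if err != nil {
		panic(err)
	}
	s := grpc.NewServer()
	kmsapi.RegisterKeyManagementServiceServer(s, &plugin{})
	if err := s.Serve(l); err != nil {
		panic(err)
	}
}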