mirror of https://github.com/knative/caching.git
Migrate to knative.dev/pkg (#35)
These changes were done via:

```
sed -i 's@"github.com/knative/pkg@"knative.dev/pkg@g' \
  $(find -name '*.go' | grep -v vendor | xargs grep github.com/knative/pkg | cut -d':' -f 1)
```

`Gopkg.toml`:
- github.com/knative/pkg -> knative.dev/pkg
- release-0.7 -> master

`./hack/update-codegen.sh`:
- github.com/knative/pkg -> knative.dev/pkg

Then re-run:

```
./hack/update-deps.sh
./hack/update-codegen.sh
```

This commit is contained in:
parent b6c8f4851e
commit aa9bd3961c
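The per-file effect of that sed rewrite is visible throughout the diff below; as a minimal sketch (the package name and helper function are illustrative, not taken from this repository), an import block simply changes path while the code using it stays the same:

```go
package example

// Before the migration this import read "github.com/knative/pkg/apis".
import "knative.dev/pkg/apis"

// missingField compiles unchanged against the new import path; only the
// path moved, the package API did not.
func missingField(name string) *apis.FieldError {
	return apis.ErrMissingField(name)
}
```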
@@ -117,6 +117,14 @@
  pruneopts = "NUT"
  revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998"

[[projects]]
  branch = "master"
  digest = "1:b7cb6054d3dff43b38ad2e92492f220f57ae6087ee797dca298139776749ace8"
  name = "github.com/golang/groupcache"
  packages = ["lru"]
  pruneopts = "NUT"
  revision = "5b532d6fd5efaf7fa130d4e859a2fde0fc3a9e1b"

[[projects]]
  digest = "1:4dacf728c83400b3e9d1d3025dd3c1e93e9a1b033726d1b193dc209f3fa9cb7a"
  name = "github.com/golang/protobuf"

@@ -244,28 +252,6 @@
  pruneopts = "NUT"
  revision = "f2b4162afba35581b6d4a50d3b8f34e33c144682"

[[projects]]
  digest = "1:6a405899228cd065e8730e163e7fbb4faaee51af4035120ec054795873380d50"
  name = "github.com/knative/pkg"
  packages = [
    "apis",
    "changeset",
    "codegen/cmd/injection-gen",
    "codegen/cmd/injection-gen/args",
    "codegen/cmd/injection-gen/generators",
    "configmap",
    "controller",
    "injection",
    "kmeta",
    "kmp",
    "logging",
    "logging/logkey",
    "metrics",
    "metrics/metricskey",
  ]
  pruneopts = "T"
  revision = "4d86f3ad6f48dffa0846c76711ca6c3697def63c"

[[projects]]
  branch = "master"
  digest = "1:1bfc083da5bbeb7abaac53c56890eb14eb11bac9ec985bfe338c4bbb0540c9ba"

@@ -746,7 +732,7 @@
  version = "kubernetes-1.12.6"

[[projects]]
  digest = "1:febab0466fc1b384189951d4c1aa175cbd03ad10cdb529d1ff2e8b20036ba7e3"
  digest = "1:5ccc821e42e8ab9d53d252b4c12d0b7f388afea1f4f2ceb2014dd27976970dc7"
  name = "k8s.io/client-go"
  packages = [
    "discovery",

@@ -867,6 +853,7 @@
    "tools/clientcmd/api",
    "tools/metrics",
    "tools/pager",
    "tools/record",
    "tools/reference",
    "transport",
    "util/buffer",

@@ -941,17 +928,34 @@
  pruneopts = "NUT"
  revision = "e3762e86a74c878ffed47484592986685639c2cd"

[[projects]]
  branch = "master"
  digest = "1:a1c54962db67cc9b1bda27c8657ae3ca1e7cd9f2702a80012e0abc932c7c7091"
  name = "knative.dev/pkg"
  packages = [
    "apis",
    "changeset",
    "codegen/cmd/injection-gen",
    "codegen/cmd/injection-gen/args",
    "codegen/cmd/injection-gen/generators",
    "configmap",
    "controller",
    "injection",
    "kmeta",
    "kmp",
    "logging",
    "logging/logkey",
    "metrics",
    "metrics/metricskey",
  ]
  pruneopts = "T"
  revision = "3c6bc12e7897bf8c0d1eb1411fbd3d80537420f9"

[solve-meta]
  analyzer-name = "dep"
  analyzer-version = 1
  input-imports = [
    "github.com/google/go-cmp/cmp",
    "github.com/knative/pkg/apis",
    "github.com/knative/pkg/codegen/cmd/injection-gen",
    "github.com/knative/pkg/controller",
    "github.com/knative/pkg/injection",
    "github.com/knative/pkg/kmeta",
    "github.com/knative/pkg/logging",
    "github.com/knative/test-infra/scripts",
    "github.com/knative/test-infra/tools/dep-collector",
    "k8s.io/api/core/v1",

@@ -977,6 +981,12 @@
    "k8s.io/code-generator/cmd/defaulter-gen",
    "k8s.io/code-generator/cmd/informer-gen",
    "k8s.io/code-generator/cmd/lister-gen",
    "knative.dev/pkg/apis",
    "knative.dev/pkg/codegen/cmd/injection-gen",
    "knative.dev/pkg/controller",
    "knative.dev/pkg/injection",
    "knative.dev/pkg/kmeta",
    "knative.dev/pkg/logging",
  ]
  solver-name = "gps-cdcl"
  solver-version = 1
@@ -8,7 +8,7 @@ required = [
  "k8s.io/code-generator/cmd/client-gen",
  "k8s.io/code-generator/cmd/lister-gen",
  "k8s.io/code-generator/cmd/informer-gen",
  "github.com/knative/pkg/codegen/cmd/injection-gen",
  "knative.dev/pkg/codegen/cmd/injection-gen",
  "github.com/knative/test-infra/scripts",
  "github.com/knative/test-infra/tools/dep-collector",
]

@@ -38,9 +38,8 @@ required = [
  version = "kubernetes-1.12.6"

[[override]]
  name = "github.com/knative/pkg"
  # HEAD as of 2919-06-05
  revision = "4d86f3ad6f48dffa0846c76711ca6c3697def63c"
  name = "knative.dev/pkg"
  branch = "master"

[[override]]
  name = "google.golang.org/genproto"

@@ -76,6 +75,6 @@ required = [
  non-go = false

[[prune.project]]
  name = "github.com/knative/pkg"
  name = "knative.dev/pkg"
  unused-packages = false
  non-go = false
@@ -20,7 +20,7 @@ set -o pipefail

REPO_ROOT=$(dirname ${BASH_SOURCE})/..
CODEGEN_PKG=${CODEGEN_PKG:-$(cd ${REPO_ROOT}; ls -d -1 ./vendor/k8s.io/code-generator 2>/dev/null || echo ../code-generator)}
KNATIVE_CODEGEN_PKG=${KNATIVE_CODEGEN_PKG:-$(cd ${REPO_ROOT}; ls -d -1 ./vendor/github.com/knative/pkg 2>/dev/null || echo ../pkg)}
KNATIVE_CODEGEN_PKG=${KNATIVE_CODEGEN_PKG:-$(cd ${REPO_ROOT}; ls -d -1 ./vendor/knative.dev/pkg 2>/dev/null || echo ../pkg)}

# generate the code with:
# --output-base because this script should also be able to run inside the vendor dir of
@@ -25,8 +25,8 @@ import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"

	"github.com/knative/pkg/apis"
	"github.com/knative/pkg/kmeta"
	"knative.dev/pkg/apis"
	"knative.dev/pkg/kmeta"
)

// +genclient
@@ -23,7 +23,7 @@ import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/knative/pkg/apis"
	"knative.dev/pkg/apis"
)

func TestIsReady(t *testing.T) {
@@ -23,7 +23,7 @@ import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/equality"

	"github.com/knative/pkg/apis"
	"knative.dev/pkg/apis"
)

func (rt *Image) Validate(ctx context.Context) *apis.FieldError {
@@ -22,7 +22,7 @@ import (

	corev1 "k8s.io/api/core/v1"

	"github.com/knative/pkg/apis"
	"knative.dev/pkg/apis"
)

func TestImageValidation(t *testing.T) {
@@ -22,9 +22,9 @@ import (
	"context"

	versioned "github.com/knative/caching/pkg/client/clientset/versioned"
	injection "github.com/knative/pkg/injection"
	logging "github.com/knative/pkg/logging"
	rest "k8s.io/client-go/rest"
	injection "knative.dev/pkg/injection"
	logging "knative.dev/pkg/logging"
)

func init() {
@@ -23,10 +23,10 @@ import (

	fake "github.com/knative/caching/pkg/client/clientset/versioned/fake"
	client "github.com/knative/caching/pkg/client/injection/client"
	injection "github.com/knative/pkg/injection"
	logging "github.com/knative/pkg/logging"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/rest"
	injection "knative.dev/pkg/injection"
	logging "knative.dev/pkg/logging"
)

func init() {
@@ -23,9 +23,9 @@ import (

	externalversions "github.com/knative/caching/pkg/client/informers/externalversions"
	client "github.com/knative/caching/pkg/client/injection/client"
	controller "github.com/knative/pkg/controller"
	injection "github.com/knative/pkg/injection"
	logging "github.com/knative/pkg/logging"
	controller "knative.dev/pkg/controller"
	injection "knative.dev/pkg/injection"
	logging "knative.dev/pkg/logging"
)

func init() {
@@ -24,8 +24,8 @@ import (
	externalversions "github.com/knative/caching/pkg/client/informers/externalversions"
	fake "github.com/knative/caching/pkg/client/injection/client/fake"
	factory "github.com/knative/caching/pkg/client/injection/informers/caching/factory"
	controller "github.com/knative/pkg/controller"
	injection "github.com/knative/pkg/injection"
	controller "knative.dev/pkg/controller"
	injection "knative.dev/pkg/injection"
)

var Get = factory.Get
@@ -23,8 +23,8 @@ import (

	fake "github.com/knative/caching/pkg/client/injection/informers/caching/factory/fake"
	image "github.com/knative/caching/pkg/client/injection/informers/caching/v1alpha1/image"
	controller "github.com/knative/pkg/controller"
	injection "github.com/knative/pkg/injection"
	controller "knative.dev/pkg/controller"
	injection "knative.dev/pkg/injection"
)

var Get = image.Get
@@ -23,9 +23,9 @@ import (

	v1alpha1 "github.com/knative/caching/pkg/client/informers/externalversions/caching/v1alpha1"
	factory "github.com/knative/caching/pkg/client/injection/informers/caching/factory"
	controller "github.com/knative/pkg/controller"
	injection "github.com/knative/pkg/injection"
	logging "github.com/knative/pkg/logging"
	controller "knative.dev/pkg/controller"
	injection "knative.dev/pkg/injection"
	logging "knative.dev/pkg/logging"
)

func init() {
@@ -0,0 +1,191 @@
[New vendored file: the full, unmodified Apache License, Version 2.0 text (http://www.apache.org/licenses/), added for a newly vendored dependency — most likely github.com/golang/groupcache, whose lru package is vendored below.]
@ -0,0 +1,133 @@
|
|||
/*
|
||||
Copyright 2013 Google Inc.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package lru implements an LRU cache.
|
||||
package lru
|
||||
|
||||
import "container/list"
|
||||
|
||||
// Cache is an LRU cache. It is not safe for concurrent access.
|
||||
type Cache struct {
|
||||
// MaxEntries is the maximum number of cache entries before
|
||||
// an item is evicted. Zero means no limit.
|
||||
MaxEntries int
|
||||
|
||||
// OnEvicted optionally specifies a callback function to be
|
||||
// executed when an entry is purged from the cache.
|
||||
OnEvicted func(key Key, value interface{})
|
||||
|
||||
ll *list.List
|
||||
cache map[interface{}]*list.Element
|
||||
}
|
||||
|
||||
// A Key may be any value that is comparable. See http://golang.org/ref/spec#Comparison_operators
|
||||
type Key interface{}
|
||||
|
||||
type entry struct {
|
||||
key Key
|
||||
value interface{}
|
||||
}
|
||||
|
||||
// New creates a new Cache.
|
||||
// If maxEntries is zero, the cache has no limit and it's assumed
|
||||
// that eviction is done by the caller.
|
||||
func New(maxEntries int) *Cache {
|
||||
return &Cache{
|
||||
MaxEntries: maxEntries,
|
||||
ll: list.New(),
|
||||
cache: make(map[interface{}]*list.Element),
|
||||
}
|
||||
}
|
||||
|
||||
// Add adds a value to the cache.
|
||||
func (c *Cache) Add(key Key, value interface{}) {
|
||||
if c.cache == nil {
|
||||
c.cache = make(map[interface{}]*list.Element)
|
||||
c.ll = list.New()
|
||||
}
|
||||
if ee, ok := c.cache[key]; ok {
|
||||
c.ll.MoveToFront(ee)
|
||||
ee.Value.(*entry).value = value
|
||||
return
|
||||
}
|
||||
ele := c.ll.PushFront(&entry{key, value})
|
||||
c.cache[key] = ele
|
||||
if c.MaxEntries != 0 && c.ll.Len() > c.MaxEntries {
|
||||
c.RemoveOldest()
|
||||
}
|
||||
}
|
||||
|
||||
// Get looks up a key's value from the cache.
|
||||
func (c *Cache) Get(key Key) (value interface{}, ok bool) {
|
||||
if c.cache == nil {
|
||||
return
|
||||
}
|
||||
if ele, hit := c.cache[key]; hit {
|
||||
c.ll.MoveToFront(ele)
|
||||
return ele.Value.(*entry).value, true
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Remove removes the provided key from the cache.
|
||||
func (c *Cache) Remove(key Key) {
|
||||
if c.cache == nil {
|
||||
return
|
||||
}
|
||||
if ele, hit := c.cache[key]; hit {
|
||||
c.removeElement(ele)
|
||||
}
|
||||
}
|
||||
|
||||
// RemoveOldest removes the oldest item from the cache.
|
||||
func (c *Cache) RemoveOldest() {
|
||||
if c.cache == nil {
|
||||
return
|
||||
}
|
||||
ele := c.ll.Back()
|
||||
if ele != nil {
|
||||
c.removeElement(ele)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Cache) removeElement(e *list.Element) {
|
||||
c.ll.Remove(e)
|
||||
kv := e.Value.(*entry)
|
||||
delete(c.cache, kv.key)
|
||||
if c.OnEvicted != nil {
|
||||
c.OnEvicted(kv.key, kv.value)
|
||||
}
|
||||
}
|
||||
|
||||
// Len returns the number of items in the cache.
|
||||
func (c *Cache) Len() int {
|
||||
if c.cache == nil {
|
||||
return 0
|
||||
}
|
||||
return c.ll.Len()
|
||||
}
|
||||
|
||||
// Clear purges all stored items from the cache.
|
||||
func (c *Cache) Clear() {
|
||||
if c.OnEvicted != nil {
|
||||
for _, e := range c.cache {
|
||||
kv := e.Value.(*entry)
|
||||
c.OnEvicted(kv.key, kv.value)
|
||||
}
|
||||
}
|
||||
c.ll = nil
|
||||
c.cache = nil
|
||||
}
|
|
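The vendored github.com/golang/groupcache/lru package added above is a small, non-concurrency-safe LRU cache. A minimal usage sketch (keys, values, and the capacity of 2 are illustrative), using only the New/Add/Get/Len/OnEvicted API shown in the diff:

```go
package main

import (
	"fmt"

	"github.com/golang/groupcache/lru"
)

func main() {
	cache := lru.New(2) // keep at most two entries
	cache.OnEvicted = func(key lru.Key, value interface{}) {
		fmt.Printf("evicted %v\n", key)
	}

	cache.Add("a", 1)
	cache.Add("b", 2)
	cache.Add("c", 3) // exceeds MaxEntries, so the least recently used key "a" is evicted

	if v, ok := cache.Get("b"); ok {
		fmt.Println("b =", v) // b = 2
	}
	fmt.Println("len =", cache.Len()) // len = 2
}
```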
@ -1,6 +0,0 @@
|
|||
<!--
|
||||
Pro-tip: To automatically close issues when a PR is merged,
|
||||
include the following in your PR description:
|
||||
|
||||
Fixes: <LINK TO ISSUE>
|
||||
-->
|
|
@ -1,43 +0,0 @@
|
|||
/*
|
||||
Copyright 2019 The Knative Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package injection
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/knative/pkg/configmap"
|
||||
"github.com/knative/pkg/controller"
|
||||
)
|
||||
|
||||
// ControllerInjector holds the type of a callback that attaches a particular
|
||||
// controller type to a context.
|
||||
type ControllerInjector func(context.Context, configmap.Watcher) *controller.Impl
|
||||
|
||||
func (i *impl) RegisterController(ii ControllerInjector) {
|
||||
i.m.Lock()
|
||||
defer i.m.Unlock()
|
||||
|
||||
i.controllers = append(i.controllers, ii)
|
||||
}
|
||||
|
||||
func (i *impl) GetControllers() []ControllerInjector {
|
||||
i.m.RLock()
|
||||
defer i.m.RUnlock()
|
||||
|
||||
// Copy the slice before returning.
|
||||
return append(i.controllers[:0:0], i.controllers...)
|
||||
}
|
|
@@ -0,0 +1,18 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package record has all client logic for recording and reporting events.
package record // import "k8s.io/client-go/tools/record"
@ -0,0 +1,322 @@
|
|||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package record
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/clock"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
restclient "k8s.io/client-go/rest"
|
||||
ref "k8s.io/client-go/tools/reference"
|
||||
|
||||
"net/http"
|
||||
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
const maxTriesPerEvent = 12
|
||||
|
||||
var defaultSleepDuration = 10 * time.Second
|
||||
|
||||
const maxQueuedEvents = 1000
|
||||
|
||||
// EventSink knows how to store events (client.Client implements it.)
|
||||
// EventSink must respect the namespace that will be embedded in 'event'.
|
||||
// It is assumed that EventSink will return the same sorts of errors as
|
||||
// pkg/client's REST client.
|
||||
type EventSink interface {
|
||||
Create(event *v1.Event) (*v1.Event, error)
|
||||
Update(event *v1.Event) (*v1.Event, error)
|
||||
Patch(oldEvent *v1.Event, data []byte) (*v1.Event, error)
|
||||
}
|
||||
|
||||
// EventRecorder knows how to record events on behalf of an EventSource.
|
||||
type EventRecorder interface {
|
||||
// Event constructs an event from the given information and puts it in the queue for sending.
|
||||
// 'object' is the object this event is about. Event will make a reference-- or you may also
|
||||
// pass a reference to the object directly.
|
||||
// 'type' of this event, and can be one of Normal, Warning. New types could be added in future
|
||||
// 'reason' is the reason this event is generated. 'reason' should be short and unique; it
|
||||
// should be in UpperCamelCase format (starting with a capital letter). "reason" will be used
|
||||
// to automate handling of events, so imagine people writing switch statements to handle them.
|
||||
// You want to make that easy.
|
||||
// 'message' is intended to be human readable.
|
||||
//
|
||||
// The resulting event will be created in the same namespace as the reference object.
|
||||
Event(object runtime.Object, eventtype, reason, message string)
|
||||
|
||||
// Eventf is just like Event, but with Sprintf for the message field.
|
||||
Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{})
|
||||
|
||||
// PastEventf is just like Eventf, but with an option to specify the event's 'timestamp' field.
|
||||
PastEventf(object runtime.Object, timestamp metav1.Time, eventtype, reason, messageFmt string, args ...interface{})
|
||||
|
||||
// AnnotatedEventf is just like eventf, but with annotations attached
|
||||
AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{})
|
||||
}
|
||||
|
||||
// EventBroadcaster knows how to receive events and send them to any EventSink, watcher, or log.
|
||||
type EventBroadcaster interface {
|
||||
// StartEventWatcher starts sending events received from this EventBroadcaster to the given
|
||||
// event handler function. The return value can be ignored or used to stop recording, if
|
||||
// desired.
|
||||
StartEventWatcher(eventHandler func(*v1.Event)) watch.Interface
|
||||
|
||||
// StartRecordingToSink starts sending events received from this EventBroadcaster to the given
|
||||
// sink. The return value can be ignored or used to stop recording, if desired.
|
||||
StartRecordingToSink(sink EventSink) watch.Interface
|
||||
|
||||
// StartLogging starts sending events received from this EventBroadcaster to the given logging
|
||||
// function. The return value can be ignored or used to stop recording, if desired.
|
||||
StartLogging(logf func(format string, args ...interface{})) watch.Interface
|
||||
|
||||
// NewRecorder returns an EventRecorder that can be used to send events to this EventBroadcaster
|
||||
// with the event source set to the given event source.
|
||||
NewRecorder(scheme *runtime.Scheme, source v1.EventSource) EventRecorder
|
||||
}
|
||||
|
||||
// Creates a new event broadcaster.
|
||||
func NewBroadcaster() EventBroadcaster {
|
||||
return &eventBroadcasterImpl{watch.NewBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), defaultSleepDuration}
|
||||
}
|
||||
|
||||
func NewBroadcasterForTests(sleepDuration time.Duration) EventBroadcaster {
|
||||
return &eventBroadcasterImpl{watch.NewBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), sleepDuration}
|
||||
}
|
||||
|
||||
type eventBroadcasterImpl struct {
|
||||
*watch.Broadcaster
|
||||
sleepDuration time.Duration
|
||||
}
|
||||
|
||||
// StartRecordingToSink starts sending events received from the specified eventBroadcaster to the given sink.
|
||||
// The return value can be ignored or used to stop recording, if desired.
|
||||
// TODO: make me an object with parameterizable queue length and retry interval
|
||||
func (eventBroadcaster *eventBroadcasterImpl) StartRecordingToSink(sink EventSink) watch.Interface {
|
||||
// The default math/rand package functions aren't thread safe, so create a
|
||||
// new Rand object for each StartRecording call.
|
||||
randGen := rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
eventCorrelator := NewEventCorrelator(clock.RealClock{})
|
||||
return eventBroadcaster.StartEventWatcher(
|
||||
func(event *v1.Event) {
|
||||
recordToSink(sink, event, eventCorrelator, randGen, eventBroadcaster.sleepDuration)
|
||||
})
|
||||
}
|
||||
|
||||
func recordToSink(sink EventSink, event *v1.Event, eventCorrelator *EventCorrelator, randGen *rand.Rand, sleepDuration time.Duration) {
|
||||
// Make a copy before modification, because there could be multiple listeners.
|
||||
// Events are safe to copy like this.
|
||||
eventCopy := *event
|
||||
event = &eventCopy
|
||||
result, err := eventCorrelator.EventCorrelate(event)
|
||||
if err != nil {
|
||||
utilruntime.HandleError(err)
|
||||
}
|
||||
if result.Skip {
|
||||
return
|
||||
}
|
||||
tries := 0
|
||||
for {
|
||||
if recordEvent(sink, result.Event, result.Patch, result.Event.Count > 1, eventCorrelator) {
|
||||
break
|
||||
}
|
||||
tries++
|
||||
if tries >= maxTriesPerEvent {
|
||||
glog.Errorf("Unable to write event '%#v' (retry limit exceeded!)", event)
|
||||
break
|
||||
}
|
||||
// Randomize the first sleep so that various clients won't all be
|
||||
// synced up if the master goes down.
|
||||
if tries == 1 {
|
||||
time.Sleep(time.Duration(float64(sleepDuration) * randGen.Float64()))
|
||||
} else {
|
||||
time.Sleep(sleepDuration)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func isKeyNotFoundError(err error) bool {
|
||||
statusErr, _ := err.(*errors.StatusError)
|
||||
|
||||
if statusErr != nil && statusErr.Status().Code == http.StatusNotFound {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// recordEvent attempts to write event to a sink. It returns true if the event
|
||||
// was successfully recorded or discarded, false if it should be retried.
|
||||
// If updateExistingEvent is false, it creates a new event, otherwise it updates
|
||||
// existing event.
|
||||
func recordEvent(sink EventSink, event *v1.Event, patch []byte, updateExistingEvent bool, eventCorrelator *EventCorrelator) bool {
|
||||
var newEvent *v1.Event
|
||||
var err error
|
||||
if updateExistingEvent {
|
||||
newEvent, err = sink.Patch(event, patch)
|
||||
}
|
||||
// Update can fail because the event may have been removed and it no longer exists.
|
||||
if !updateExistingEvent || (updateExistingEvent && isKeyNotFoundError(err)) {
|
||||
// Making sure that ResourceVersion is empty on creation
|
||||
event.ResourceVersion = ""
|
||||
newEvent, err = sink.Create(event)
|
||||
}
|
||||
if err == nil {
|
||||
// we need to update our event correlator with the server returned state to handle name/resourceversion
|
||||
eventCorrelator.UpdateState(newEvent)
|
||||
return true
|
||||
}
|
||||
|
||||
// If we can't contact the server, then hold everything while we keep trying.
|
||||
// Otherwise, something about the event is malformed and we should abandon it.
|
||||
switch err.(type) {
|
||||
case *restclient.RequestConstructionError:
|
||||
// We will construct the request the same next time, so don't keep trying.
|
||||
glog.Errorf("Unable to construct event '%#v': '%v' (will not retry!)", event, err)
|
||||
return true
|
||||
case *errors.StatusError:
|
||||
if errors.IsAlreadyExists(err) {
|
||||
glog.V(5).Infof("Server rejected event '%#v': '%v' (will not retry!)", event, err)
|
||||
} else {
|
||||
glog.Errorf("Server rejected event '%#v': '%v' (will not retry!)", event, err)
|
||||
}
|
||||
return true
|
||||
case *errors.UnexpectedObjectError:
|
||||
// We don't expect this; it implies the server's response didn't match a
|
||||
// known pattern. Go ahead and retry.
|
||||
default:
|
||||
// This case includes actual http transport errors. Go ahead and retry.
|
||||
}
|
||||
glog.Errorf("Unable to write event: '%v' (may retry after sleeping)", err)
|
||||
return false
|
||||
}
|
||||
|
||||
// StartLogging starts sending events received from this EventBroadcaster to the given logging function.
|
||||
// The return value can be ignored or used to stop recording, if desired.
|
||||
func (eventBroadcaster *eventBroadcasterImpl) StartLogging(logf func(format string, args ...interface{})) watch.Interface {
|
||||
return eventBroadcaster.StartEventWatcher(
|
||||
func(e *v1.Event) {
|
||||
logf("Event(%#v): type: '%v' reason: '%v' %v", e.InvolvedObject, e.Type, e.Reason, e.Message)
|
||||
})
|
||||
}
|
||||
|
||||
// StartEventWatcher starts sending events received from this EventBroadcaster to the given event handler function.
|
||||
// The return value can be ignored or used to stop recording, if desired.
|
||||
func (eventBroadcaster *eventBroadcasterImpl) StartEventWatcher(eventHandler func(*v1.Event)) watch.Interface {
|
||||
watcher := eventBroadcaster.Watch()
|
||||
go func() {
|
||||
defer utilruntime.HandleCrash()
|
||||
for watchEvent := range watcher.ResultChan() {
|
||||
event, ok := watchEvent.Object.(*v1.Event)
|
||||
if !ok {
|
||||
// This is all local, so there's no reason this should
|
||||
// ever happen.
|
||||
continue
|
||||
}
|
||||
eventHandler(event)
|
||||
}
|
||||
}()
|
||||
return watcher
|
||||
}
|
||||
|
||||
// NewRecorder returns an EventRecorder that records events with the given event source.
|
||||
func (eventBroadcaster *eventBroadcasterImpl) NewRecorder(scheme *runtime.Scheme, source v1.EventSource) EventRecorder {
|
||||
return &recorderImpl{scheme, source, eventBroadcaster.Broadcaster, clock.RealClock{}}
|
||||
}
|
||||
|
||||
type recorderImpl struct {
|
||||
scheme *runtime.Scheme
|
||||
source v1.EventSource
|
||||
*watch.Broadcaster
|
||||
clock clock.Clock
|
||||
}
|
||||
|
||||
func (recorder *recorderImpl) generateEvent(object runtime.Object, annotations map[string]string, timestamp metav1.Time, eventtype, reason, message string) {
|
||||
ref, err := ref.GetReference(recorder.scheme, object)
|
||||
if err != nil {
|
||||
glog.Errorf("Could not construct reference to: '%#v' due to: '%v'. Will not report event: '%v' '%v' '%v'", object, err, eventtype, reason, message)
|
||||
return
|
||||
}
|
||||
|
||||
if !validateEventType(eventtype) {
|
||||
glog.Errorf("Unsupported event type: '%v'", eventtype)
|
||||
return
|
||||
}
|
||||
|
||||
event := recorder.makeEvent(ref, annotations, eventtype, reason, message)
|
||||
event.Source = recorder.source
|
||||
|
||||
go func() {
|
||||
// NOTE: events should be a non-blocking operation
|
||||
defer utilruntime.HandleCrash()
|
||||
recorder.Action(watch.Added, event)
|
||||
}()
|
||||
}
|
||||
|
||||
func validateEventType(eventtype string) bool {
|
||||
switch eventtype {
|
||||
case v1.EventTypeNormal, v1.EventTypeWarning:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (recorder *recorderImpl) Event(object runtime.Object, eventtype, reason, message string) {
|
||||
recorder.generateEvent(object, nil, metav1.Now(), eventtype, reason, message)
|
||||
}
|
||||
|
||||
func (recorder *recorderImpl) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) {
|
||||
recorder.Event(object, eventtype, reason, fmt.Sprintf(messageFmt, args...))
|
||||
}
|
||||
|
||||
func (recorder *recorderImpl) PastEventf(object runtime.Object, timestamp metav1.Time, eventtype, reason, messageFmt string, args ...interface{}) {
|
||||
recorder.generateEvent(object, nil, timestamp, eventtype, reason, fmt.Sprintf(messageFmt, args...))
|
||||
}
|
||||
|
||||
func (recorder *recorderImpl) AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) {
|
||||
recorder.generateEvent(object, annotations, metav1.Now(), eventtype, reason, fmt.Sprintf(messageFmt, args...))
|
||||
}
|
||||
|
||||
func (recorder *recorderImpl) makeEvent(ref *v1.ObjectReference, annotations map[string]string, eventtype, reason, message string) *v1.Event {
|
||||
t := metav1.Time{Time: recorder.clock.Now()}
|
||||
namespace := ref.Namespace
|
||||
if namespace == "" {
|
||||
namespace = metav1.NamespaceDefault
|
||||
}
|
||||
return &v1.Event{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf("%v.%x", ref.Name, t.UnixNano()),
|
||||
Namespace: namespace,
|
||||
Annotations: annotations,
|
||||
},
|
||||
InvolvedObject: *ref,
|
||||
Reason: reason,
|
||||
Message: message,
|
||||
FirstTimestamp: t,
|
||||
LastTimestamp: t,
|
||||
Count: 1,
|
||||
Type: eventtype,
|
||||
}
|
||||
}
|
|
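The vendored k8s.io/client-go/tools/record file above supplies the EventBroadcaster/EventRecorder machinery that knative.dev/pkg controllers rely on. A minimal wiring sketch (the component name is illustrative) using only the constructors shown in the diff:

```go
package main

import (
	"log"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/tools/record"
)

// newRecorder fans events out to a log function and returns a recorder.
// A real controller would also call StartRecordingToSink with an EventSink
// backed by the events client so events reach the API server.
func newRecorder() record.EventRecorder {
	broadcaster := record.NewBroadcaster()
	broadcaster.StartLogging(log.Printf)
	return broadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: "example-controller"})
}

func main() {
	_ = newRecorder()
}
```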
@ -0,0 +1,462 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package record
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/golang/groupcache/lru"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/clock"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/apimachinery/pkg/util/strategicpatch"
|
||||
"k8s.io/client-go/util/flowcontrol"
|
||||
)
|
||||
|
||||
const (
|
||||
maxLruCacheEntries = 4096
|
||||
|
||||
// if we see the same event that varies only by message
|
||||
// more than 10 times in a 10 minute period, aggregate the event
|
||||
defaultAggregateMaxEvents = 10
|
||||
defaultAggregateIntervalInSeconds = 600
|
||||
|
||||
// by default, allow a source to send 25 events about an object
|
||||
// but control the refill rate to 1 new event every 5 minutes
|
||||
// this helps control the long-tail of events for things that are always
|
||||
// unhealthy
|
||||
defaultSpamBurst = 25
|
||||
defaultSpamQPS = 1. / 300.
|
||||
)
|
||||
|
||||
// getEventKey builds unique event key based on source, involvedObject, reason, message
|
||||
func getEventKey(event *v1.Event) string {
|
||||
return strings.Join([]string{
|
||||
event.Source.Component,
|
||||
event.Source.Host,
|
||||
event.InvolvedObject.Kind,
|
||||
event.InvolvedObject.Namespace,
|
||||
event.InvolvedObject.Name,
|
||||
event.InvolvedObject.FieldPath,
|
||||
string(event.InvolvedObject.UID),
|
||||
event.InvolvedObject.APIVersion,
|
||||
event.Type,
|
||||
event.Reason,
|
||||
event.Message,
|
||||
},
|
||||
"")
|
||||
}
|
||||
|
||||
// getSpamKey builds unique event key based on source, involvedObject
|
||||
func getSpamKey(event *v1.Event) string {
|
||||
return strings.Join([]string{
|
||||
event.Source.Component,
|
||||
event.Source.Host,
|
||||
event.InvolvedObject.Kind,
|
||||
event.InvolvedObject.Namespace,
|
||||
event.InvolvedObject.Name,
|
||||
string(event.InvolvedObject.UID),
|
||||
event.InvolvedObject.APIVersion,
|
||||
},
|
||||
"")
|
||||
}
|
||||
|
||||
// EventFilterFunc is a function that returns true if the event should be skipped
|
||||
type EventFilterFunc func(event *v1.Event) bool
|
||||
|
||||
// EventSourceObjectSpamFilter is responsible for throttling
|
||||
// the amount of events a source and object can produce.
|
||||
type EventSourceObjectSpamFilter struct {
|
||||
sync.RWMutex
|
||||
|
||||
// the cache that manages last synced state
|
||||
cache *lru.Cache
|
||||
|
||||
// burst is the amount of events we allow per source + object
|
||||
burst int
|
||||
|
||||
// qps is the refill rate of the token bucket in queries per second
|
||||
qps float32
|
||||
|
||||
// clock is used to allow for testing over a time interval
|
||||
clock clock.Clock
|
||||
}
|
||||
|
||||
// NewEventSourceObjectSpamFilter allows burst events from a source about an object with the specified qps refill.
|
||||
func NewEventSourceObjectSpamFilter(lruCacheSize, burst int, qps float32, clock clock.Clock) *EventSourceObjectSpamFilter {
|
||||
return &EventSourceObjectSpamFilter{
|
||||
cache: lru.New(lruCacheSize),
|
||||
burst: burst,
|
||||
qps: qps,
|
||||
clock: clock,
|
||||
}
|
||||
}
|
||||
|
||||
// spamRecord holds data used to perform spam filtering decisions.
|
||||
type spamRecord struct {
|
||||
// rateLimiter controls the rate of events about this object
|
||||
rateLimiter flowcontrol.RateLimiter
|
||||
}
|
||||
|
||||
// Filter controls that a given source+object are not exceeding the allowed rate.
|
||||
func (f *EventSourceObjectSpamFilter) Filter(event *v1.Event) bool {
|
||||
var record spamRecord
|
||||
|
||||
// controls our cached information about this event (source+object)
|
||||
eventKey := getSpamKey(event)
|
||||
|
||||
// do we have a record of similar events in our cache?
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
value, found := f.cache.Get(eventKey)
|
||||
if found {
|
||||
record = value.(spamRecord)
|
||||
}
|
||||
|
||||
// verify we have a rate limiter for this record
|
||||
if record.rateLimiter == nil {
|
||||
record.rateLimiter = flowcontrol.NewTokenBucketRateLimiterWithClock(f.qps, f.burst, f.clock)
|
||||
}
|
||||
|
||||
// ensure we have available rate
|
||||
filter := !record.rateLimiter.TryAccept()
|
||||
|
||||
// update the cache
|
||||
f.cache.Add(eventKey, record)
|
||||
|
||||
return filter
|
||||
}
|
||||
|
||||
// EventAggregatorKeyFunc is responsible for grouping events for aggregation
|
||||
// It returns a tuple of the following:
|
||||
// aggregateKey - key the identifies the aggregate group to bucket this event
|
||||
// localKey - key that makes this event in the local group
|
||||
type EventAggregatorKeyFunc func(event *v1.Event) (aggregateKey string, localKey string)
|
||||
|
||||
// EventAggregatorByReasonFunc aggregates events by exact match on event.Source, event.InvolvedObject, event.Type and event.Reason
|
||||
func EventAggregatorByReasonFunc(event *v1.Event) (string, string) {
|
||||
return strings.Join([]string{
|
||||
event.Source.Component,
|
||||
event.Source.Host,
|
||||
event.InvolvedObject.Kind,
|
||||
event.InvolvedObject.Namespace,
|
||||
event.InvolvedObject.Name,
|
||||
string(event.InvolvedObject.UID),
|
||||
event.InvolvedObject.APIVersion,
|
||||
event.Type,
|
||||
event.Reason,
|
||||
},
|
||||
""), event.Message
|
||||
}
|
||||
|
||||
// EventAggregatorMessageFunc is responsible for producing an aggregation message
|
||||
type EventAggregatorMessageFunc func(event *v1.Event) string
|
||||
|
||||
// EventAggregatorByReasonMessageFunc returns an aggregate message by prefixing the incoming message
|
||||
func EventAggregatorByReasonMessageFunc(event *v1.Event) string {
|
||||
return "(combined from similar events): " + event.Message
|
||||
}
|
||||
|
||||
// EventAggregator identifies similar events and aggregates them into a single event
|
||||
type EventAggregator struct {
|
||||
sync.RWMutex
|
||||
|
||||
// The cache that manages aggregation state
|
||||
cache *lru.Cache
|
||||
|
||||
// The function that groups events for aggregation
|
||||
keyFunc EventAggregatorKeyFunc
|
||||
|
||||
// The function that generates a message for an aggregate event
|
||||
messageFunc EventAggregatorMessageFunc
|
||||
|
||||
// The maximum number of events in the specified interval before aggregation occurs
|
||||
maxEvents uint
|
||||
|
||||
// The amount of time in seconds that must transpire since the last occurrence of a similar event before it's considered new
|
||||
maxIntervalInSeconds uint
|
||||
|
||||
// clock is used to allow for testing over a time interval
|
||||
clock clock.Clock
|
||||
}
|
||||
|
||||
// NewEventAggregator returns a new instance of an EventAggregator
|
||||
func NewEventAggregator(lruCacheSize int, keyFunc EventAggregatorKeyFunc, messageFunc EventAggregatorMessageFunc,
|
||||
maxEvents int, maxIntervalInSeconds int, clock clock.Clock) *EventAggregator {
|
||||
return &EventAggregator{
|
||||
cache: lru.New(lruCacheSize),
|
||||
keyFunc: keyFunc,
|
||||
messageFunc: messageFunc,
|
||||
maxEvents: uint(maxEvents),
|
||||
maxIntervalInSeconds: uint(maxIntervalInSeconds),
|
||||
clock: clock,
|
||||
}
|
||||
}
|
||||
|
||||
// aggregateRecord holds data used to perform aggregation decisions
|
||||
type aggregateRecord struct {
|
||||
// we track the number of unique local keys we have seen in the aggregate set to know when to actually aggregate
|
||||
// if the size of this set exceeds the max, we know we need to aggregate
|
||||
localKeys sets.String
|
||||
// The last time at which the aggregate was recorded
|
||||
lastTimestamp metav1.Time
|
||||
}
|
||||
|
||||
// EventAggregate checks if a similar event has been seen according to the
|
||||
// aggregation configuration (max events, max interval, etc) and returns:
|
||||
//
|
||||
// - The (potentially modified) event that should be created
|
||||
// - The cache key for the event, for correlation purposes. This will be set to
|
||||
// the full key for normal events, and to the result of
|
||||
// EventAggregatorMessageFunc for aggregate events.
|
||||
func (e *EventAggregator) EventAggregate(newEvent *v1.Event) (*v1.Event, string) {
|
||||
now := metav1.NewTime(e.clock.Now())
|
||||
var record aggregateRecord
|
||||
// eventKey is the full cache key for this event
|
||||
eventKey := getEventKey(newEvent)
|
||||
// aggregateKey is for the aggregate event, if one is needed.
|
||||
aggregateKey, localKey := e.keyFunc(newEvent)
|
||||
|
||||
// Do we have a record of similar events in our cache?
|
||||
e.Lock()
|
||||
defer e.Unlock()
|
||||
value, found := e.cache.Get(aggregateKey)
|
||||
if found {
|
||||
record = value.(aggregateRecord)
|
||||
}
|
||||
|
||||
// Is the previous record too old? If so, make a fresh one. Note: if we didn't
|
||||
// find a similar record, its lastTimestamp will be the zero value, so we
|
||||
// create a new one in that case.
|
||||
maxInterval := time.Duration(e.maxIntervalInSeconds) * time.Second
|
||||
interval := now.Time.Sub(record.lastTimestamp.Time)
|
||||
if interval > maxInterval {
|
||||
record = aggregateRecord{localKeys: sets.NewString()}
|
||||
}
|
||||
|
||||
// Write the new event into the aggregation record and put it on the cache
|
||||
record.localKeys.Insert(localKey)
|
||||
record.lastTimestamp = now
|
||||
e.cache.Add(aggregateKey, record)
|
||||
|
||||
// If we are not yet over the threshold for unique events, don't correlate them
|
||||
if uint(record.localKeys.Len()) < e.maxEvents {
|
||||
return newEvent, eventKey
|
||||
}
|
||||
|
||||
// do not grow our local key set any larger than max
|
||||
record.localKeys.PopAny()
|
||||
|
||||
// create a new aggregate event, and return the aggregateKey as the cache key
|
||||
// (so that it can be overwritten.)
|
||||
eventCopy := &v1.Event{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf("%v.%x", newEvent.InvolvedObject.Name, now.UnixNano()),
|
||||
Namespace: newEvent.Namespace,
|
||||
},
|
||||
Count: 1,
|
||||
FirstTimestamp: now,
|
||||
InvolvedObject: newEvent.InvolvedObject,
|
||||
LastTimestamp: now,
|
||||
Message: e.messageFunc(newEvent),
|
||||
Type: newEvent.Type,
|
||||
Reason: newEvent.Reason,
|
||||
Source: newEvent.Source,
|
||||
}
|
||||
return eventCopy, aggregateKey
|
||||
}
|
||||
|
||||
// eventLog records data about when an event was observed
|
type eventLog struct {
	// The number of times the event has occurred since first occurrence.
	count uint

	// The time at which the event was first recorded.
	firstTimestamp metav1.Time

	// The unique name of the first occurrence of this event
	name string

	// Resource version returned from previous interaction with server
	resourceVersion string
}

// eventLogger logs occurrences of an event
type eventLogger struct {
	sync.RWMutex
	cache *lru.Cache
	clock clock.Clock
}

// newEventLogger observes events and counts their frequencies
func newEventLogger(lruCacheEntries int, clock clock.Clock) *eventLogger {
	return &eventLogger{cache: lru.New(lruCacheEntries), clock: clock}
}

// eventObserve records an event, or updates an existing one if key is a cache hit
func (e *eventLogger) eventObserve(newEvent *v1.Event, key string) (*v1.Event, []byte, error) {
	var (
		patch []byte
		err   error
	)
	eventCopy := *newEvent
	event := &eventCopy

	e.Lock()
	defer e.Unlock()

	// Check if there is an existing event we should update
	lastObservation := e.lastEventObservationFromCache(key)

	// If we found a result, prepare a patch
	if lastObservation.count > 0 {
		// update the event based on the last observation so patch will work as desired
		event.Name = lastObservation.name
		event.ResourceVersion = lastObservation.resourceVersion
		event.FirstTimestamp = lastObservation.firstTimestamp
		event.Count = int32(lastObservation.count) + 1

		eventCopy2 := *event
		eventCopy2.Count = 0
		eventCopy2.LastTimestamp = metav1.NewTime(time.Unix(0, 0))
		eventCopy2.Message = ""

		newData, _ := json.Marshal(event)
		oldData, _ := json.Marshal(eventCopy2)
		patch, err = strategicpatch.CreateTwoWayMergePatch(oldData, newData, event)
	}

	// record our new observation
	e.cache.Add(
		key,
		eventLog{
			count:           uint(event.Count),
			firstTimestamp:  event.FirstTimestamp,
			name:            event.Name,
			resourceVersion: event.ResourceVersion,
		},
	)
	return event, patch, err
}

// updateState updates its internal tracking information based on latest server state
func (e *eventLogger) updateState(event *v1.Event) {
	key := getEventKey(event)
	e.Lock()
	defer e.Unlock()
	// record our new observation
	e.cache.Add(
		key,
		eventLog{
			count:           uint(event.Count),
			firstTimestamp:  event.FirstTimestamp,
			name:            event.Name,
			resourceVersion: event.ResourceVersion,
		},
	)
}

// lastEventObservationFromCache returns the event from the cache, reads must be protected via external lock
func (e *eventLogger) lastEventObservationFromCache(key string) eventLog {
	value, ok := e.cache.Get(key)
	if ok {
		observationValue, ok := value.(eventLog)
		if ok {
			return observationValue
		}
	}
	return eventLog{}
}

// EventCorrelator processes all incoming events and performs analysis to avoid overwhelming the system. It can filter all
// incoming events to see if the event should be filtered from further processing. It can aggregate similar events that occur
// frequently to protect the system from spamming events that are difficult for users to distinguish. It performs de-duplication
// to ensure events that are observed multiple times are compacted into a single event with increasing counts.
type EventCorrelator struct {
	// the function to filter the event
	filterFunc EventFilterFunc
	// the object that performs event aggregation
	aggregator *EventAggregator
	// the object that observes events as they come through
	logger *eventLogger
}

// EventCorrelateResult is the result of a Correlate
type EventCorrelateResult struct {
	// the event after correlation
	Event *v1.Event
	// if provided, perform a strategic patch when updating the record on the server
	Patch []byte
	// if true, do no further processing of the event
	Skip bool
}

// NewEventCorrelator returns an EventCorrelator configured with default values.
//
// The EventCorrelator is responsible for event filtering, aggregating, and counting
// prior to interacting with the API server to record the event.
//
// The default behavior is as follows:
//   * Aggregation is performed if a similar event is recorded 10 times in a
//     10 minute rolling interval. A similar event is an event that varies only by
//     the Event.Message field. Rather than recording the precise event, aggregation
//     will create a new event whose message reports that it has combined events with
//     the same reason.
//   * Events are incrementally counted if the exact same event is encountered multiple
//     times.
//   * A source may burst 25 events about an object, but has a refill rate budget
//     per object of 1 event every 5 minutes to control long-tail of spam.
func NewEventCorrelator(clock clock.Clock) *EventCorrelator {
	cacheSize := maxLruCacheEntries
	spamFilter := NewEventSourceObjectSpamFilter(cacheSize, defaultSpamBurst, defaultSpamQPS, clock)
	return &EventCorrelator{
		filterFunc: spamFilter.Filter,
		aggregator: NewEventAggregator(
			cacheSize,
			EventAggregatorByReasonFunc,
			EventAggregatorByReasonMessageFunc,
			defaultAggregateMaxEvents,
			defaultAggregateIntervalInSeconds,
			clock),

		logger: newEventLogger(cacheSize, clock),
	}
}

// EventCorrelate filters, aggregates, counts, and de-duplicates all incoming events
func (c *EventCorrelator) EventCorrelate(newEvent *v1.Event) (*EventCorrelateResult, error) {
	if newEvent == nil {
		return nil, fmt.Errorf("event is nil")
	}
	aggregateEvent, ckey := c.aggregator.EventAggregate(newEvent)
	observedEvent, patch, err := c.logger.eventObserve(aggregateEvent, ckey)
	if c.filterFunc(observedEvent) {
		return &EventCorrelateResult{Skip: true}, nil
	}
	return &EventCorrelateResult{Event: observedEvent, Patch: patch}, err
}

// UpdateState based on the latest observed state from server
func (c *EventCorrelator) UpdateState(event *v1.Event) {
	c.logger.updateState(event)
}
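A hedged aside (not part of the vendored file above): a minimal sketch of how an event sink could drive the correlator, assuming a caller-supplied `send` callback that creates the event or applies `result.Patch`; only `NewEventCorrelator`, `EventCorrelate`, `UpdateState`, and the `EventCorrelateResult` fields come from the code above.

```go
// Sketch only: recordEvent correlates an event and forwards it via a
// hypothetical send callback, then feeds the server's view back into the cache.
func recordEvent(c *EventCorrelator, evt *v1.Event,
	send func(e *v1.Event, patch []byte) (*v1.Event, error)) error {
	result, err := c.EventCorrelate(evt)
	if err != nil {
		return err
	}
	if result.Skip {
		// The spam filter decided this event should not reach the API server.
		return nil
	}
	updated, err := send(result.Event, result.Patch)
	if err != nil {
		return err
	}
	// Track the server-assigned name, resourceVersion and count for future patches.
	c.UpdateState(updated)
	return nil
}
```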
@@ -0,0 +1,58 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package record

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

// FakeRecorder is used as a fake during tests. It is thread safe. It is usable
// when created manually and not by NewFakeRecorder, however all events may be
// thrown away in this case.
type FakeRecorder struct {
	Events chan string
}

func (f *FakeRecorder) Event(object runtime.Object, eventtype, reason, message string) {
	if f.Events != nil {
		f.Events <- fmt.Sprintf("%s %s %s", eventtype, reason, message)
	}
}

func (f *FakeRecorder) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) {
	if f.Events != nil {
		f.Events <- fmt.Sprintf(eventtype+" "+reason+" "+messageFmt, args...)
	}
}

func (f *FakeRecorder) PastEventf(object runtime.Object, timestamp metav1.Time, eventtype, reason, messageFmt string, args ...interface{}) {
}

func (f *FakeRecorder) AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) {
	f.Eventf(object, eventtype, reason, messageFmt, args)
}

// NewFakeRecorder creates new fake event recorder with event channel with
// buffer of given size.
func NewFakeRecorder(bufferSize int) *FakeRecorder {
	return &FakeRecorder{
		Events: make(chan string, bufferSize),
	}
}
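A hedged aside (not part of this commit): how a test might consume the `FakeRecorder` added above; only `NewFakeRecorder`, `Event`, and the `Events` channel come from the file, the rest is illustrative.

```go
package record_test

import (
	"fmt"

	"k8s.io/client-go/tools/record"
)

func ExampleFakeRecorder() {
	// FakeRecorder buffers formatted events on a channel, so a test can assert
	// on what a controller emitted without talking to a real API server.
	recorder := record.NewFakeRecorder(10)

	// The object argument is ignored by the fake; only type, reason and message matter.
	recorder.Event(nil, "Warning", "FailedCreate", "boom")

	fmt.Println(<-recorder.Events)
	// Output: Warning FailedCreate boom
}
```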
@@ -0,0 +1,22 @@
---
name: Breaking Change
about: Makes a breaking change to knative/pkg
title: ''
labels:
assignees: 'mattmoor'

---

BREAKING CHANGES MUST STAGE CHANGES ONTO DOWNSTREAM
KNATIVE REPOSITORIES


| Repo              | Pull Request                   |
|-------------------|--------------------------------|
| Build             | knative/build#1234             |
| Eventing          | knative/eventing#1234          |
| Serving           | knative/serving#1234           |
| Sample Controller | knative/sample-controller#1234 |


cc @n3wscott @jasonhall

@@ -0,0 +1,10 @@
---
name: Bug Fix
about: Fixes a bug in knative/pkg
title: ''
labels: kind/bug
assignees: ''

---

Fixes:

@@ -0,0 +1,8 @@
---
name: Normal Change
about: Makes a boring change to the repo.
title: ''
labels:
assignees: ''

---
@@ -46,7 +46,7 @@ export PATH="${PATH}:${GOPATH}/bin"
### Checkout your fork

The Go tools require that you clone the repository to the
`src/github.com/knative/pkg` directory in your
`src/knative.dev/pkg` directory in your
[`GOPATH`](https://github.com/golang/go/wiki/SettingGOPATH).

To check out this repository:

@@ -56,8 +56,8 @@ To check out this repository:
1. Clone it to your machine:

   ```shell
   mkdir -p ${GOPATH}/src/github.com/knative
   cd ${GOPATH}/src/github.com/knative
   mkdir -p ${GOPATH}/src/knative.dev
   cd ${GOPATH}/src/knative.dev
   git clone git@github.com:${YOUR_GITHUB_USERNAME}/pkg.git
   cd pkg
   git remote add upstream git@github.com:knative/pkg.git
@@ -300,14 +300,14 @@

[[projects]]
  branch = "master"
  digest = "1:1bfc083da5bbeb7abaac53c56890eb14eb11bac9ec985bfe338c4bbb0540c9ba"
  digest = "1:fa87bf3dd5923ba242fd7b496eb170a158e449f6b4a9ba3f0890c551b808f620"
  name = "github.com/knative/test-infra"
  packages = [
    "scripts",
    "tools/dep-collector",
  ]
  pruneopts = "UT"
  revision = "1576da30069624094cf01719452da944b3046826"
  revision = "0d9dc298d262fe0b0f6944aa9d45b9ab7f6b34ea"

[[projects]]
  digest = "1:56dbf15e091bf7926cb33a57cb6bdfc658fc6d3498d2f76f10a97ce7856f1fde"

@@ -780,6 +780,27 @@
  revision = "145d52631d00cbfe68490d19ae4f0f501fd31a95"
  version = "kubernetes-1.12.6"

[[projects]]
  digest = "1:6def8acd040e85b3becb1dd73de1f22a787fd9776e32aa3bbac9cf9727b53d0d"
  name = "k8s.io/apiextensions-apiserver"
  packages = [
    "pkg/apis/apiextensions",
    "pkg/apis/apiextensions/v1beta1",
    "pkg/client/clientset/clientset",
    "pkg/client/clientset/clientset/fake",
    "pkg/client/clientset/clientset/scheme",
    "pkg/client/clientset/clientset/typed/apiextensions/v1beta1",
    "pkg/client/clientset/clientset/typed/apiextensions/v1beta1/fake",
    "pkg/client/informers/externalversions",
    "pkg/client/informers/externalversions/apiextensions",
    "pkg/client/informers/externalversions/apiextensions/v1beta1",
    "pkg/client/informers/externalversions/internalinterfaces",
    "pkg/client/listers/apiextensions/v1beta1",
  ]
  pruneopts = "NUT"
  revision = "bd0469a053ff88529a61145790499fe78a09a49d"
  version = "kubernetes-1.12.6"

[[projects]]
  digest = "1:119ae04ee44c5d179dcde1ee686f057cfe3fc54a7ee8484b920932a80309e88b"
  name = "k8s.io/apimachinery"
@@ -836,7 +857,7 @@
  version = "kubernetes-1.12.6"

[[projects]]
  digest = "1:07be043078c2dc2ee33e81278b264a84f364c6d711811d2932aa42212fc4f2ae"
  digest = "1:b7dd0420e85cb2968ffb945f2810ea6c796dc2a08660618e2200c08c596f0624"
  name = "k8s.io/client-go"
  packages = [
    "discovery",

@@ -984,11 +1005,9 @@
    "pkg/apis/clientauthentication/v1beta1",
    "pkg/version",
    "plugin/pkg/client/auth/exec",
    "plugin/pkg/client/auth/gcp",
    "rest",
    "rest/watch",
    "testing",
    "third_party/forked/golang/template",
    "tools/auth",
    "tools/cache",
    "tools/clientcmd",

@@ -1006,7 +1025,6 @@
    "util/flowcontrol",
    "util/homedir",
    "util/integer",
    "util/jsonpath",
    "util/retry",
    "util/workqueue",
  ]
@@ -1121,6 +1139,10 @@
    "k8s.io/api/batch/v1",
    "k8s.io/api/core/v1",
    "k8s.io/api/rbac/v1",
    "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset",
    "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake",
    "k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions",
    "k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1beta1",
    "k8s.io/apimachinery/pkg/api/equality",
    "k8s.io/apimachinery/pkg/api/errors",
    "k8s.io/apimachinery/pkg/api/meta",

@@ -1149,12 +1171,13 @@
    "k8s.io/client-go/informers/apps/v1",
    "k8s.io/client-go/informers/autoscaling/v1",
    "k8s.io/client-go/informers/autoscaling/v2beta1",
    "k8s.io/client-go/informers/batch/v1",
    "k8s.io/client-go/informers/core/v1",
    "k8s.io/client-go/informers/rbac/v1",
    "k8s.io/client-go/kubernetes",
    "k8s.io/client-go/kubernetes/fake",
    "k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1",
    "k8s.io/client-go/kubernetes/typed/core/v1",
    "k8s.io/client-go/plugin/pkg/client/auth/gcp",
    "k8s.io/client-go/rest",
    "k8s.io/client-go/testing",
    "k8s.io/client-go/tools/cache",

@@ -18,6 +18,10 @@ required = [
  name = "k8s.io/api"
  version = "kubernetes-1.12.6"

[[override]]
  name = "k8s.io/apiextensions-apiserver"
  version = "kubernetes-1.12.6"

[[constraint]]
  name = "k8s.io/apimachinery"
  version = "kubernetes-1.12.6"
@@ -0,0 +1,91 @@
# Releasing knative/pkg

We release the components of Knative every 6 weeks. All of these components must
be moved to the latest "release" of the knative/pkg shared library prior to each
release, though ideally this migration happens incrementally over the course of
each milestone.

## Release Process

### Step #1: Monday the week prior to each release.

On Monday of the week prior to the Knative release, each of the downstream
repositories should stage a `[WIP]` Pull Request that advances the knative/pkg
dependency to the latest commit.

At present, these downstream repositories include:

1. knative/serving
1. knative/eventing
1. knative/eventing-contrib
1. knative/build
1. knative/sample-controller
1. GoogleCloudPlatform/cloud-run-events

> The automation that auto-bumps these lives
> [here](https://github.com/mattmoor/knobots/tree/knative/cmd/periodic/kodata).

`Gopkg.toml` should look like:

```toml
[[override]]
  name = "github.com/knative/pkg"
  branch = "master"
```

Then the following is run:

```shell
dep ensure -update github.com/knative/pkg
./hack/update-codegen.sh
```

If problems are found, they are addressed and the update is merged, and this
process repeats until knative/pkg can be cleanly updated without any changes.

> If by mid-week we do not have a clean PR in each repository, the person
> driving the update should escalate to avoid delaying the release.

### Step #2: Friday the week prior to each release.

A release branch is snapped on knative/pkg with the form `release-0.X`, where `X`
reflects the forthcoming Knative release. The commit at which this branch is
snapped will be the version that has been staged into the `[WIP]` PRs in every
repository.

These staging PRs are then updated to:

```toml
[[override]]
  name = "github.com/knative/pkg"
  # The 0.X release branch.
  branch = "release-0.X"
```

The `[WIP]` prefix is removed, and the PRs are reviewed and merged.

## Backporting Fixes

If a problem is found in knative/pkg in an older release and a fix must be
backported, it should first be fixed at HEAD (if still relevant). Whether the fix
is desired and eligible for back-porting to a release branch should be called out
and discussed with a lead in the original PR. This may raise the review bar or
lead to trade-offs in design; it is better to front-load this consideration than
to delay a cherry-pick.

Once that PR has merged (if still relevant), the same commit should be
cherry-picked onto `release-0.Y` and any merge problems fixed up.

Once the change is ready, a PR should be sent against `release-0.Y` with the
prefix `[RELEASE-0.Y] Your PR Title` to clearly designate this PR as targeting
the release branch. A lead will review and make a ruling on the PR, but if this
consideration was front-loaded, it should be a short review.

### Picking up fixes

Downstream repositories should reference `knative/pkg` release branches from
their own release branches, so to update the `knative/pkg` dependency we run:

```shell
dep ensure -update github.com/knative/pkg
./hack/update-deps.sh
```
@@ -42,10 +42,26 @@ func IsInCreate(ctx context.Context) bool {
// the receiver being validated is being updated.
type inUpdateKey struct{}

type updatePayload struct {
	base        interface{}
	subresource string
}

// WithinUpdate is used to note that the webhook is calling within
// the context of an Update operation.
func WithinUpdate(ctx context.Context, base interface{}) context.Context {
	return context.WithValue(ctx, inUpdateKey{}, base)
	return context.WithValue(ctx, inUpdateKey{}, &updatePayload{
		base: base,
	})
}

// WithinSubResourceUpdate is used to note that the webhook is calling within
// the context of an Update operation on a subresource.
func WithinSubResourceUpdate(ctx context.Context, base interface{}, sr string) context.Context {
	return context.WithValue(ctx, inUpdateKey{}, &updatePayload{
		base:        base,
		subresource: sr,
	})
}

// IsInUpdate checks whether the context is an Update.
@@ -53,10 +69,24 @@ func IsInUpdate(ctx context.Context) bool {
	return ctx.Value(inUpdateKey{}) != nil
}

// IsInStatusUpdate checks whether the context is an Update.
func IsInStatusUpdate(ctx context.Context) bool {
	value := ctx.Value(inUpdateKey{})
	if value == nil {
		return false
	}
	up := value.(*updatePayload)
	return up.subresource == "status"
}

// GetBaseline returns the baseline of the update, or nil when we
// are not within an update context.
func GetBaseline(ctx context.Context) interface{} {
	return ctx.Value(inUpdateKey{})
	value := ctx.Value(inUpdateKey{})
	if value == nil {
		return nil
	}
	return value.(*updatePayload).base
}

// This is attached to contexts passed to webhook interfaces when
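A hedged aside (not part of this commit): a sketch of how validation code might consume the update-context helpers changed above. `MyResource`, its `Spec`/`Status` validation, and `CheckImmutableFields` are hypothetical; only `apis.WithinSubResourceUpdate`, `apis.IsInStatusUpdate`, and `apis.GetBaseline` come from this hunk.

```go
// Sketch only: the webhook is assumed to have prepared the context with
// ctx = apis.WithinSubResourceUpdate(ctx, oldObj, "status") for status updates,
// or apis.WithinUpdate(ctx, oldObj) for regular updates.
func (r *MyResource) Validate(ctx context.Context) *apis.FieldError {
	if apis.IsInStatusUpdate(ctx) {
		// In this sketch, status updates only re-validate the status block.
		return r.Status.Validate(ctx)
	}
	if base, ok := apis.GetBaseline(ctx).(*MyResource); ok && base != nil {
		// Compare against the stored object, e.g. to reject edits to immutable fields.
		if err := r.CheckImmutableFields(ctx, base); err != nil {
			return err
		}
	}
	return r.Spec.Validate(ctx)
}
```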
@@ -29,7 +29,7 @@ import (
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/tools/cache"

	"github.com/knative/pkg/apis"
	"knative.dev/pkg/apis"
)

// TypedInformerFactory implements InformerFactory such that the elements
@@ -94,44 +94,36 @@ func AsStructuredWatcher(wf cache.WatchFunc, obj runtime.Object) cache.WatchFunc
	go func() {
		defer close(structuredCh)
		unstructuredCh := uw.ResultChan()
		for {
			select {
			case ue, ok := <-unstructuredCh:
				if !ok {
					// Channel is closed.
					return
				}
		for ue := range unstructuredCh {
			unstructuredObj, ok := ue.Object.(*unstructured.Unstructured)
			if !ok {
				// If it isn't an unstructured object, then forward the
				// event as-is. This is likely to happen when the event's
				// Type is an Error.
				structuredCh <- ue
				continue
			}
			structuredObj := obj.DeepCopyObject()

				unstructuredObj, ok := ue.Object.(*unstructured.Unstructured)
				if !ok {
					// If it isn't an unstructured object, then forward the
					// event as-is. This is likely to happen when the event's
					// Type is an Error.
					structuredCh <- ue
					continue
				}
				structuredObj := obj.DeepCopyObject()

			err := FromUnstructured(unstructuredObj, structuredObj)
			if err != nil {
				// Pass back an error indicating that the object we got
				// was invalid.
				structuredCh <- watch.Event{
					Type:   watch.Error,
					Object: &metav1.Status{
						Status:  metav1.StatusFailure,
						Code:    http.StatusUnprocessableEntity,
						Reason:  metav1.StatusReasonInvalid,
						Message: err.Error(),
					},
				}
				continue
			}
			// Send the structured event.
				err := FromUnstructured(unstructuredObj, structuredObj)
				if err != nil {
					// Pass back an error indicating that the object we got
					// was invalid.
					structuredCh <- watch.Event{
						Type:   ue.Type,
						Object: structuredObj,
						Type:   watch.Error,
						Object: &metav1.Status{
							Status:  metav1.StatusFailure,
							Code:    http.StatusUnprocessableEntity,
							Reason:  metav1.StatusReasonInvalid,
							Message: err.Error(),
						},
					}
					continue
				}
				// Send the structured event.
			structuredCh <- watch.Event{
				Type:   ue.Type,
				Object: structuredObj,
			}
		}
	}()
@@ -20,9 +20,9 @@ import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"

	"github.com/knative/pkg/apis"
	"github.com/knative/pkg/apis/duck"
	"github.com/knative/pkg/apis/duck/v1beta1"
	"knative.dev/pkg/apis"
	"knative.dev/pkg/apis/duck"
	"knative.dev/pkg/apis/duck/v1beta1"
)

// Addressable provides a generic mechanism for a custom resource

@@ -23,7 +23,7 @@ import (

	"fmt"

	"github.com/knative/pkg/apis"
	"knative.dev/pkg/apis"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

@@ -23,8 +23,8 @@ import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"

	"github.com/knative/pkg/apis"
	"github.com/knative/pkg/apis/duck"
	"knative.dev/pkg/apis"
	"knative.dev/pkg/apis/duck"
)

// Conditions is the schema for the conditions portion of the payload

@@ -20,8 +20,8 @@ import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"

	"github.com/knative/pkg/apis"
	"github.com/knative/pkg/apis/duck"
	"knative.dev/pkg/apis"
	"knative.dev/pkg/apis/duck"
)

// LegacyTargetable left around until we migrate to Addressable in the
@@ -17,7 +17,7 @@ limitations under the License.
package v1alpha1

import (
	"github.com/knative/pkg/apis/duck"
	"knative.dev/pkg/apis/duck"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"

@@ -20,8 +20,8 @@ import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"

	"github.com/knative/pkg/apis"
	"github.com/knative/pkg/apis/duck"
	"knative.dev/pkg/apis"
	"knative.dev/pkg/apis/duck"
)

// Targetable is an earlier version of the Callable interface.

@@ -20,8 +20,8 @@ import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"

	"github.com/knative/pkg/apis"
	"github.com/knative/pkg/apis/duck"
	"knative.dev/pkg/apis"
	"knative.dev/pkg/apis/duck"
)

// Addressable provides a generic mechanism for a custom resource
@@ -17,7 +17,7 @@ limitations under the License.
package v1beta1

import (
	"github.com/knative/pkg/apis/duck"
	"knative.dev/pkg/apis/duck"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"

@@ -24,8 +24,8 @@ import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"

	"github.com/knative/pkg/apis"
	"github.com/knative/pkg/apis/duck"
	"knative.dev/pkg/apis"
	"knative.dev/pkg/apis/duck"
)

// Conditions is a simple wrapper around apis.Conditions to implement duck.Implementable.

@@ -21,8 +21,8 @@ limitations under the License.
package v1beta1

import (
	apis "github.com/knative/pkg/apis"
	runtime "k8s.io/apimachinery/pkg/runtime"
	apis "knative.dev/pkg/apis"
)

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@@ -20,7 +20,7 @@ import (
	"encoding/json"
	"fmt"

	"github.com/knative/pkg/kmp"
	"knative.dev/pkg/kmp"
)

// Implementable is implemented by the Fooable duck type that consumers

@@ -21,7 +21,7 @@ import (
	"sort"
	"strings"

	"github.com/knative/pkg/kmp"
	"knative.dev/pkg/kmp"
)

// CurrentField is a constant to supply as a fieldPath for when there is

@@ -66,3 +66,10 @@ type Listable interface {
// The webhook functionality for this has been turned down, which is why this
// interface is empty.
type Annotatable interface{}

// HasSpec indicates that a particular type has a specification information
// and that information is retrievable.
type HasSpec interface {
	// GetUntypedSpec returns the spec of the resource.
	GetUntypedSpec() interface{}
}
@@ -17,7 +17,7 @@ limitations under the License.
package v1alpha1

import (
	"github.com/knative/pkg/apis/istio/common/v1alpha1"
	"knative.dev/pkg/apis/istio/common/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

@@ -17,7 +17,7 @@ limitations under the License.
package v1alpha1

import (
	"github.com/knative/pkg/apis/istio/authentication"
	"knative.dev/pkg/apis/istio/authentication"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"

@@ -21,8 +21,8 @@ limitations under the License.
package v1alpha1

import (
	commonv1alpha1 "github.com/knative/pkg/apis/istio/common/v1alpha1"
	runtime "k8s.io/apimachinery/pkg/runtime"
	commonv1alpha1 "knative.dev/pkg/apis/istio/common/v1alpha1"
)

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@@ -17,7 +17,7 @@ limitations under the License.
package v1alpha3

import (
	"github.com/knative/pkg/apis/istio"
	"knative.dev/pkg/apis/istio"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"

@@ -17,7 +17,7 @@ limitations under the License.
package v1alpha3

import (
	"github.com/knative/pkg/apis/istio/common/v1alpha1"
	"knative.dev/pkg/apis/istio/common/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

@@ -21,8 +21,8 @@ limitations under the License.
package v1alpha3

import (
	v1alpha1 "github.com/knative/pkg/apis/istio/common/v1alpha1"
	runtime "k8s.io/apimachinery/pkg/runtime"
	v1alpha1 "knative.dev/pkg/apis/istio/common/v1alpha1"
)

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@@ -19,8 +19,8 @@ import (
	"fmt"
	"testing"

	"github.com/knative/pkg/apis"
	duckv1b1 "github.com/knative/pkg/apis/duck/v1beta1"
	"knative.dev/pkg/apis"
	duckv1b1 "knative.dev/pkg/apis/duck/v1beta1"
	corev1 "k8s.io/api/core/v1"
)

@@ -19,11 +19,11 @@ limitations under the License.
package versioned

import (
	authenticationv1alpha1 "github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1"
	networkingv1alpha3 "github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3"
	discovery "k8s.io/client-go/discovery"
	rest "k8s.io/client-go/rest"
	flowcontrol "k8s.io/client-go/util/flowcontrol"
	authenticationv1alpha1 "knative.dev/pkg/client/clientset/versioned/typed/authentication/v1alpha1"
	networkingv1alpha3 "knative.dev/pkg/client/clientset/versioned/typed/istio/v1alpha3"
)

type Interface interface {
@@ -19,16 +19,16 @@ limitations under the License.
package fake

import (
	clientset "github.com/knative/pkg/client/clientset/versioned"
	authenticationv1alpha1 "github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1"
	fakeauthenticationv1alpha1 "github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/fake"
	networkingv1alpha3 "github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3"
	fakenetworkingv1alpha3 "github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/fake"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/discovery"
	fakediscovery "k8s.io/client-go/discovery/fake"
	"k8s.io/client-go/testing"
	clientset "knative.dev/pkg/client/clientset/versioned"
	authenticationv1alpha1 "knative.dev/pkg/client/clientset/versioned/typed/authentication/v1alpha1"
	fakeauthenticationv1alpha1 "knative.dev/pkg/client/clientset/versioned/typed/authentication/v1alpha1/fake"
	networkingv1alpha3 "knative.dev/pkg/client/clientset/versioned/typed/istio/v1alpha3"
	fakenetworkingv1alpha3 "knative.dev/pkg/client/clientset/versioned/typed/istio/v1alpha3/fake"
)

// NewSimpleClientset returns a clientset that will respond with the provided objects.

@@ -19,13 +19,13 @@ limitations under the License.
package fake

import (
	authenticationv1alpha1 "github.com/knative/pkg/apis/istio/authentication/v1alpha1"
	networkingv1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	runtime "k8s.io/apimachinery/pkg/runtime"
	schema "k8s.io/apimachinery/pkg/runtime/schema"
	serializer "k8s.io/apimachinery/pkg/runtime/serializer"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	authenticationv1alpha1 "knative.dev/pkg/apis/istio/authentication/v1alpha1"
	networkingv1alpha3 "knative.dev/pkg/apis/istio/v1alpha3"
)

var scheme = runtime.NewScheme()
@@ -19,13 +19,13 @@ limitations under the License.
package scheme

import (
	authenticationv1alpha1 "github.com/knative/pkg/apis/istio/authentication/v1alpha1"
	networkingv1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	runtime "k8s.io/apimachinery/pkg/runtime"
	schema "k8s.io/apimachinery/pkg/runtime/schema"
	serializer "k8s.io/apimachinery/pkg/runtime/serializer"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	authenticationv1alpha1 "knative.dev/pkg/apis/istio/authentication/v1alpha1"
	networkingv1alpha3 "knative.dev/pkg/apis/istio/v1alpha3"
)

var Scheme = runtime.NewScheme()

@@ -19,10 +19,10 @@ limitations under the License.
package v1alpha1

import (
	v1alpha1 "github.com/knative/pkg/apis/istio/authentication/v1alpha1"
	"github.com/knative/pkg/client/clientset/versioned/scheme"
	serializer "k8s.io/apimachinery/pkg/runtime/serializer"
	rest "k8s.io/client-go/rest"
	v1alpha1 "knative.dev/pkg/apis/istio/authentication/v1alpha1"
	"knative.dev/pkg/client/clientset/versioned/scheme"
)

type AuthenticationV1alpha1Interface interface {
@@ -19,9 +19,9 @@ limitations under the License.
package fake

import (
	v1alpha1 "github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1"
	rest "k8s.io/client-go/rest"
	testing "k8s.io/client-go/testing"
	v1alpha1 "knative.dev/pkg/client/clientset/versioned/typed/authentication/v1alpha1"
)

type FakeAuthenticationV1alpha1 struct {
Some files were not shown because too many files have changed in this diff.