Generate and format files
- Run hack/update-codegen.sh
- Run hack/update-generated-device-plugin.sh
- Run hack/update-generated-protobuf.sh
- Run hack/update-generated-runtime.sh
- Run hack/update-generated-swagger-docs.sh
- Run hack/update-openapi-spec.sh
- Run hack/update-gofmt.sh

Replay of a9593d634c6a053848413e600dadbf974627515f

Kubernetes-commit: 15d9d196476d64482189f00f1cf1a2061aea5b35
parent 603d049f4a
commit b3ad9fb4e3
@@ -52,9 +52,10 @@ type WantsQuotaConfiguration interface {
 // WantsFeatureGate defines a function which passes the featureGates for inspection by an admission plugin.
 // Admission plugins should not hold a reference to the featureGates. Instead, they should query a particular one
 // and assign it to a simple bool in the admission plugin struct.
+//
 // func (a *admissionPlugin) InspectFeatureGates(features featuregate.FeatureGate){
 // a.myFeatureIsOn = features.Enabled("my-feature")
 // }
 type WantsFeatures interface {
 InspectFeatureGates(featuregate.FeatureGate)
 admission.InitializationValidator
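The comment above prescribes copying a single gate into a plain bool at initialization time. A minimal sketch of a plugin following that advice (not from this commit; the plugin type, the "my-feature" gate name, and the validation check are illustrative assumptions):

package example

import (
	"fmt"

	"k8s.io/component-base/featuregate"
)

// myPlugin records the state of a single feature gate as a bool, as the
// doc comment recommends, rather than keeping the FeatureGate around.
type myPlugin struct {
	inspected     bool
	myFeatureIsOn bool
}

// InspectFeatureGates is the method the WantsFeatures initializer calls.
func (p *myPlugin) InspectFeatureGates(features featuregate.FeatureGate) {
	p.myFeatureIsOn = features.Enabled("my-feature") // "my-feature" is a placeholder gate name
	p.inspected = true
}

// ValidateInitialization reports whether the plugin was actually wired up.
func (p *myPlugin) ValidateInitialization() error {
	if !p.inspected {
		return fmt.Errorf("myPlugin requires a feature gate")
	}
	return nil
}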
@@ -202,16 +202,16 @@ func (e *quotaEvaluator) checkAttributes(ns string, admissionAttributes []*admis

 // checkQuotas checks the admission attributes against the passed quotas. If a quota applies, it will attempt to update it
 // AFTER it has checked all the admissionAttributes. The method breaks down into phase like this:
 // 0. make a copy of the quotas to act as a "running" quota so we know what we need to update and can still compare against the
 // originals
 // 1. check each admission attribute to see if it fits within *all* the quotas. If it doesn't fit, mark the waiter as failed
 // and the running quota don't change. If it did fit, check to see if any quota was changed. It there was no quota change
 // mark the waiter as succeeded. If some quota did change, update the running quotas
 // 2. If no running quota was changed, return now since no updates are needed.
 // 3. for each quota that has changed, attempt an update. If all updates succeeded, update all unset waiters to success status and return. If the some
 // updates failed on conflict errors and we have retries left, re-get the failed quota from our cache for the latest version
 // and recurse into this method with the subset. It's safe for us to evaluate ONLY the subset, because the other quota
 // documents for these waiters have already been evaluated. Step 1, will mark all the ones that should already have succeeded.
 func (e *quotaEvaluator) checkQuotas(quotas []corev1.ResourceQuota, admissionAttributes []*admissionWaiter, remainingRetries int) {
 // yet another copy to compare against originals to see if we actually have deltas
 originalQuotas, err := copyQuotas(quotas)
@@ -18,15 +18,17 @@ limitations under the License.
 // source: k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/apis/example2/v1/generated.proto

 /*
 Package v1 is a generated protocol buffer package.

 It is generated from these files:
+
 k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/apis/example2/v1/generated.proto

 It has these top-level messages:
+
 ReplicaSet
 ReplicaSetSpec
 ReplicaSetStatus
 */
 package v1

@@ -43,8 +43,9 @@ var leadingDigits = regexp.MustCompile(`^(\d+)`)

 // MajorMinor parses a numeric major/minor version from the provided version info.
 // The minor version drops all characters after the first non-digit character:
+//
 // version.Info{Major:"1", Minor:"2+"} -> 1,2
 // version.Info{Major:"1", Minor:"2.3-build4"} -> 1,2
 func MajorMinor(v version.Info) (int, int, error) {
 major, err := strconv.Atoi(v.Major)
 if err != nil {
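The doc comment's two examples boil down to: Major must be purely numeric, while Minor keeps only its leading digits. A standalone sketch of that rule (a hypothetical helper, not the upstream MajorMinor implementation, though it reuses the same leadingDigits idea):

package main

import (
	"fmt"
	"regexp"
	"strconv"
)

var leadingDigits = regexp.MustCompile(`^(\d+)`)

// majorMinor mirrors the documented behavior: Major must be numeric, and
// Minor keeps only its leading digits ("2+" and "2.3-build4" both become 2).
func majorMinor(major, minor string) (int, int, error) {
	maj, err := strconv.Atoi(major)
	if err != nil {
		return 0, 0, fmt.Errorf("invalid major %q: %v", major, err)
	}
	digits := leadingDigits.FindString(minor)
	if digits == "" {
		return 0, 0, fmt.Errorf("invalid minor %q", minor)
	}
	min, err := strconv.Atoi(digits)
	if err != nil {
		return 0, 0, err
	}
	return maj, min, nil
}

func main() {
	fmt.Println(majorMinor("1", "2+"))         // 1 2 <nil>
	fmt.Println(majorMinor("1", "2.3-build4")) // 1 2 <nil>
}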
@@ -56,9 +56,9 @@ func NewScaleHandler(parentEntries []metav1.ManagedFieldsEntry, groupVersion sch
 // ToSubresource filter the managed fields of the main resource and convert
 // them so that they can be handled by scale.
 // For the managed fields that have a replicas path it performs two changes:
 // 1. APIVersion is changed to the APIVersion of the scale subresource
 // 2. Replicas path of the main resource is transformed to the replicas path of
 // the scale subresource
 func (h *ScaleHandler) ToSubresource() ([]metav1.ManagedFieldsEntry, error) {
 managed, err := DecodeManagedFields(h.parentEntries)
 if err != nil {
@@ -41,7 +41,7 @@ func TestNamespaceContext(t *testing.T) {
 }
 }

-//TestUserContext validates that a userinfo can be get/set on a context object
+// TestUserContext validates that a userinfo can be get/set on a context object
 func TestUserContext(t *testing.T) {
 ctx := NewContext()
 _, ok := UserFrom(ctx)
@@ -42,19 +42,19 @@ type UserProvidedDecorator interface {
 // object implements the same subset of those interfaces as the inner http.ResponseWriter.
 //
 // This function handles the following three casses.
 // - The inner ResponseWriter implements `http.CloseNotifier`, `http.Flusher`,
 // and `http.Hijacker` (an HTTP/1.1 sever provides such a ResponseWriter).
 // - The inner ResponseWriter implements `http.CloseNotifier` and `http.Flusher`
 // but not `http.Hijacker` (an HTTP/2 server provides such a ResponseWriter).
 // - All the other cases collapse to this one, in which the given ResponseWriter is returned.
 //
 // There are three applicable terms:
 // - "outer": this is the ResponseWriter object returned by the WrapForHTTP1Or2 function.
 // - "user-provided decorator" or "middle": this is the user-provided decorator
 // that decorates an inner ResponseWriter object. A user-provided decorator
 // implements the UserProvidedDecorator interface. A user-provided decorator
 // may or may not implement http.CloseNotifier, http.Flusher or http.Hijacker.
 // - "inner": the ResponseWriter that the user-provided decorator extends.
 func WrapForHTTP1Or2(decorator UserProvidedDecorator) http.ResponseWriter {
 // from go net/http documentation:
 // The default HTTP/1.x and HTTP/2 ResponseWriter implementations support Flusher
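A rough sketch of the three-case dispatch the comment describes: probe the inner ResponseWriter for the optional interfaces and return an outer value that exposes exactly the same subset. The anonymous struct wrappers below are stand-ins for this package's real wrapper types:

package wrapdemo

import "net/http"

// wrapIllustration shows the dispatch idea only: detect which optional
// interfaces the inner http.ResponseWriter supports and return a value that
// implements exactly that subset.
func wrapIllustration(inner http.ResponseWriter) http.ResponseWriter {
	_, isNotifier := inner.(http.CloseNotifier)
	_, isFlusher := inner.(http.Flusher)
	_, isHijacker := inner.(http.Hijacker)

	switch {
	case isNotifier && isFlusher && isHijacker: // typical HTTP/1.1 server
		return struct {
			http.ResponseWriter
			http.CloseNotifier
			http.Flusher
			http.Hijacker
		}{inner, inner.(http.CloseNotifier), inner.(http.Flusher), inner.(http.Hijacker)}
	case isNotifier && isFlusher: // typical HTTP/2 server
		return struct {
			http.ResponseWriter
			http.CloseNotifier
			http.Flusher
		}{inner, inner.(http.CloseNotifier), inner.(http.Flusher)}
	default: // every other case: hand back the given ResponseWriter unchanged
		return inner
	}
}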
@@ -900,13 +900,13 @@ func markAsDeleting(obj runtime.Object, now time.Time) (err error) {
 // grace period seconds (graceful deletion) and updating the list of
 // finalizers (finalization); it returns:
 //
 // 1. an error
 // 2. a boolean indicating that the object was not found, but it should be
 // ignored
 // 3. a boolean indicating that the object's grace period is exhausted and it
 // should be deleted immediately
 // 4. a new output object with the state that was updated
 // 5. a copy of the last existing state of the object
 func (e *Store) updateForGracefulDeletionAndFinalizers(ctx context.Context, name, key string, options *metav1.DeleteOptions, preconditions storage.Preconditions, deleteValidation rest.ValidateObjectFunc, in runtime.Object) (err error, ignoreNotFound, deleteImmediately bool, out, lastExisting runtime.Object) {
 lastGraceful := int64(0)
 var pendingFinalizers bool
@@ -875,8 +875,8 @@ func (t *Tester) testDeleteNonExist(obj runtime.Object, opts metav1.DeleteOption

 }

 // This test the fast-fail path. We test that the precondition gets verified
 // again before deleting the object in tests of pkg/storage/etcd.
 func (t *Tester) testDeleteWithUID(obj runtime.Object, createFn CreateFunc, getFn GetFunc, isNotFoundFn IsErrorFunc, opts metav1.DeleteOptions) {
 ctx := t.TestContext()

@@ -912,8 +912,8 @@ func (t *Tester) testDeleteWithUID(obj runtime.Object, createFn CreateFunc, getF
 }
 }

 // This test the fast-fail path. We test that the precondition gets verified
 // again before deleting the object in tests of pkg/storage/etcd.
 func (t *Tester) testDeleteWithResourceVersion(obj runtime.Object, createFn CreateFunc, getFn GetFunc, isNotFoundFn IsErrorFunc, opts metav1.DeleteOptions) {
 ctx := t.TestContext()

@@ -71,13 +71,13 @@ func withFakeUser(handler http.Handler) http.Handler {
 }

 // Tests that MaxInFlightLimit works, i.e.
 // - "long" requests such as proxy or watch, identified by regexp are not accounted despite
 // hanging for the long time,
 // - "short" requests are correctly accounted, i.e. there can be only size of channel passed to the
 // constructor in flight at any given moment,
 // - subsequent "short" requests are rejected instantly with appropriate error,
 // - subsequent "long" requests are handled normally,
 // - we correctly recover after some "short" requests finish, i.e. we can process new ones.
 func TestMaxInFlightNonMutating(t *testing.T) {
 const AllowedNonMutatingInflightRequestsNo = 3

@@ -154,7 +154,7 @@ func (d director) ServeHTTP(w http.ResponseWriter, req *http.Request) {
 d.nonGoRestfulMux.ServeHTTP(w, req)
 }

-//TODO: Unify with RecoverPanics?
+// TODO: Unify with RecoverPanics?
 func logStackOnRecover(s runtime.NegotiatedSerializer, panicReason interface{}, w http.ResponseWriter) {
 var buffer bytes.Buffer
 buffer.WriteString(fmt.Sprintf("recover from panic situation: - %v\r\n", panicReason))
@@ -16,6 +16,7 @@ limitations under the License.

 // Package healthz implements basic http server health checking.
 // Usage:
+//
 // import "k8s.io/apiserver/pkg/server/healthz"
 // healthz.InstallHandler(mux)
 package healthz // import "k8s.io/apiserver/pkg/server/healthz"
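The Usage block above in a runnable form; a hedged sketch assuming healthz.InstallHandler accepts a plain *http.ServeMux and installs its default ping check when no extra checkers are passed (the listen address is made up):

package main

import (
	"log"
	"net/http"

	"k8s.io/apiserver/pkg/server/healthz"
)

func main() {
	mux := http.NewServeMux()

	// Registers /healthz (and per-check subpaths) on the mux.
	healthz.InstallHandler(mux)

	log.Fatal(http.ListenAndServe("127.0.0.1:8080", mux))
}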
@@ -35,6 +35,7 @@ import (
 // 2. conflicts between the different processes all trying to perform the same action
 // 3. partially complete work (API server crashes while running your hook)
 // 4. API server access **BEFORE** your hook has completed
+//
 // Think of it like a mini-controller that is super privileged and gets to run in-process
 // If you use this feature, tag @deads2k on github who has promised to review code for anyone's PostStartHook
 // until it becomes easier to use.
@@ -107,8 +107,8 @@ type lifecycleSignal interface {
 // for us to write unit tests that can verify expected graceful termination behavior.
 //
 // GenericAPIServer can use these to either:
 // - signal that a particular termination event has transpired
 // - wait for a designated termination event to transpire and do some action.
 type lifecycleSignals struct {
 // ShutdownInitiated event is signaled when an apiserver shutdown has been initiated.
 // It is signaled when the `stopCh` provided by the main goroutine
@@ -70,12 +70,13 @@ type AdmissionOptions struct {

 // NewAdmissionOptions creates a new instance of AdmissionOptions
 // Note:
+//
 // In addition it calls RegisterAllAdmissionPlugins to register
 // all generic admission plugins.
 //
 // Provides the list of RecommendedPluginOrder that holds sane values
 // that can be used by servers that don't care about admission chain.
 // Servers that do care can overwrite/append that field after creation.
 func NewAdmissionOptions() *AdmissionOptions {
 options := &AdmissionOptions{
 Plugins: admission.NewPlugins(),
@@ -115,7 +116,8 @@ func (a *AdmissionOptions) AddFlags(fs *pflag.FlagSet) {
 // In case admission plugin names were not provided by a cluster-admin they will be prepared from the recommended/default values.
 // In addition the method lazily initializes a generic plugin that is appended to the list of pluginInitializers
 // note this method uses:
+//
 // genericconfig.Authorizer
 func (a *AdmissionOptions) ApplyTo(
 c *server.Config,
 informers informers.SharedInformerFactory,
@@ -221,7 +223,7 @@ func (a *AdmissionOptions) enabledPluginNames() []string {
 return orderedPlugins
 }

-//Return names of plugins which are enabled by default
+// Return names of plugins which are enabled by default
 func (a *AdmissionOptions) defaultEnabledPluginNames() []string {
 defaultOnPluginNames := []string{}
 for _, pluginName := range a.RecommendedPluginOrder {
@@ -38,8 +38,9 @@ import (
 // DelegatingAuthorizationOptions provides an easy way for composing API servers to delegate their authorization to
 // the root kube API server.
 // WARNING: never assume that every authenticated incoming request already does authorization.
+//
 // The aggregator in the kube API server does this today, but this behaviour is not
 // guaranteed in the future.
 type DelegatingAuthorizationOptions struct {
 // RemoteKubeConfigFile is the file to use to connect to a "normal" kube API server which hosts the
 // SubjectAccessReview.authorization.k8s.io endpoint for checking tokens.
@@ -587,7 +587,8 @@ func (c *Cacher) Get(ctx context.Context, key string, opts storage.GetOptions, o
 }

 // NOTICE: Keep in sync with shouldListFromStorage function in
+//
 // staging/src/k8s.io/apiserver/pkg/util/flowcontrol/request/list_work_estimator.go
 func shouldDelegateList(opts storage.ListOptions) bool {
 resourceVersion := opts.ResourceVersion
 pred := opts.Predicate
@@ -30,13 +30,14 @@ const (

 // timeBudget implements a budget of time that you can use and is
 // periodically being refreshed. The pattern to use it is:
+//
 // budget := newTimeBudget(...)
 // ...
 // timeout := budget.takeAvailable()
 // // Now you can spend at most timeout on doing stuff
 // ...
 // // If you didn't use all timeout, return what you didn't use
 // budget.returnUnused(<unused part of timeout>)
 //
 // NOTE: It's not recommended to be used concurrently from multiple threads -
 // if first user takes the whole timeout, the second one will get 0 timeout
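A toy, single-threaded version of the take/return pattern the comment describes; the real timeBudget is unexported and refreshed periodically in the background, which this sketch deliberately omits:

package main

import (
	"fmt"
	"time"
)

// toyBudget is a deliberately simplified stand-in for the unexported
// timeBudget described above: no background refresh, no locking.
type toyBudget struct{ available time.Duration }

// takeAvailable hands out everything that is currently in the budget.
func (b *toyBudget) takeAvailable() time.Duration {
	d := b.available
	b.available = 0
	return d
}

// returnUnused gives back whatever part of the timeout was not spent.
func (b *toyBudget) returnUnused(unused time.Duration) {
	if unused > 0 {
		b.available += unused
	}
}

func main() {
	budget := &toyBudget{available: 100 * time.Millisecond}

	timeout := budget.takeAvailable()
	// Spend at most `timeout` doing work...
	spent := 30 * time.Millisecond
	budget.returnUnused(timeout - spent)

	fmt.Println("left for the next caller:", budget.available) // 70ms
}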
@@ -53,7 +53,9 @@ import (
 // an interval as invalid iff we need to copy events from the
 // watchCache and we end up needing events that have already
 // been popped off. This translates to the following condition:
+//
 // watchCacheInterval::startIndex >= watchCache::startIndex.
+//
 // When this condition becomes false, the interval is no longer
 // valid and should not be used to retrieve and serve elements
 // from the underlying source.
@@ -47,7 +47,8 @@ func NewETCDLatencyTracker(delegate clientv3.KV) clientv3.KV {
 // tracking function TrackStorageLatency is thread safe.
 //
 // NOTE: Compact is an asynchronous process and is not associated with
+//
 // any request, so we will not be tracking its latency.
 type clientV3KVLatencyTracker struct {
 clientv3.KV
 }
@@ -20,13 +20,18 @@ package testingcert
 // https://github.com/coreos/etcd/tree/master/hack/tls-setup for more details.
 //
 // ca-config.json:
+//
 // expiry was changed from 1 year to 100 years (876000h)
+//
 // ca-csr.json:
+//
 // ca expiry was set to 100 years (876000h) ("ca":{"expiry":"876000h"})
 // key was changed from ecdsa,384 to rsa,2048
+//
 // req-csr.json:
+//
 // key was changed from ecdsa,384 to rsa,2048
 // hosts were changed to "localhost","127.0.0.1"
 const CAFileContent = `
 -----BEGIN CERTIFICATE-----
 MIIEUDCCAzigAwIBAgIUKfV5+qwlw3JneAPdJS7JCO8xIlYwDQYJKoZIhvcNAQEL
@@ -49,11 +49,11 @@ func getAvailablePorts(count int) ([]int, error) {

 // NewTestConfig returns a configuration for an embedded etcd server.
 // The configuration is based on embed.NewConfig(), with the following adjustments:
-// * sets UnsafeNoFsync = true to improve test performance (only reasonable in a test-only
+// - sets UnsafeNoFsync = true to improve test performance (only reasonable in a test-only
 // single-member server we never intend to restart or keep data from)
-// * uses free ports for client and peer listeners
-// * cleans up the data directory on test termination
-// * silences server logs other than errors
+// - uses free ports for client and peer listeners
+// - cleans up the data directory on test termination
+// - silences server logs other than errors
 func NewTestConfig(t *testing.T) *embed.Config {
 cfg := embed.NewConfig()

@@ -91,10 +91,11 @@ func findStatusCondition(conditions []v1alpha1.StorageVersionCondition,

 // setStatusCondition sets the corresponding condition in conditions to newCondition.
 // conditions must be non-nil.
 // 1. if the condition of the specified type already exists: all fields of the existing condition are updated to
 // newCondition, LastTransitionTime is set to now if the new status differs from the old status
 // 2. if a condition of the specified type does not exist: LastTransitionTime is set to now() if unset,
 // and newCondition is appended
+//
 // NOTE: forceTransition allows overwriting LastTransitionTime even when the status doesn't change.
 func setStatusCondition(conditions *[]v1alpha1.StorageVersionCondition, newCondition v1alpha1.StorageVersionCondition,
 forceTransition bool) {
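A self-contained sketch of the update-or-append rule spelled out above, using a trimmed-down local condition type instead of v1alpha1.StorageVersionCondition:

package main

import (
	"fmt"
	"time"
)

// condition is a reduced stand-in for v1alpha1.StorageVersionCondition, just
// enough to illustrate the documented behavior.
type condition struct {
	Type               string
	Status             string
	Reason             string
	LastTransitionTime time.Time
}

// setCondition updates the existing entry of the same Type (bumping
// LastTransitionTime only when Status changes, or when forceTransition is
// set), otherwise appends newCond with LastTransitionTime defaulted to now.
func setCondition(conditions *[]condition, newCond condition, forceTransition bool) {
	if newCond.LastTransitionTime.IsZero() {
		newCond.LastTransitionTime = time.Now()
	}
	for i := range *conditions {
		existing := &(*conditions)[i]
		if existing.Type != newCond.Type {
			continue
		}
		if existing.Status != newCond.Status || forceTransition {
			existing.LastTransitionTime = newCond.LastTransitionTime
		}
		existing.Status = newCond.Status
		existing.Reason = newCond.Reason
		return
	}
	*conditions = append(*conditions, newCond)
}

func main() {
	var conds []condition
	setCondition(&conds, condition{Type: "AllEncodingVersionsEqual", Status: "True"}, false)
	setCondition(&conds, condition{Type: "AllEncodingVersionsEqual", Status: "False", Reason: "Mismatch"}, false)
	fmt.Printf("%+v\n", conds)
}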
@@ -52,12 +52,12 @@ limitations under the License.
 // limit. In the original paper, the partial derivative of R(t) with
 // respect to t is
 //
 // 1 / NEQ(t) .
 //
 // To generalize from transmitting one packet at a time to executing C
 // requests at a time, that derivative becomes
 //
 // C / NEQ(t) .
 //
 // However, sometimes there are fewer than C requests available to
 // execute. For a given queue "q", let us also write "reqs(q, t)" for
@@ -70,7 +70,7 @@ limitations under the License.
 // for server requests: at a particular time t, the partial derivative
 // of R(t) with respect to t is
 //
 // min( C, sum[over q] reqs(q, t) ) / NEQ(t) .
 //
 // In terms of the implementation outline, this is the rate at which
 // virtual time is advancing at time t (in virtual nanoseconds per
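A small worked helper for the rate formula above, min(C, sum over q of reqs(q, t)) / NEQ(t); the function and variable names are illustrative, not part of the queueset package:

package main

import "fmt"

// virtualTimeRate computes the documented rate at which virtual time
// advances: min(C, total queued-or-executing requests) / NEQ(t), where NEQ(t)
// is the number of non-empty queues.
func virtualTimeRate(concurrencyLimit int, reqsPerQueue []int) float64 {
	nonEmptyQueues := 0
	totalReqs := 0
	for _, n := range reqsPerQueue {
		if n > 0 {
			nonEmptyQueues++
			totalReqs += n
		}
	}
	if nonEmptyQueues == 0 {
		return 0
	}
	if totalReqs > concurrencyLimit {
		totalReqs = concurrencyLimit
	}
	return float64(totalReqs) / float64(nonEmptyQueues)
}

func main() {
	// C=4 and three non-empty queues holding 1, 2 and 5 requests:
	// min(4, 8) / 3 = 1.333... virtual seconds per real second.
	fmt.Println(virtualTimeRate(4, []int{1, 2, 5, 0}))
}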
@@ -116,5 +116,4 @@ limitations under the License.
 // queue’s virtual start time is advanced by G. When a request
 // finishes being served, and the actual service time was S, the
 // queue’s virtual start time is decremented by G - S.
-//
 package queueset
@@ -970,19 +970,19 @@ func TestTimeout(t *testing.T) {

 // TestContextCancel tests cancellation of a request's context.
 // The outline is:
 // 1. Use a concurrency limit of 1.
 // 2. Start request 1.
 // 3. Use a fake clock for the following logic, to insulate from scheduler noise.
 // 4. The exec fn of request 1 starts request 2, which should wait
 // in its queue.
 // 5. The exec fn of request 1 also forks a goroutine that waits 1 second
 // and then cancels the context of request 2.
 // 6. The exec fn of request 1, if StartRequest 2 returns a req2 (which is the normal case),
 // calls `req2.Finish`, which is expected to return after the context cancel.
 // 7. The queueset interface allows StartRequest 2 to return `nil` in this situation,
 // if the scheduler gets the cancel done before StartRequest finishes;
 // the test handles this without regard to whether the implementation will ever do that.
 // 8. Check that the above took exactly 1 second.
 func TestContextCancel(t *testing.T) {
 metrics.Register()
 metrics.Reset()
@@ -40,7 +40,8 @@ type waitGroupCounter struct {
 }

 // compile time assertion that waitGroupCounter meets requirements
+//
 // of GoRoutineCounter
 var _ counter.GoRoutineCounter = (*waitGroupCounter)(nil)

 func (wgc *waitGroupCounter) Add(delta int) {
@@ -22,7 +22,8 @@ type Observer interface {
 Observe(float64)
 }

 // ChangeObserver extends Observer with the ability to take
+//
 // an observation that is relative to the previous observation.
 type ChangeObserver interface {
 Observer
@@ -38,19 +38,21 @@ const (

 var errMetricNotFound = errors.New("not found")

-/* TestSampler does a rough behavioral test of the sampling in a
-SampleAndWatermarkHistograms. The test creates one and exercises
+/*
+TestSampler does a rough behavioral test of the sampling in a
+SampleAndWatermarkHistograms. The test creates one and exercises
 it, checking that the count in the sampling histogram is correct at
 each step. The sampling histogram is expected to get one
 observation at the end of each sampling period. A fake clock is
 used, and the exercise consists of repeatedly changing that fake
 clock by an amount of time chosen uniformly at random from a range
 that goes from a little negative to somewhat more than two sampling
 periods. The negative changes are included because small negative
 changes have been observed in real monotonic clock readings (see
 issue #96459) and we want to test that they are properly tolerated.
 The designed toleration is to pretend that the clock did not
 change, until it resumes net forward progress.
 */
 func TestSampler(t *testing.T) {
 t0 := time.Now()
@@ -141,7 +141,8 @@ func key(requestInfo *apirequest.RequestInfo) string {
 }

 // NOTICE: Keep in sync with shouldDelegateList function in
+//
 // staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher.go
 func shouldListFromStorage(query url.Values, opts *metav1.ListOptions) bool {
 resourceVersion := opts.ResourceVersion
 pagingEnabled := utilfeature.DefaultFeatureGate.Enabled(features.APIListChunking)
@@ -195,8 +195,9 @@ func (w *watchTracker) forgetWatch(identifier *watchIdentifier, index *indexValu
 // GetInterestedWatchCount implements WatchTracker interface.
 //
 // TODO(wojtek-t): As of now, requestInfo for object creation (POST) doesn't
+//
 // contain the Name field set. Figure out if we can somehow get it for the
 // more accurate cost estimation.
 //
 // TODO(wojtek-t): Figure out how to approach DELETECOLLECTION calls.
 func (w *watchTracker) GetInterestedWatchCount(requestInfo *request.RequestInfo) int {
@@ -38,7 +38,8 @@ func NewDefaultServiceResolver() ServiceResolver {
 // note that the name, namespace, and port are required and by default all
 // created addresses use HTTPS scheme.
 // for example:
+//
 // name=ross namespace=andromeda resolves to https://ross.andromeda.svc:443
 func (sr defaultServiceResolver) ResolveEndpoint(namespace, name string, port int32) (*url.URL, error) {
 if len(name) == 0 || len(namespace) == 0 || port == 0 {
 return nil, errors.New("cannot resolve an empty service name or namespace or port")
@@ -40,11 +40,10 @@ import (
 //
 // Example client session:
 //
 // CONNECT http://server.com with subprotocol "channel.k8s.io"
 // WRITE []byte{0, 102, 111, 111, 10} # send "foo\n" on channel 0 (STDIN)
 // READ []byte{1, 10} # receive "\n" on channel 1 (STDOUT)
 // CLOSE
-//
 const ChannelWebSocketProtocol = "channel.k8s.io"

 // The Websocket subprotocol "base64.channel.k8s.io" base64 encodes each message with a character
@@ -56,11 +55,10 @@ const ChannelWebSocketProtocol = "channel.k8s.io"
 //
 // Example client session:
 //
 // CONNECT http://server.com with subprotocol "base64.channel.k8s.io"
 // WRITE []byte{48, 90, 109, 57, 118, 67, 103, 111, 61} # send "foo\n" (base64: "Zm9vCgo=") on channel '0' (STDIN)
 // READ []byte{49, 67, 103, 61, 61} # receive "\n" (base64: "Cg==") on channel '1' (STDOUT)
 // CLOSE
-//
 const Base64ChannelWebSocketProtocol = "base64.channel.k8s.io"

 type codecType int
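A hedged sketch of the framing both subprotocol comments describe: one channel byte followed by the raw payload for the binary variant, and an ASCII digit followed by a base64 payload for the base64 variant (the helper names are made up):

package main

import (
	"encoding/base64"
	"fmt"
)

// frame prepends the channel byte to the payload, as in the binary
// "channel.k8s.io" subprotocol described above.
func frame(channel byte, payload []byte) []byte {
	return append([]byte{channel}, payload...)
}

// base64Frame uses an ASCII digit for the channel and base64 for the payload,
// as in the "base64.channel.k8s.io" subprotocol.
func base64Frame(channel byte, payload []byte) []byte {
	out := []byte{'0' + channel}
	return append(out, []byte(base64.StdEncoding.EncodeToString(payload))...)
}

func main() {
	fmt.Println(frame(0, []byte("foo\n")))               // [0 102 111 111 10], the WRITE example above
	fmt.Println(string(base64Frame(0, []byte("foo\n")))) // "0Zm9vCg=="
}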
@@ -75,17 +75,17 @@ func (c *counterRaiser) IncreaseMetricsCounter(req *http.Request) {
 // NewDeprecatedCertificateRoundTripperWrapperConstructor returns a RoundTripper wrapper that's usable within ClientConfig.Wrap.
 //
 // It increases the `missingSAN` counter whenever:
 // 1. we get a x509.HostnameError with string `x509: certificate relies on legacy Common Name field`
 // which indicates an error caused by the deprecation of Common Name field when veryfing remote
 // hostname
 // 2. the server certificate in response contains no SAN. This indicates that this binary run
 // with the GODEBUG=x509ignoreCN=0 in env
 //
 // It increases the `sha1` counter whenever:
 // 1. we get a x509.InsecureAlgorithmError with string `SHA1`
 // which indicates an error caused by an insecure SHA1 signature
 // 2. the server certificate in response contains a SHA1WithRSA or ECDSAWithSHA1 signature.
 // This indicates that this binary run with the GODEBUG=x509sha1=1 in env
 func NewDeprecatedCertificateRoundTripperWrapperConstructor(missingSAN, sha1 *metrics.Counter) func(rt http.RoundTripper) http.RoundTripper {
 return func(rt http.RoundTripper) http.RoundTripper {
 return &x509DeprecatedCertificateMetricsRTWrapper{
@@ -190,10 +190,10 @@ func (b *bufferedBackend) processIncomingEvents(stopCh <-chan struct{}) {
 // The following things can cause collectEvents to stop and return the list
 // of events:
 //
-// * Maximum number of events for a batch.
-// * Timer has passed.
-// * Buffer channel is closed and empty.
-// * stopCh is closed.
+// - Maximum number of events for a batch.
+// - Timer has passed.
+// - Buffer channel is closed and empty.
+// - stopCh is closed.
 func (b *bufferedBackend) collectEvents(timer <-chan time.Time, stopCh <-chan struct{}) []*auditinternal.Event {
 var events []*auditinternal.Event

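A sketch of the select loop implied by the four stop conditions listed above; types and sizes are illustrative rather than the audit plugin's actual ones:

package main

import (
	"fmt"
	"time"
)

type event struct{ msg string }

// collect drains up to maxBatchSize events from buffer, stopping early when
// the timer fires, the buffer channel is closed and empty, or stopCh is
// closed — the same four stop conditions listed in the comment above.
func collect(buffer <-chan *event, timer <-chan time.Time, stopCh <-chan struct{}, maxBatchSize int) []*event {
	var events []*event
loop:
	for i := 0; i < maxBatchSize; i++ {
		select {
		case ev, ok := <-buffer:
			if !ok { // buffer channel closed and empty
				break loop
			}
			events = append(events, ev)
		case <-timer: // batch window elapsed
			break loop
		case <-stopCh: // shutting down
			break loop
		}
	}
	return events
}

func main() {
	buf := make(chan *event, 3)
	buf <- &event{"a"}
	buf <- &event{"b"}
	close(buf)
	got := collect(buf, time.After(time.Second), make(chan struct{}), 10)
	fmt.Println(len(got)) // 2
}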
@@ -452,18 +452,18 @@ func (r *claimResolver) Verifier(iss string) (*oidc.IDTokenVerifier, error) {
 // OIDC Connect Core 1.0, section 5.6.2.
 // See: https://openid.net/specs/openid-connect-core-1_0.html#AggregatedDistributedClaims
 //
 // {
 //   ... (other normal claims)...
 //   "_claim_names": {
 //     "groups": "src1"
 //   },
 //   "_claim_sources": {
 //     "src1": {
 //       "endpoint": "https://www.example.com",
 //       "access_token": "f005ba11"
 //     },
 //   },
 // }
 func (r *claimResolver) expand(c claims) error {
 const (
 // The claim containing a map of endpoint references per claim.
@@ -76,19 +76,19 @@ func NewFromInterface(subjectAccessReview authorizationv1client.AuthorizationV1I
 // New creates a new WebhookAuthorizer from the provided kubeconfig file.
 // The config's cluster field is used to refer to the remote service, user refers to the returned authorizer.
 //
 // # clusters refers to the remote service.
 // clusters:
 // - name: name-of-remote-authz-service
 //   cluster:
 //     certificate-authority: /path/to/ca.pem      # CA for verifying the remote service.
 //     server: https://authz.example.com/authorize # URL of remote service to query. Must use 'https'.
 //
 // # users refers to the API server's webhook configuration.
 // users:
 // - name: name-of-api-server
 //   user:
 //     client-certificate: /path/to/cert.pem # cert for the webhook plugin to use
 //     client-key: /path/to/key.pem          # key matching the cert
 //
 // For additional HTTP configuration, refer to the kubeconfig documentation
 // https://kubernetes.io/docs/user-guide/kubeconfig-file/.
@@ -120,45 +120,45 @@ func newWithBackoff(subjectAccessReview subjectAccessReviewer, authorizedTTL, un
 // serialized api.authorization.v1beta1.SubjectAccessReview object. An example request body is
 // provided below.
 //
 // {
 //   "apiVersion": "authorization.k8s.io/v1beta1",
 //   "kind": "SubjectAccessReview",
 //   "spec": {
 //     "resourceAttributes": {
 //       "namespace": "kittensandponies",
 //       "verb": "GET",
 //       "group": "group3",
 //       "resource": "pods"
 //     },
 //     "user": "jane",
 //     "group": [
 //       "group1",
 //       "group2"
 //     ]
 //   }
 // }
 //
 // The remote service is expected to fill the SubjectAccessReviewStatus field to either allow or
 // disallow access. A permissive response would return:
 //
 // {
 //   "apiVersion": "authorization.k8s.io/v1beta1",
 //   "kind": "SubjectAccessReview",
 //   "status": {
 //     "allowed": true
 //   }
 // }
 //
 // To disallow access, the remote service would return:
 //
 // {
 //   "apiVersion": "authorization.k8s.io/v1beta1",
 //   "kind": "SubjectAccessReview",
 //   "status": {
 //     "allowed": false,
 //     "reason": "user does not have read access to the namespace"
 //   }
 // }
 //
 // TODO(mikedanese): We should eventually support failing closed when we
 // encounter an error. We are failing open now to preserve backwards compatible
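For reference, the example request body from the comment above built with the Go client types and marshalled to JSON; note the marshalled output also carries empty metadata and status fields that the hand-written example omits:

package main

import (
	"encoding/json"
	"fmt"

	authorizationv1beta1 "k8s.io/api/authorization/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Mirrors the example SubjectAccessReview quoted in the comment above.
	sar := authorizationv1beta1.SubjectAccessReview{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "authorization.k8s.io/v1beta1",
			Kind:       "SubjectAccessReview",
		},
		Spec: authorizationv1beta1.SubjectAccessReviewSpec{
			ResourceAttributes: &authorizationv1beta1.ResourceAttributes{
				Namespace: "kittensandponies",
				Verb:      "GET",
				Group:     "group3",
				Resource:  "pods",
			},
			User:   "jane",
			Groups: []string{"group1", "group2"},
		},
	}

	body, _ := json.MarshalIndent(sar, "", "  ")
	fmt.Println(string(body))
}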
@@ -246,7 +246,7 @@ func (w *WebhookAuthorizer) Authorize(ctx context.Context, attr authorizer.Attri

 }

-//TODO: need to finish the method to get the rules when using webhook mode
+// TODO: need to finish the method to get the rules when using webhook mode
 func (w *WebhookAuthorizer) RulesFor(user user.Info, namespace string) ([]authorizer.ResourceRuleInfo, []authorizer.NonResourceRuleInfo, bool, error) {
 var (
 resourceRules []authorizer.ResourceRuleInfo