Generate and format files

- Run hack/update-codegen.sh
- Run hack/update-generated-device-plugin.sh
- Run hack/update-generated-protobuf.sh
- Run hack/update-generated-runtime.sh
- Run hack/update-generated-swagger-docs.sh
- Run hack/update-openapi-spec.sh
- Run hack/update-gofmt.sh

Replay of a9593d634c6a053848413e600dadbf974627515f

Kubernetes-commit: 15d9d196476d64482189f00f1cf1a2061aea5b35
Jordan Liggitt authored 2022-11-16 11:39:18 -05:00, committed by Kubernetes Publisher
parent 603d049f4a
commit b3ad9fb4e3
36 changed files with 230 additions and 206 deletions


@@ -52,9 +52,10 @@ type WantsQuotaConfiguration interface {
// WantsFeatureGate defines a function which passes the featureGates for inspection by an admission plugin.
// Admission plugins should not hold a reference to the featureGates. Instead, they should query a particular one
// and assign it to a simple bool in the admission plugin struct.
-//  func (a *admissionPlugin) InspectFeatureGates(features featuregate.FeatureGate){
-//     a.myFeatureIsOn = features.Enabled("my-feature")
-//  }
+//
+//	func (a *admissionPlugin) InspectFeatureGates(features featuregate.FeatureGate){
+//		a.myFeatureIsOn = features.Enabled("my-feature")
+//	}
type WantsFeatures interface {
InspectFeatureGates(featuregate.FeatureGate)
admission.InitializationValidator


@@ -202,16 +202,16 @@ func (e *quotaEvaluator) checkAttributes(ns string, admissionAttributes []*admis
// checkQuotas checks the admission attributes against the passed quotas. If a quota applies, it will attempt to update it
// AFTER it has checked all the admissionAttributes. The method breaks down into phase like this:
-// 0. make a copy of the quotas to act as a "running" quota so we know what we need to update and can still compare against the
-//    originals
-// 1. check each admission attribute to see if it fits within *all* the quotas. If it doesn't fit, mark the waiter as failed
-//    and the running quota don't change. If it did fit, check to see if any quota was changed. It there was no quota change
-//    mark the waiter as succeeded. If some quota did change, update the running quotas
-// 2. If no running quota was changed, return now since no updates are needed.
-// 3. for each quota that has changed, attempt an update. If all updates succeeded, update all unset waiters to success status and return. If the some
-//    updates failed on conflict errors and we have retries left, re-get the failed quota from our cache for the latest version
-//    and recurse into this method with the subset. It's safe for us to evaluate ONLY the subset, because the other quota
-//    documents for these waiters have already been evaluated. Step 1, will mark all the ones that should already have succeeded.
+//  0. make a copy of the quotas to act as a "running" quota so we know what we need to update and can still compare against the
+//     originals
+//  1. check each admission attribute to see if it fits within *all* the quotas. If it doesn't fit, mark the waiter as failed
+//     and the running quota don't change. If it did fit, check to see if any quota was changed. It there was no quota change
+//     mark the waiter as succeeded. If some quota did change, update the running quotas
+//  2. If no running quota was changed, return now since no updates are needed.
+//  3. for each quota that has changed, attempt an update. If all updates succeeded, update all unset waiters to success status and return. If the some
+//     updates failed on conflict errors and we have retries left, re-get the failed quota from our cache for the latest version
+//     and recurse into this method with the subset. It's safe for us to evaluate ONLY the subset, because the other quota
+//     documents for these waiters have already been evaluated. Step 1, will mark all the ones that should already have succeeded.
func (e *quotaEvaluator) checkQuotas(quotas []corev1.ResourceQuota, admissionAttributes []*admissionWaiter, remainingRetries int) {
// yet another copy to compare against originals to see if we actually have deltas
originalQuotas, err := copyQuotas(quotas)
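
The retry flow in steps 0-3 has roughly this shape. The following is a sketch with stand-in types and hypothetical helpers (tryUpdate, refreshFromCache), not the evaluator's actual code:

package main

import "errors"

// quota and waiter stand in for corev1.ResourceQuota and admissionWaiter.
type quota struct {
	name string
	used int
}
type waiter struct{ err error }

var errConflict = errors.New("conflict")

// tryUpdate and refreshFromCache are hypothetical stand-ins for the real
// quota client and the informer-backed cache.
func tryUpdate(q quota) error        { return nil }
func refreshFromCache(q quota) quota { return q }

func checkQuotasSketch(quotas []quota, waiters []*waiter, remainingRetries int) {
	// step 0: work on a copy so we can still diff against the originals
	running := append([]quota(nil), quotas...)

	// step 1: admit each waiter against every running quota (logic elided),
	// mutating `running` and marking waiters as failed or succeeded.

	// step 2: nothing changed means nothing to update
	var dirty []quota
	for i := range running {
		if running[i].used != quotas[i].used {
			dirty = append(dirty, running[i])
		}
	}
	if len(dirty) == 0 {
		return
	}

	// step 3: persist changes; on conflict, refresh just the failed subset
	// from the cache and recurse while retries remain.
	var conflicted []quota
	for _, q := range dirty {
		if err := tryUpdate(q); errors.Is(err, errConflict) {
			conflicted = append(conflicted, refreshFromCache(q))
		}
	}
	if len(conflicted) > 0 && remainingRetries > 0 {
		checkQuotasSketch(conflicted, waiters, remainingRetries-1)
	}
}

func main() {}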


@@ -18,15 +18,17 @@ limitations under the License.
// source: k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/apis/example2/v1/generated.proto
/*
-	Package v1 is a generated protocol buffer package.
+Package v1 is a generated protocol buffer package.

-	It is generated from these files:
-		k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/apis/example2/v1/generated.proto
+It is generated from these files:

-	It has these top-level messages:
-		ReplicaSet
-		ReplicaSetSpec
-		ReplicaSetStatus
+	k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/apis/example2/v1/generated.proto
+
+It has these top-level messages:
+
+	ReplicaSet
+	ReplicaSetSpec
+	ReplicaSetStatus
*/
package v1


@@ -43,8 +43,9 @@ var leadingDigits = regexp.MustCompile(`^(\d+)`)
// MajorMinor parses a numeric major/minor version from the provided version info.
// The minor version drops all characters after the first non-digit character:
// version.Info{Major:"1", Minor:"2+"} -> 1,2
// version.Info{Major:"1", Minor:"2.3-build4"} -> 1,2
//
// version.Info{Major:"1", Minor:"2+"} -> 1,2
// version.Info{Major:"1", Minor:"2.3-build4"} -> 1,2
func MajorMinor(v version.Info) (int, int, error) {
major, err := strconv.Atoi(v.Major)
if err != nil {
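
The documented truncation rule is easy to see in isolation. A self-contained sketch reusing the leadingDigits pattern from the hunk (minorVersion is a hypothetical helper, not the package's API):

package main

import (
	"fmt"
	"regexp"
	"strconv"
)

var leadingDigits = regexp.MustCompile(`^(\d+)`)

// minorVersion keeps only the leading digits of the minor version,
// so "2+" and "2.3-build4" both parse as 2.
func minorVersion(minor string) (int, error) {
	m := leadingDigits.FindString(minor)
	if m == "" {
		return 0, fmt.Errorf("no leading digits in minor version %q", minor)
	}
	return strconv.Atoi(m)
}

func main() {
	for _, minor := range []string{"2+", "2.3-build4"} {
		n, err := minorVersion(minor)
		fmt.Println(n, err) // 2 <nil> both times
	}
}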


@@ -56,9 +56,9 @@ func NewScaleHandler(parentEntries []metav1.ManagedFieldsEntry, groupVersion sch
// ToSubresource filter the managed fields of the main resource and convert
// them so that they can be handled by scale.
// For the managed fields that have a replicas path it performs two changes:
-// 1. APIVersion is changed to the APIVersion of the scale subresource
-// 2. Replicas path of the main resource is transformed to the replicas path of
-//    the scale subresource
+//  1. APIVersion is changed to the APIVersion of the scale subresource
+//  2. Replicas path of the main resource is transformed to the replicas path of
+//     the scale subresource
func (h *ScaleHandler) ToSubresource() ([]metav1.ManagedFieldsEntry, error) {
managed, err := DecodeManagedFields(h.parentEntries)
if err != nil {


@@ -41,7 +41,7 @@ func TestNamespaceContext(t *testing.T) {
}
}
-//TestUserContext validates that a userinfo can be get/set on a context object
+// TestUserContext validates that a userinfo can be get/set on a context object
func TestUserContext(t *testing.T) {
ctx := NewContext()
_, ok := UserFrom(ctx)


@@ -42,19 +42,19 @@ type UserProvidedDecorator interface {
// object implements the same subset of those interfaces as the inner http.ResponseWriter.
//
// This function handles the following three casses.
-// - The inner ResponseWriter implements `http.CloseNotifier`, `http.Flusher`,
-//   and `http.Hijacker` (an HTTP/1.1 sever provides such a ResponseWriter).
-// - The inner ResponseWriter implements `http.CloseNotifier` and `http.Flusher`
-//   but not `http.Hijacker` (an HTTP/2 server provides such a ResponseWriter).
-// - All the other cases collapse to this one, in which the given ResponseWriter is returned.
+//   - The inner ResponseWriter implements `http.CloseNotifier`, `http.Flusher`,
+//     and `http.Hijacker` (an HTTP/1.1 sever provides such a ResponseWriter).
+//   - The inner ResponseWriter implements `http.CloseNotifier` and `http.Flusher`
+//     but not `http.Hijacker` (an HTTP/2 server provides such a ResponseWriter).
+//   - All the other cases collapse to this one, in which the given ResponseWriter is returned.
//
// There are three applicable terms:
// - "outer": this is the ResponseWriter object returned by the WrapForHTTP1Or2 function.
// - "user-provided decorator" or "middle": this is the user-provided decorator
// - "outer": this is the ResponseWriter object returned by the WrapForHTTP1Or2 function.
// - "user-provided decorator" or "middle": this is the user-provided decorator
// that decorates an inner ResponseWriter object. A user-provided decorator
// implements the UserProvidedDecorator interface. A user-provided decorator
// may or may not implement http.CloseNotifier, http.Flusher or http.Hijacker.
// - "inner": the ResponseWriter that the user-provided decorator extends.
// - "inner": the ResponseWriter that the user-provided decorator extends.
func WrapForHTTP1Or2(decorator UserProvidedDecorator) http.ResponseWriter {
// from go net/http documentation:
// The default HTTP/1.x and HTTP/2 ResponseWriter implementations support Flusher
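
For context, a user-provided decorator is any ResponseWriter wrapper that can hand back its inner writer. A minimal sketch, assuming the Unwrap method from the package's UserProvidedDecorator interface:

package main

import "net/http"

// statusRecorder decorates an inner ResponseWriter and records the status code.
type statusRecorder struct {
	http.ResponseWriter // the "inner" writer
	status              int
}

func (r *statusRecorder) WriteHeader(code int) {
	r.status = code
	r.ResponseWriter.WriteHeader(code)
}

// Unwrap exposes the inner writer; WrapForHTTP1Or2 probes it to decide
// which of CloseNotifier/Flusher/Hijacker the outer object should expose.
func (r *statusRecorder) Unwrap() http.ResponseWriter { return r.ResponseWriter }

func main() {
	handler := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		// In apiserver code this would be roughly:
		//   outer := responsewriter.WrapForHTTP1Or2(&statusRecorder{ResponseWriter: w})
		// and `outer` keeps Flusher/Hijacker support matching the inner writer.
		w.WriteHeader(http.StatusOK)
	})
	_ = handler
}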


@@ -900,13 +900,13 @@ func markAsDeleting(obj runtime.Object, now time.Time) (err error) {
// grace period seconds (graceful deletion) and updating the list of
// finalizers (finalization); it returns:
//
-// 1. an error
-// 2. a boolean indicating that the object was not found, but it should be
-//    ignored
-// 3. a boolean indicating that the object's grace period is exhausted and it
-//    should be deleted immediately
-// 4. a new output object with the state that was updated
-// 5. a copy of the last existing state of the object
+//  1. an error
+//  2. a boolean indicating that the object was not found, but it should be
+//     ignored
+//  3. a boolean indicating that the object's grace period is exhausted and it
+//     should be deleted immediately
+//  4. a new output object with the state that was updated
+//  5. a copy of the last existing state of the object
func (e *Store) updateForGracefulDeletionAndFinalizers(ctx context.Context, name, key string, options *metav1.DeleteOptions, preconditions storage.Preconditions, deleteValidation rest.ValidateObjectFunc, in runtime.Object) (err error, ignoreNotFound, deleteImmediately bool, out, lastExisting runtime.Object) {
lastGraceful := int64(0)
var pendingFinalizers bool


@@ -875,8 +875,8 @@ func (t *Tester) testDeleteNonExist(obj runtime.Object, opts metav1.DeleteOption
}
-// This test the fast-fail path. We test that the precondition gets verified
-// again before deleting the object in tests of pkg/storage/etcd.
+// This test the fast-fail path. We test that the precondition gets verified
+// again before deleting the object in tests of pkg/storage/etcd.
func (t *Tester) testDeleteWithUID(obj runtime.Object, createFn CreateFunc, getFn GetFunc, isNotFoundFn IsErrorFunc, opts metav1.DeleteOptions) {
ctx := t.TestContext()
@@ -912,8 +912,8 @@ func (t *Tester) testDeleteWithUID(obj runtime.Object, createFn CreateFunc, getF
}
}
-// This test the fast-fail path. We test that the precondition gets verified
-// again before deleting the object in tests of pkg/storage/etcd.
+// This test the fast-fail path. We test that the precondition gets verified
+// again before deleting the object in tests of pkg/storage/etcd.
func (t *Tester) testDeleteWithResourceVersion(obj runtime.Object, createFn CreateFunc, getFn GetFunc, isNotFoundFn IsErrorFunc, opts metav1.DeleteOptions) {
ctx := t.TestContext()


@@ -71,13 +71,13 @@ func withFakeUser(handler http.Handler) http.Handler {
}
// Tests that MaxInFlightLimit works, i.e.
// - "long" requests such as proxy or watch, identified by regexp are not accounted despite
// hanging for the long time,
// - "short" requests are correctly accounted, i.e. there can be only size of channel passed to the
// constructor in flight at any given moment,
// - subsequent "short" requests are rejected instantly with appropriate error,
// - subsequent "long" requests are handled normally,
// - we correctly recover after some "short" requests finish, i.e. we can process new ones.
// - "long" requests such as proxy or watch, identified by regexp are not accounted despite
// hanging for the long time,
// - "short" requests are correctly accounted, i.e. there can be only size of channel passed to the
// constructor in flight at any given moment,
// - subsequent "short" requests are rejected instantly with appropriate error,
// - subsequent "long" requests are handled normally,
// - we correctly recover after some "short" requests finish, i.e. we can process new ones.
func TestMaxInFlightNonMutating(t *testing.T) {
const AllowedNonMutatingInflightRequestsNo = 3


@@ -154,7 +154,7 @@ func (d director) ServeHTTP(w http.ResponseWriter, req *http.Request) {
d.nonGoRestfulMux.ServeHTTP(w, req)
}
-//TODO: Unify with RecoverPanics?
+// TODO: Unify with RecoverPanics?
func logStackOnRecover(s runtime.NegotiatedSerializer, panicReason interface{}, w http.ResponseWriter) {
var buffer bytes.Buffer
buffer.WriteString(fmt.Sprintf("recover from panic situation: - %v\r\n", panicReason))


@@ -16,6 +16,7 @@ limitations under the License.
// Package healthz implements basic http server health checking.
// Usage:
// import "k8s.io/apiserver/pkg/server/healthz"
// healthz.InstallHandler(mux)
//
// import "k8s.io/apiserver/pkg/server/healthz"
// healthz.InstallHandler(mux)
package healthz // import "k8s.io/apiserver/pkg/server/healthz"
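
Expanded into a runnable program, the documented usage looks like this (the port is an arbitrary choice here; with no checks passed, InstallHandler installs a default ping check):

package main

import (
	"log"
	"net/http"

	"k8s.io/apiserver/pkg/server/healthz"
)

func main() {
	mux := http.NewServeMux()
	healthz.InstallHandler(mux) // serves /healthz and /healthz/ping
	log.Fatal(http.ListenAndServe(":8080", mux))
}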


@@ -35,6 +35,7 @@ import (
// 2. conflicts between the different processes all trying to perform the same action
// 3. partially complete work (API server crashes while running your hook)
// 4. API server access **BEFORE** your hook has completed
+//
// Think of it like a mini-controller that is super privileged and gets to run in-process
// If you use this feature, tag @deads2k on github who has promised to review code for anyone's PostStartHook
// until it becomes easier to use.


@@ -107,8 +107,8 @@ type lifecycleSignal interface {
// for us to write unit tests that can verify expected graceful termination behavior.
//
// GenericAPIServer can use these to either:
-// - signal that a particular termination event has transpired
-// - wait for a designated termination event to transpire and do some action.
+//   - signal that a particular termination event has transpired
+//   - wait for a designated termination event to transpire and do some action.
type lifecycleSignals struct {
// ShutdownInitiated event is signaled when an apiserver shutdown has been initiated.
// It is signaled when the `stopCh` provided by the main goroutine


@@ -70,12 +70,13 @@ type AdmissionOptions struct {
// NewAdmissionOptions creates a new instance of AdmissionOptions
// Note:
-// In addition it calls RegisterAllAdmissionPlugins to register
-// all generic admission plugins.
-//
-// Provides the list of RecommendedPluginOrder that holds sane values
-// that can be used by servers that don't care about admission chain.
-// Servers that do care can overwrite/append that field after creation.
+//
+//	In addition it calls RegisterAllAdmissionPlugins to register
+//	all generic admission plugins.
+//
+//	Provides the list of RecommendedPluginOrder that holds sane values
+//	that can be used by servers that don't care about admission chain.
+//	Servers that do care can overwrite/append that field after creation.
func NewAdmissionOptions() *AdmissionOptions {
options := &AdmissionOptions{
Plugins: admission.NewPlugins(),
@@ -115,7 +116,8 @@ func (a *AdmissionOptions) AddFlags(fs *pflag.FlagSet) {
// In case admission plugin names were not provided by a cluster-admin they will be prepared from the recommended/default values.
// In addition the method lazily initializes a generic plugin that is appended to the list of pluginInitializers
// note this method uses:
-//  genericconfig.Authorizer
+//
+//	genericconfig.Authorizer
func (a *AdmissionOptions) ApplyTo(
c *server.Config,
informers informers.SharedInformerFactory,
@@ -221,7 +223,7 @@ func (a *AdmissionOptions) enabledPluginNames() []string {
return orderedPlugins
}
-//Return names of plugins which are enabled by default
+// Return names of plugins which are enabled by default
func (a *AdmissionOptions) defaultEnabledPluginNames() []string {
defaultOnPluginNames := []string{}
for _, pluginName := range a.RecommendedPluginOrder {
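
A hypothetical wiring sketch for a composed server; NewAdmissionOptions and AddFlags are used as documented above, while the ApplyTo call is left as a comment because the hunk truncates its argument list:

package main

import (
	"github.com/spf13/pflag"

	"k8s.io/apiserver/pkg/server/options"
)

func main() {
	// Pre-populates Plugins and RecommendedPluginOrder with sane defaults,
	// so servers that don't care about the admission chain can use it as-is.
	opts := options.NewAdmissionOptions()
	opts.AddFlags(pflag.CommandLine)
	pflag.Parse()
	// opts.ApplyTo(c, informers, ...) would then wire the chain into a
	// server.Config (full signature shown at the start of the hunk above).
}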


@@ -38,8 +38,9 @@ import (
// DelegatingAuthorizationOptions provides an easy way for composing API servers to delegate their authorization to
// the root kube API server.
// WARNING: never assume that every authenticated incoming request already does authorization.
-//          The aggregator in the kube API server does this today, but this behaviour is not
-//          guaranteed in the future.
+//
+//	The aggregator in the kube API server does this today, but this behaviour is not
+//	guaranteed in the future.
type DelegatingAuthorizationOptions struct {
// RemoteKubeConfigFile is the file to use to connect to a "normal" kube API server which hosts the
// SubjectAccessReview.authorization.k8s.io endpoint for checking tokens.


@@ -587,7 +587,8 @@ func (c *Cacher) Get(ctx context.Context, key string, opts storage.GetOptions, o
}
// NOTICE: Keep in sync with shouldListFromStorage function in
-//  staging/src/k8s.io/apiserver/pkg/util/flowcontrol/request/list_work_estimator.go
+//
+//	staging/src/k8s.io/apiserver/pkg/util/flowcontrol/request/list_work_estimator.go
func shouldDelegateList(opts storage.ListOptions) bool {
resourceVersion := opts.ResourceVersion
pred := opts.Predicate


@@ -30,13 +30,14 @@ const (
// timeBudget implements a budget of time that you can use and is
// periodically being refreshed. The pattern to use it is:
-//   budget := newTimeBudget(...)
-//   ...
-//   timeout := budget.takeAvailable()
-//   // Now you can spend at most timeout on doing stuff
-//   ...
-//   // If you didn't use all timeout, return what you didn't use
-//   budget.returnUnused(<unused part of timeout>)
+//
+//	budget := newTimeBudget(...)
+//	...
+//	timeout := budget.takeAvailable()
+//	// Now you can spend at most timeout on doing stuff
+//	...
+//	// If you didn't use all timeout, return what you didn't use
+//	budget.returnUnused(<unused part of timeout>)
//
// NOTE: It's not recommended to be used concurrently from multiple threads -
// if first user takes the whole timeout, the second one will get 0 timeout
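
A minimal, assumption-laden sketch of the budget type the comment describes (the real cacher implementation also refreshes the budget periodically from a background goroutine):

package main

import (
	"sync"
	"time"
)

type timeBudgetSketch struct {
	mu        sync.Mutex
	budget    time.Duration
	maxBudget time.Duration
}

// takeAvailable hands out the entire current budget and zeroes it.
func (b *timeBudgetSketch) takeAvailable() time.Duration {
	b.mu.Lock()
	defer b.mu.Unlock()
	d := b.budget
	b.budget = 0
	return d
}

// returnUnused gives back what the caller did not spend, capped at the max.
func (b *timeBudgetSketch) returnUnused(unused time.Duration) {
	b.mu.Lock()
	defer b.mu.Unlock()
	if b.budget += unused; b.budget > b.maxBudget {
		b.budget = b.maxBudget
	}
}

func main() {
	b := &timeBudgetSketch{budget: 100 * time.Millisecond, maxBudget: 100 * time.Millisecond}
	timeout := b.takeAvailable()
	// ... spend part of the timeout doing work ...
	b.returnUnused(timeout / 2)
}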


@@ -53,7 +53,9 @@ import (
// an interval as invalid iff we need to copy events from the
// watchCache and we end up needing events that have already
// been popped off. This translates to the following condition:
-//   watchCacheInterval::startIndex >= watchCache::startIndex.
+//
+//	watchCacheInterval::startIndex >= watchCache::startIndex.
+//
// When this condition becomes false, the interval is no longer
// valid and should not be used to retrieve and serve elements
// from the underlying source.


@@ -47,7 +47,8 @@ func NewETCDLatencyTracker(delegate clientv3.KV) clientv3.KV {
// tracking function TrackStorageLatency is thread safe.
//
// NOTE: Compact is an asynchronous process and is not associated with
-//  any request, so we will not be tracking its latency.
+//
+//	any request, so we will not be tracking its latency.
type clientV3KVLatencyTracker struct {
clientv3.KV
}


@@ -20,13 +20,18 @@ package testingcert
// https://github.com/coreos/etcd/tree/master/hack/tls-setup for more details.
//
// ca-config.json:
-//     expiry was changed from 1 year to 100 years (876000h)
+//
+//	expiry was changed from 1 year to 100 years (876000h)
+//
// ca-csr.json:
-//     ca expiry was set to 100 years (876000h) ("ca":{"expiry":"876000h"})
-//     key was changed from ecdsa,384 to rsa,2048
+//
+//	ca expiry was set to 100 years (876000h) ("ca":{"expiry":"876000h"})
+//	key was changed from ecdsa,384 to rsa,2048
+//
// req-csr.json:
-//     key was changed from ecdsa,384 to rsa,2048
-//     hosts were changed to "localhost","127.0.0.1"
+//
+//	key was changed from ecdsa,384 to rsa,2048
+//	hosts were changed to "localhost","127.0.0.1"
const CAFileContent = `
-----BEGIN CERTIFICATE-----
MIIEUDCCAzigAwIBAgIUKfV5+qwlw3JneAPdJS7JCO8xIlYwDQYJKoZIhvcNAQEL


@@ -49,11 +49,11 @@ func getAvailablePorts(count int) ([]int, error) {
// NewTestConfig returns a configuration for an embedded etcd server.
// The configuration is based on embed.NewConfig(), with the following adjustments:
-//   * sets UnsafeNoFsync = true to improve test performance (only reasonable in a test-only
-//     single-member server we never intend to restart or keep data from)
-//   * uses free ports for client and peer listeners
-//   * cleans up the data directory on test termination
-//   * silences server logs other than errors
+//   - sets UnsafeNoFsync = true to improve test performance (only reasonable in a test-only
+//     single-member server we never intend to restart or keep data from)
+//   - uses free ports for client and peer listeners
+//   - cleans up the data directory on test termination
+//   - silences server logs other than errors
func NewTestConfig(t *testing.T) *embed.Config {
cfg := embed.NewConfig()
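
Typical consumption in a test looks roughly like this; startEmbeddedEtcd is a hypothetical helper that assumes it sits next to NewTestConfig and imports "go.etcd.io/etcd/server/v3/embed", "testing", and "time":

func startEmbeddedEtcd(t *testing.T) *embed.Etcd {
	e, err := embed.StartEtcd(NewTestConfig(t))
	if err != nil {
		t.Fatal(err)
	}
	t.Cleanup(e.Close)
	select {
	case <-e.Server.ReadyNotify(): // server is up
	case <-time.After(30 * time.Second):
		t.Fatal("embedded etcd took too long to start")
	}
	return e
}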


@@ -91,10 +91,11 @@ func findStatusCondition(conditions []v1alpha1.StorageVersionCondition,
// setStatusCondition sets the corresponding condition in conditions to newCondition.
// conditions must be non-nil.
-// 1. if the condition of the specified type already exists: all fields of the existing condition are updated to
-//    newCondition, LastTransitionTime is set to now if the new status differs from the old status
-// 2. if a condition of the specified type does not exist: LastTransitionTime is set to now() if unset,
-//    and newCondition is appended
+//  1. if the condition of the specified type already exists: all fields of the existing condition are updated to
+//     newCondition, LastTransitionTime is set to now if the new status differs from the old status
+//  2. if a condition of the specified type does not exist: LastTransitionTime is set to now() if unset,
+//     and newCondition is appended
+//
// NOTE: forceTransition allows overwriting LastTransitionTime even when the status doesn't change.
func setStatusCondition(conditions *[]v1alpha1.StorageVersionCondition, newCondition v1alpha1.StorageVersionCondition,
forceTransition bool) {
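
The two rules translate into roughly the following; a sketch with a stripped-down condition type (the real helper operates on v1alpha1.StorageVersionCondition and also honors forceTransition):

package main

import "time"

// condition is a stand-in for v1alpha1.StorageVersionCondition.
type condition struct {
	Type               string
	Status             string
	LastTransitionTime time.Time
}

func setConditionSketch(conditions *[]condition, newCond condition, now time.Time) {
	for i := range *conditions {
		existing := &(*conditions)[i]
		if existing.Type != newCond.Type {
			continue
		}
		// rule 1: update every field; bump LastTransitionTime only when
		// the status actually changes
		if existing.Status != newCond.Status {
			newCond.LastTransitionTime = now
		} else {
			newCond.LastTransitionTime = existing.LastTransitionTime
		}
		*existing = newCond
		return
	}
	// rule 2: no condition of this type yet; default the transition time and append
	if newCond.LastTransitionTime.IsZero() {
		newCond.LastTransitionTime = now
	}
	*conditions = append(*conditions, newCond)
}

func main() {}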


@@ -52,12 +52,12 @@ limitations under the License.
// limit. In the original paper, the partial derivative of R(t) with
// respect to t is
//
-//   1 / NEQ(t) .
+//	1 / NEQ(t) .
//
// To generalize from transmitting one packet at a time to executing C
// requests at a time, that derivative becomes
//
-//   C / NEQ(t) .
+//	C / NEQ(t) .
//
// However, sometimes there are fewer than C requests available to
// execute. For a given queue "q", let us also write "reqs(q, t)" for
@@ -70,7 +70,7 @@ limitations under the License.
// for server requests: at a particular time t, the partial derivative
// of R(t) with respect to t is
//
-//   min( C, sum[over q] reqs(q, t) ) / NEQ(t) .
+//	min( C, sum[over q] reqs(q, t) ) / NEQ(t) .
//
// In terms of the implementation outline, this is the rate at which
// virtual time is advancing at time t (in virtual nanoseconds per
@@ -116,5 +116,4 @@ limitations under the License.
// queues virtual start time is advanced by G. When a request
// finishes being served, and the actual service time was S, the
// queues virtual start time is decremented by G - S.
-//
package queueset
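
As a toy illustration of that rate: with C=4 server seats and 6 requests spread over NEQ=3 non-empty queues, virtual time advances at min(4,6)/3 = 4/3 virtual seconds per real second. In code (all names ad hoc):

package main

import "fmt"

// advance returns the virtual time after dt real seconds at the documented
// rate min(C, total queued-or-executing requests) / NEQ.
func advance(vt, dt float64, c, totalReqs, neq int) float64 {
	if neq == 0 {
		return vt
	}
	m := totalReqs
	if c < m {
		m = c
	}
	return vt + dt*float64(m)/float64(neq)
}

func main() {
	fmt.Println(advance(0, 1.0, 4, 6, 3)) // 1.3333333333333333
}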


@@ -970,19 +970,19 @@ func TestTimeout(t *testing.T) {
// TestContextCancel tests cancellation of a request's context.
// The outline is:
-// 1. Use a concurrency limit of 1.
-// 2. Start request 1.
-// 3. Use a fake clock for the following logic, to insulate from scheduler noise.
-// 4. The exec fn of request 1 starts request 2, which should wait
-//    in its queue.
-// 5. The exec fn of request 1 also forks a goroutine that waits 1 second
-//    and then cancels the context of request 2.
-// 6. The exec fn of request 1, if StartRequest 2 returns a req2 (which is the normal case),
-//    calls `req2.Finish`, which is expected to return after the context cancel.
-// 7. The queueset interface allows StartRequest 2 to return `nil` in this situation,
-//    if the scheduler gets the cancel done before StartRequest finishes;
-//    the test handles this without regard to whether the implementation will ever do that.
-// 8. Check that the above took exactly 1 second.
+//  1. Use a concurrency limit of 1.
+//  2. Start request 1.
+//  3. Use a fake clock for the following logic, to insulate from scheduler noise.
+//  4. The exec fn of request 1 starts request 2, which should wait
+//     in its queue.
+//  5. The exec fn of request 1 also forks a goroutine that waits 1 second
+//     and then cancels the context of request 2.
+//  6. The exec fn of request 1, if StartRequest 2 returns a req2 (which is the normal case),
+//     calls `req2.Finish`, which is expected to return after the context cancel.
+//  7. The queueset interface allows StartRequest 2 to return `nil` in this situation,
+//     if the scheduler gets the cancel done before StartRequest finishes;
+//     the test handles this without regard to whether the implementation will ever do that.
+//  8. Check that the above took exactly 1 second.
func TestContextCancel(t *testing.T) {
metrics.Register()
metrics.Reset()


@@ -40,7 +40,8 @@ type waitGroupCounter struct {
}
// compile time assertion that waitGroupCounter meets requirements
-//  of GoRoutineCounter
+//
+//	of GoRoutineCounter
var _ counter.GoRoutineCounter = (*waitGroupCounter)(nil)
func (wgc *waitGroupCounter) Add(delta int) {


@@ -22,7 +22,8 @@ type Observer interface {
Observe(float64)
}
-// ChangeObserver extends Observer with the ability to take
+// ChangeObserver extends Observer with the ability to take
+//
// an observation that is relative to the previous observation.
type ChangeObserver interface {
Observer


@@ -38,19 +38,21 @@ const (
var errMetricNotFound = errors.New("not found")
-/* TestSampler does a rough behavioral test of the sampling in a
-SampleAndWatermarkHistograms. The test creates one and exercises
-it, checking that the count in the sampling histogram is correct at
-each step. The sampling histogram is expected to get one
-observation at the end of each sampling period. A fake clock is
-used, and the exercise consists of repeatedly changing that fake
-clock by an amount of time chosen uniformly at random from a range
-that goes from a little negative to somewhat more than two sampling
-periods. The negative changes are included because small negative
-changes have been observed in real monotonic clock readings (see
-issue #96459) and we want to test that they are properly tolerated.
-The designed toleration is to pretend that the clock did not
-change, until it resumes net forward progress.
+/*
+TestSampler does a rough behavioral test of the sampling in a
+SampleAndWatermarkHistograms. The test creates one and exercises
+it, checking that the count in the sampling histogram is correct at
+each step. The sampling histogram is expected to get one
+observation at the end of each sampling period. A fake clock is
+used, and the exercise consists of repeatedly changing that fake
+clock by an amount of time chosen uniformly at random from a range
+that goes from a little negative to somewhat more than two sampling
+periods. The negative changes are included because small negative
+changes have been observed in real monotonic clock readings (see
+issue #96459) and we want to test that they are properly tolerated.
+The designed toleration is to pretend that the clock did not
+change, until it resumes net forward progress.
*/
func TestSampler(t *testing.T) {
t0 := time.Now()


@@ -141,7 +141,8 @@ func key(requestInfo *apirequest.RequestInfo) string {
}
// NOTICE: Keep in sync with shouldDelegateList function in
-//  staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher.go
+//
+//	staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher.go
func shouldListFromStorage(query url.Values, opts *metav1.ListOptions) bool {
resourceVersion := opts.ResourceVersion
pagingEnabled := utilfeature.DefaultFeatureGate.Enabled(features.APIListChunking)


@@ -195,8 +195,9 @@ func (w *watchTracker) forgetWatch(identifier *watchIdentifier, index *indexValu
// GetInterestedWatchCount implements WatchTracker interface.
//
// TODO(wojtek-t): As of now, requestInfo for object creation (POST) doesn't
-//  contain the Name field set. Figure out if we can somehow get it for the
-//  more accurate cost estimation.
+//
+//	contain the Name field set. Figure out if we can somehow get it for the
+//	more accurate cost estimation.
//
// TODO(wojtek-t): Figure out how to approach DELETECOLLECTION calls.
func (w *watchTracker) GetInterestedWatchCount(requestInfo *request.RequestInfo) int {


@@ -38,7 +38,8 @@ func NewDefaultServiceResolver() ServiceResolver {
// note that the name, namespace, and port are required and by default all
// created addresses use HTTPS scheme.
// for example:
-//     name=ross namespace=andromeda resolves to https://ross.andromeda.svc:443
+//
+//	name=ross namespace=andromeda resolves to https://ross.andromeda.svc:443
func (sr defaultServiceResolver) ResolveEndpoint(namespace, name string, port int32) (*url.URL, error) {
if len(name) == 0 || len(namespace) == 0 || port == 0 {
return nil, errors.New("cannot resolve an empty service name or namespace or port")
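
The documented mapping amounts to the following; resolveEndpointSketch is a stand-in, not the package's actual method:

package main

import (
	"errors"
	"fmt"
	"net/url"
)

// resolveEndpointSketch builds https://<name>.<namespace>.svc:<port>,
// mirroring the documented behavior.
func resolveEndpointSketch(namespace, name string, port int32) (*url.URL, error) {
	if len(name) == 0 || len(namespace) == 0 || port == 0 {
		return nil, errors.New("cannot resolve an empty service name or namespace or port")
	}
	return url.Parse(fmt.Sprintf("https://%s.%s.svc:%d", name, namespace, port))
}

func main() {
	u, _ := resolveEndpointSketch("andromeda", "ross", 443)
	fmt.Println(u) // https://ross.andromeda.svc:443
}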


@@ -40,11 +40,10 @@ import (
//
// Example client session:
//
// CONNECT http://server.com with subprotocol "channel.k8s.io"
// WRITE []byte{0, 102, 111, 111, 10} # send "foo\n" on channel 0 (STDIN)
// READ []byte{1, 10} # receive "\n" on channel 1 (STDOUT)
// CLOSE
//
// CONNECT http://server.com with subprotocol "channel.k8s.io"
// WRITE []byte{0, 102, 111, 111, 10} # send "foo\n" on channel 0 (STDIN)
// READ []byte{1, 10} # receive "\n" on channel 1 (STDOUT)
// CLOSE
const ChannelWebSocketProtocol = "channel.k8s.io"
// The Websocket subprotocol "base64.channel.k8s.io" base64 encodes each message with a character
@@ -56,11 +55,10 @@ const ChannelWebSocketProtocol = "channel.k8s.io"
//
// Example client session:
//
// CONNECT http://server.com with subprotocol "base64.channel.k8s.io"
// WRITE []byte{48, 90, 109, 57, 118, 67, 103, 111, 61} # send "foo\n" (base64: "Zm9vCgo=") on channel '0' (STDIN)
// READ []byte{49, 67, 103, 61, 61} # receive "\n" (base64: "Cg==") on channel '1' (STDOUT)
// CLOSE
//
// CONNECT http://server.com with subprotocol "base64.channel.k8s.io"
// WRITE []byte{48, 90, 109, 57, 118, 67, 103, 111, 61} # send "foo\n" (base64: "Zm9vCgo=") on channel '0' (STDIN)
// READ []byte{49, 67, 103, 61, 61} # receive "\n" (base64: "Cg==") on channel '1' (STDOUT)
// CLOSE
const Base64ChannelWebSocketProtocol = "base64.channel.k8s.io"
type codecType int
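
The framing both sessions illustrate is simply a channel byte followed by the payload; a sketch:

package main

import "fmt"

// frame prefixes the payload with its channel byte, as in "channel.k8s.io".
func frame(channel byte, data []byte) []byte {
	return append([]byte{channel}, data...)
}

func main() {
	stdin := frame(0, []byte("foo\n"))
	fmt.Println(stdin) // [0 102 111 111 10], matching the example session
}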


@@ -75,17 +75,17 @@ func (c *counterRaiser) IncreaseMetricsCounter(req *http.Request) {
// NewDeprecatedCertificateRoundTripperWrapperConstructor returns a RoundTripper wrapper that's usable within ClientConfig.Wrap.
//
// It increases the `missingSAN` counter whenever:
-// 1. we get a x509.HostnameError with string `x509: certificate relies on legacy Common Name field`
-//    which indicates an error caused by the deprecation of Common Name field when veryfing remote
-//    hostname
-// 2. the server certificate in response contains no SAN. This indicates that this binary run
-//    with the GODEBUG=x509ignoreCN=0 in env
+//  1. we get a x509.HostnameError with string `x509: certificate relies on legacy Common Name field`
+//     which indicates an error caused by the deprecation of Common Name field when veryfing remote
+//     hostname
+//  2. the server certificate in response contains no SAN. This indicates that this binary run
+//     with the GODEBUG=x509ignoreCN=0 in env
//
// It increases the `sha1` counter whenever:
-// 1. we get a x509.InsecureAlgorithmError with string `SHA1`
-//    which indicates an error caused by an insecure SHA1 signature
-// 2. the server certificate in response contains a SHA1WithRSA or ECDSAWithSHA1 signature.
-//    This indicates that this binary run with the GODEBUG=x509sha1=1 in env
+//  1. we get a x509.InsecureAlgorithmError with string `SHA1`
+//     which indicates an error caused by an insecure SHA1 signature
+//  2. the server certificate in response contains a SHA1WithRSA or ECDSAWithSHA1 signature.
+//     This indicates that this binary run with the GODEBUG=x509sha1=1 in env
func NewDeprecatedCertificateRoundTripperWrapperConstructor(missingSAN, sha1 *metrics.Counter) func(rt http.RoundTripper) http.RoundTripper {
return func(rt http.RoundTripper) http.RoundTripper {
return &x509DeprecatedCertificateMetricsRTWrapper{


@@ -190,10 +190,10 @@ func (b *bufferedBackend) processIncomingEvents(stopCh <-chan struct{}) {
// The following things can cause collectEvents to stop and return the list
// of events:
//
-// * Maximum number of events for a batch.
-// * Timer has passed.
-// * Buffer channel is closed and empty.
-// * stopCh is closed.
+//   - Maximum number of events for a batch.
+//   - Timer has passed.
+//   - Buffer channel is closed and empty.
+//   - stopCh is closed.
func (b *bufferedBackend) collectEvents(timer <-chan time.Time, stopCh <-chan struct{}) []*auditinternal.Event {
var events []*auditinternal.Event
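
The four exit conditions map onto a select loop; a sketch with a stand-in event type:

package main

import "time"

// event stands in for auditinternal.Event.
type event struct{}

func collectEventsSketch(buffer <-chan *event, timer <-chan time.Time, stopCh <-chan struct{}, maxBatchSize int) []*event {
	var events []*event
	for len(events) < maxBatchSize { // exit: maximum batch size reached
		select {
		case ev, ok := <-buffer:
			if !ok { // exit: buffer channel closed and empty
				return events
			}
			events = append(events, ev)
		case <-timer: // exit: timer has passed
			return events
		case <-stopCh: // exit: stopCh is closed
			return events
		}
	}
	return events
}

func main() {}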


@@ -452,18 +452,18 @@ func (r *claimResolver) Verifier(iss string) (*oidc.IDTokenVerifier, error) {
// OIDC Connect Core 1.0, section 5.6.2.
// See: https://openid.net/specs/openid-connect-core-1_0.html#AggregatedDistributedClaims
//
-//     {
-//       ... (other normal claims)...
-//       "_claim_names": {
-//         "groups": "src1"
-//       },
-//       "_claim_sources": {
-//         "src1": {
-//           "endpoint": "https://www.example.com",
-//           "access_token": "f005ba11"
-//         },
-//       },
-//     }
+//	{
+//	  ... (other normal claims)...
+//	  "_claim_names": {
+//	    "groups": "src1"
+//	  },
+//	  "_claim_sources": {
+//	    "src1": {
+//	      "endpoint": "https://www.example.com",
+//	      "access_token": "f005ba11"
+//	    },
+//	  },
+//	}
func (r *claimResolver) expand(c claims) error {
const (
// The claim containing a map of endpoint references per claim.


@@ -76,19 +76,19 @@ func NewFromInterface(subjectAccessReview authorizationv1client.AuthorizationV1I
// New creates a new WebhookAuthorizer from the provided kubeconfig file.
// The config's cluster field is used to refer to the remote service, user refers to the returned authorizer.
//
-// # clusters refers to the remote service.
-// clusters:
-// - name: name-of-remote-authz-service
-//   cluster:
-//     certificate-authority: /path/to/ca.pem # CA for verifying the remote service.
-//     server: https://authz.example.com/authorize # URL of remote service to query. Must use 'https'.
+//	# clusters refers to the remote service.
+//	clusters:
+//	- name: name-of-remote-authz-service
+//	  cluster:
+//	    certificate-authority: /path/to/ca.pem # CA for verifying the remote service.
+//	    server: https://authz.example.com/authorize # URL of remote service to query. Must use 'https'.
//
-// # users refers to the API server's webhook configuration.
-// users:
-// - name: name-of-api-server
-//   user:
-//     client-certificate: /path/to/cert.pem # cert for the webhook plugin to use
-//     client-key: /path/to/key.pem # key matching the cert
+//	# users refers to the API server's webhook configuration.
+//	users:
+//	- name: name-of-api-server
+//	  user:
+//	    client-certificate: /path/to/cert.pem # cert for the webhook plugin to use
+//	    client-key: /path/to/key.pem # key matching the cert
//
// For additional HTTP configuration, refer to the kubeconfig documentation
// https://kubernetes.io/docs/user-guide/kubeconfig-file/.
@@ -120,45 +120,45 @@ func newWithBackoff(subjectAccessReview subjectAccessReviewer, authorizedTTL, un
// serialized api.authorization.v1beta1.SubjectAccessReview object. An example request body is
// provided below.
//
-//     {
-//       "apiVersion": "authorization.k8s.io/v1beta1",
-//       "kind": "SubjectAccessReview",
-//       "spec": {
-//         "resourceAttributes": {
-//           "namespace": "kittensandponies",
-//           "verb": "GET",
-//           "group": "group3",
-//           "resource": "pods"
-//         },
-//         "user": "jane",
-//         "group": [
-//           "group1",
-//           "group2"
-//         ]
-//       }
-//     }
+//	{
+//	  "apiVersion": "authorization.k8s.io/v1beta1",
+//	  "kind": "SubjectAccessReview",
+//	  "spec": {
+//	    "resourceAttributes": {
+//	      "namespace": "kittensandponies",
+//	      "verb": "GET",
+//	      "group": "group3",
+//	      "resource": "pods"
+//	    },
+//	    "user": "jane",
+//	    "group": [
+//	      "group1",
+//	      "group2"
+//	    ]
+//	  }
+//	}
//
// The remote service is expected to fill the SubjectAccessReviewStatus field to either allow or
// disallow access. A permissive response would return:
//
-//     {
-//       "apiVersion": "authorization.k8s.io/v1beta1",
-//       "kind": "SubjectAccessReview",
-//       "status": {
-//         "allowed": true
-//       }
-//     }
+//	{
+//	  "apiVersion": "authorization.k8s.io/v1beta1",
+//	  "kind": "SubjectAccessReview",
+//	  "status": {
+//	    "allowed": true
+//	  }
+//	}
//
// To disallow access, the remote service would return:
//
-//     {
-//       "apiVersion": "authorization.k8s.io/v1beta1",
-//       "kind": "SubjectAccessReview",
-//       "status": {
-//         "allowed": false,
-//         "reason": "user does not have read access to the namespace"
-//       }
-//     }
+//	{
+//	  "apiVersion": "authorization.k8s.io/v1beta1",
+//	  "kind": "SubjectAccessReview",
+//	  "status": {
+//	    "allowed": false,
+//	    "reason": "user does not have read access to the namespace"
+//	  }
+//	}
//
// TODO(mikedanese): We should eventually support failing closed when we
// encounter an error. We are failing open now to preserve backwards compatible
@@ -246,7 +246,7 @@ func (w *WebhookAuthorizer) Authorize(ctx context.Context, attr authorizer.Attri
}
-//TODO: need to finish the method to get the rules when using webhook mode
+// TODO: need to finish the method to get the rules when using webhook mode
func (w *WebhookAuthorizer) RulesFor(user user.Info, namespace string) ([]authorizer.ResourceRuleInfo, []authorizer.NonResourceRuleInfo, bool, error) {
var (
resourceRules []authorizer.ResourceRuleInfo