Generate and format files

- Run hack/update-codegen.sh
- Run hack/update-generated-device-plugin.sh
- Run hack/update-generated-protobuf.sh
- Run hack/update-generated-runtime.sh
- Run hack/update-generated-swagger-docs.sh
- Run hack/update-openapi-spec.sh
- Run hack/update-gofmt.sh

Replay of a9593d634c6a053848413e600dadbf974627515f

Kubernetes-commit: 203d8ac83844b8ea8ece8ece7630cb22ef8f327d
Parent: 74cc4c7a5b
Commit: 14f249c6d3
@@ -52,9 +52,10 @@ type WantsQuotaConfiguration interface {
// WantsFeatureGate defines a function which passes the featureGates for inspection by an admission plugin.
// Admission plugins should not hold a reference to the featureGates. Instead, they should query a particular one
// and assign it to a simple bool in the admission plugin struct.
// func (a *admissionPlugin) InspectFeatureGates(features featuregate.FeatureGate){
// a.myFeatureIsOn = features.Enabled("my-feature")
// }
//
// func (a *admissionPlugin) InspectFeatureGates(features featuregate.FeatureGate){
// a.myFeatureIsOn = features.Enabled("my-feature")
// }
type WantsFeatures interface {
	InspectFeatureGates(featuregate.FeatureGate)
	admission.InitializationValidator
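The hunk above reformats a doc comment that describes a pattern: an admission plugin should query the feature gate once during initialization and cache the answer in a plain bool rather than hold on to the FeatureGate. A minimal, self-contained sketch of that pattern follows; it uses a local FeatureGate stand-in interface and an invented plugin and feature name instead of the real k8s.io/component-base/featuregate types.

```go
package main

import "fmt"

// FeatureGate is a local stand-in for the featuregate.FeatureGate interface;
// only the Enabled query used by the pattern is modeled here.
type FeatureGate interface {
	Enabled(name string) bool
}

// mapGate is a trivial FeatureGate backed by a map (illustration only).
type mapGate map[string]bool

func (m mapGate) Enabled(name string) bool { return m[name] }

// admissionPlugin caches the result of the feature-gate query in a bool
// instead of keeping a reference to the FeatureGate itself.
type admissionPlugin struct {
	myFeatureIsOn bool
}

// InspectFeatureGates mirrors the pattern from the doc comment above.
func (a *admissionPlugin) InspectFeatureGates(features FeatureGate) {
	a.myFeatureIsOn = features.Enabled("my-feature")
}

func main() {
	p := &admissionPlugin{}
	p.InspectFeatureGates(mapGate{"my-feature": true})
	fmt.Println("my-feature enabled:", p.myFeatureIsOn)
}
```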
@@ -202,16 +202,16 @@ func (e *quotaEvaluator) checkAttributes(ns string, admissionAttributes []*admis

// checkQuotas checks the admission attributes against the passed quotas. If a quota applies, it will attempt to update it
// AFTER it has checked all the admissionAttributes. The method breaks down into phase like this:
// 0. make a copy of the quotas to act as a "running" quota so we know what we need to update and can still compare against the
// originals
// 1. check each admission attribute to see if it fits within *all* the quotas. If it doesn't fit, mark the waiter as failed
// and the running quota don't change. If it did fit, check to see if any quota was changed. It there was no quota change
// mark the waiter as succeeded. If some quota did change, update the running quotas
// 2. If no running quota was changed, return now since no updates are needed.
// 3. for each quota that has changed, attempt an update. If all updates succeeded, update all unset waiters to success status and return. If the some
// updates failed on conflict errors and we have retries left, re-get the failed quota from our cache for the latest version
// and recurse into this method with the subset. It's safe for us to evaluate ONLY the subset, because the other quota
// documents for these waiters have already been evaluated. Step 1, will mark all the ones that should already have succeeded.
// 0. make a copy of the quotas to act as a "running" quota so we know what we need to update and can still compare against the
// originals
// 1. check each admission attribute to see if it fits within *all* the quotas. If it doesn't fit, mark the waiter as failed
// and the running quota don't change. If it did fit, check to see if any quota was changed. It there was no quota change
// mark the waiter as succeeded. If some quota did change, update the running quotas
// 2. If no running quota was changed, return now since no updates are needed.
// 3. for each quota that has changed, attempt an update. If all updates succeeded, update all unset waiters to success status and return. If the some
// updates failed on conflict errors and we have retries left, re-get the failed quota from our cache for the latest version
// and recurse into this method with the subset. It's safe for us to evaluate ONLY the subset, because the other quota
// documents for these waiters have already been evaluated. Step 1, will mark all the ones that should already have succeeded.
func (e *quotaEvaluator) checkQuotas(quotas []corev1.ResourceQuota, admissionAttributes []*admissionWaiter, remainingRetries int) {
	// yet another copy to compare against originals to see if we actually have deltas
	originalQuotas, err := copyQuotas(quotas)
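The comment above spells out the phased algorithm. A compressed sketch of phases 0-2 is below, using a single numeric resource as a stand-in for corev1.ResourceQuota; the conflict-retry logic of phase 3 is only mentioned in a comment, and all names here are invented for illustration.

```go
package main

import "fmt"

// quota is a stripped-down stand-in for corev1.ResourceQuota: one name and
// one used/hard pair, enough to show the "running copy vs originals" idea.
type quota struct {
	name string
	used int64
	hard int64
}

// checkQuotas mirrors phases 0-2 of the algorithm in the comment above:
// make a running copy, apply each admission attribute's cost to every quota
// it fits in, then report only the quotas that actually changed (the ones
// that would need an update call, retried on conflict, in the real evaluator).
func checkQuotas(quotas []quota, costs []int64) []quota {
	// phase 0: copy the quotas so we can still compare against the originals.
	running := make([]quota, len(quotas))
	copy(running, quotas)

	// phase 1: check each admission attribute against *all* running quotas.
	for _, cost := range costs {
		fits := true
		for _, q := range running {
			if q.used+cost > q.hard {
				fits = false // in the real code this marks the waiter as failed
				break
			}
		}
		if fits {
			for i := range running {
				running[i].used += cost
			}
		}
	}

	// phase 2: collect only the quotas whose usage actually changed.
	var dirty []quota
	for i := range running {
		if running[i].used != quotas[i].used {
			dirty = append(dirty, running[i])
		}
	}
	return dirty
}

func main() {
	qs := []quota{{name: "pods", used: 3, hard: 5}}
	fmt.Println(checkQuotas(qs, []int64{1, 1, 1})) // the third request does not fit
}
```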
@@ -18,15 +18,17 @@ limitations under the License.
// source: k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/apis/example2/v1/generated.proto

/*
Package v1 is a generated protocol buffer package.
Package v1 is a generated protocol buffer package.

It is generated from these files:
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/apis/example2/v1/generated.proto
It is generated from these files:

It has these top-level messages:
ReplicaSet
ReplicaSetSpec
ReplicaSetStatus
k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/apis/example2/v1/generated.proto

It has these top-level messages:

ReplicaSet
ReplicaSetSpec
ReplicaSetStatus
*/
package v1

@@ -43,8 +43,9 @@ var leadingDigits = regexp.MustCompile(`^(\d+)`)

// MajorMinor parses a numeric major/minor version from the provided version info.
// The minor version drops all characters after the first non-digit character:
// version.Info{Major:"1", Minor:"2+"} -> 1,2
// version.Info{Major:"1", Minor:"2.3-build4"} -> 1,2
//
// version.Info{Major:"1", Minor:"2+"} -> 1,2
// version.Info{Major:"1", Minor:"2.3-build4"} -> 1,2
func MajorMinor(v version.Info) (int, int, error) {
	major, err := strconv.Atoi(v.Major)
	if err != nil {
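The examples in the comment ("2+" and "2.3-build4" both yielding minor version 2) follow from matching only the leading digits, as the leadingDigits regexp in the hunk context does. The sketch below is self-contained and uses a local stand-in for version.Info rather than the real apimachinery type.

```go
package main

import (
	"fmt"
	"regexp"
	"strconv"
)

// info is a local stand-in for version.Info; only Major and Minor are needed.
type info struct {
	Major, Minor string
}

var leadingDigits = regexp.MustCompile(`^(\d+)`)

// majorMinor mirrors the documented behavior: the minor version drops
// everything after the first non-digit character, so "2+" and "2.3-build4"
// both parse as 2.
func majorMinor(v info) (int, int, error) {
	major, err := strconv.Atoi(v.Major)
	if err != nil {
		return 0, 0, err
	}
	m := leadingDigits.FindString(v.Minor)
	if m == "" {
		return 0, 0, fmt.Errorf("no leading digits in minor version %q", v.Minor)
	}
	minor, err := strconv.Atoi(m)
	if err != nil {
		return 0, 0, err
	}
	return major, minor, nil
}

func main() {
	fmt.Println(majorMinor(info{Major: "1", Minor: "2+"}))         // 1 2 <nil>
	fmt.Println(majorMinor(info{Major: "1", Minor: "2.3-build4"})) // 1 2 <nil>
}
```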
@@ -56,9 +56,9 @@ func NewScaleHandler(parentEntries []metav1.ManagedFieldsEntry, groupVersion sch
// ToSubresource filter the managed fields of the main resource and convert
// them so that they can be handled by scale.
// For the managed fields that have a replicas path it performs two changes:
// 1. APIVersion is changed to the APIVersion of the scale subresource
// 2. Replicas path of the main resource is transformed to the replicas path of
// the scale subresource
// 1. APIVersion is changed to the APIVersion of the scale subresource
// 2. Replicas path of the main resource is transformed to the replicas path of
// the scale subresource
func (h *ScaleHandler) ToSubresource() ([]metav1.ManagedFieldsEntry, error) {
	managed, err := DecodeManagedFields(h.parentEntries)
	if err != nil {

@@ -371,7 +371,8 @@ func dedupOwnerReferencesAndAddWarning(obj runtime.Object, requestContext contex

// setObjectSelfLink sets the self link of an object as needed.
// TODO: remove the need for the namer LinkSetters by requiring objects implement either Object or List
// interfaces
//
// interfaces
func setObjectSelfLink(ctx context.Context, obj runtime.Object, req *http.Request, namer ScopeNamer) error {
	if utilfeature.DefaultFeatureGate.Enabled(features.RemoveSelfLink) {
		// Ensure that for empty lists we don't return <nil> items.

@@ -593,7 +593,7 @@ func cleanVerb(verb, suggestedVerb string, request *http.Request) string {
	return OtherRequestMethod
}

//getVerbIfWatch additionally ensures that GET or List would be transformed to WATCH
// getVerbIfWatch additionally ensures that GET or List would be transformed to WATCH
func getVerbIfWatch(req *http.Request) string {
	if strings.ToUpper(req.Method) == "GET" || strings.ToUpper(req.Method) == "LIST" {
		if checkIfWatch(req) {

@@ -603,7 +603,7 @@ func getVerbIfWatch(req *http.Request) string {
	return ""
}

//checkIfWatch check request is watch
// checkIfWatch check request is watch
func checkIfWatch(req *http.Request) bool {
	// see apimachinery/pkg/runtime/conversion.go Convert_Slice_string_To_bool
	if values := req.URL.Query()["watch"]; len(values) > 0 {

@@ -41,7 +41,7 @@ func TestNamespaceContext(t *testing.T) {
	}
}

//TestUserContext validates that a userinfo can be get/set on a context object
// TestUserContext validates that a userinfo can be get/set on a context object
func TestUserContext(t *testing.T) {
	ctx := NewContext()
	_, ok := UserFrom(ctx)

@@ -42,19 +42,19 @@ type UserProvidedDecorator interface {
// object implements the same subset of those interfaces as the inner http.ResponseWriter.
//
// This function handles the following three casses.
// - The inner ResponseWriter implements `http.CloseNotifier`, `http.Flusher`,
// and `http.Hijacker` (an HTTP/1.1 sever provides such a ResponseWriter).
// - The inner ResponseWriter implements `http.CloseNotifier` and `http.Flusher`
// but not `http.Hijacker` (an HTTP/2 server provides such a ResponseWriter).
// - All the other cases collapse to this one, in which the given ResponseWriter is returned.
// - The inner ResponseWriter implements `http.CloseNotifier`, `http.Flusher`,
// and `http.Hijacker` (an HTTP/1.1 sever provides such a ResponseWriter).
// - The inner ResponseWriter implements `http.CloseNotifier` and `http.Flusher`
// but not `http.Hijacker` (an HTTP/2 server provides such a ResponseWriter).
// - All the other cases collapse to this one, in which the given ResponseWriter is returned.
//
// There are three applicable terms:
// - "outer": this is the ResponseWriter object returned by the WrapForHTTP1Or2 function.
// - "user-provided decorator" or "middle": this is the user-provided decorator
// - "outer": this is the ResponseWriter object returned by the WrapForHTTP1Or2 function.
// - "user-provided decorator" or "middle": this is the user-provided decorator
// that decorates an inner ResponseWriter object. A user-provided decorator
// implements the UserProvidedDecorator interface. A user-provided decorator
// may or may not implement http.CloseNotifier, http.Flusher or http.Hijacker.
// - "inner": the ResponseWriter that the user-provided decorator extends.
// - "inner": the ResponseWriter that the user-provided decorator extends.
func WrapForHTTP1Or2(decorator UserProvidedDecorator) http.ResponseWriter {
	// from go net/http documentation:
	// The default HTTP/1.x and HTTP/2 ResponseWriter implementations support Flusher
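The three cases in the comment exist because wrapping a ResponseWriter in a plain decorator hides the optional interfaces (http.Flusher, http.CloseNotifier, http.Hijacker) that the inner writer may implement. The self-contained sketch below demonstrates the problem and a much-simplified version of the fix; it only handles http.Flusher and is not the real WrapForHTTP1Or2 implementation.

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// loggingWriter is a toy "user-provided decorator": it wraps an inner
// ResponseWriter but does not itself declare Flush, Hijack or CloseNotify.
type loggingWriter struct {
	http.ResponseWriter
	bytes int
}

func (l *loggingWriter) Write(p []byte) (int, error) {
	l.bytes += len(p)
	return l.ResponseWriter.Write(p)
}

// wrapIfFlusher mimics the idea behind WrapForHTTP1Or2 for one interface:
// the returned "outer" object re-exposes http.Flusher only when the inner
// writer actually implements it.
func wrapIfFlusher(decorator, inner http.ResponseWriter) http.ResponseWriter {
	if f, ok := inner.(http.Flusher); ok {
		return struct {
			http.ResponseWriter
			http.Flusher
		}{decorator, f}
	}
	return decorator
}

func main() {
	inner := httptest.NewRecorder() // implements http.Flusher
	var outer http.ResponseWriter = &loggingWriter{ResponseWriter: inner}

	_, ok := outer.(http.Flusher)
	fmt.Println("decorator alone exposes Flusher:", ok) // false

	outer = wrapIfFlusher(outer, inner)
	_, ok = outer.(http.Flusher)
	fmt.Println("wrapped object exposes Flusher:", ok) // true
}
```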
@@ -894,13 +894,13 @@ func markAsDeleting(obj runtime.Object, now time.Time) (err error) {
// grace period seconds (graceful deletion) and updating the list of
// finalizers (finalization); it returns:
//
// 1. an error
// 2. a boolean indicating that the object was not found, but it should be
// ignored
// 3. a boolean indicating that the object's grace period is exhausted and it
// should be deleted immediately
// 4. a new output object with the state that was updated
// 5. a copy of the last existing state of the object
// 1. an error
// 2. a boolean indicating that the object was not found, but it should be
// ignored
// 3. a boolean indicating that the object's grace period is exhausted and it
// should be deleted immediately
// 4. a new output object with the state that was updated
// 5. a copy of the last existing state of the object
func (e *Store) updateForGracefulDeletionAndFinalizers(ctx context.Context, name, key string, options *metav1.DeleteOptions, preconditions storage.Preconditions, deleteValidation rest.ValidateObjectFunc, in runtime.Object) (err error, ignoreNotFound, deleteImmediately bool, out, lastExisting runtime.Object) {
	lastGraceful := int64(0)
	var pendingFinalizers bool

@@ -878,8 +878,8 @@ func (t *Tester) testDeleteNonExist(obj runtime.Object, opts metav1.DeleteOption

}

// This test the fast-fail path. We test that the precondition gets verified
// again before deleting the object in tests of pkg/storage/etcd.
// This test the fast-fail path. We test that the precondition gets verified
// again before deleting the object in tests of pkg/storage/etcd.
func (t *Tester) testDeleteWithUID(obj runtime.Object, createFn CreateFunc, getFn GetFunc, isNotFoundFn IsErrorFunc, opts metav1.DeleteOptions) {
	ctx := t.TestContext()

@@ -915,8 +915,8 @@ func (t *Tester) testDeleteWithUID(obj runtime.Object, createFn CreateFunc, getF
	}
}

// This test the fast-fail path. We test that the precondition gets verified
// again before deleting the object in tests of pkg/storage/etcd.
// This test the fast-fail path. We test that the precondition gets verified
// again before deleting the object in tests of pkg/storage/etcd.
func (t *Tester) testDeleteWithResourceVersion(obj runtime.Object, createFn CreateFunc, getFn GetFunc, isNotFoundFn IsErrorFunc, opts metav1.DeleteOptions) {
	ctx := t.TestContext()

@@ -71,13 +71,13 @@ func withFakeUser(handler http.Handler) http.Handler {
}

// Tests that MaxInFlightLimit works, i.e.
// - "long" requests such as proxy or watch, identified by regexp are not accounted despite
// hanging for the long time,
// - "short" requests are correctly accounted, i.e. there can be only size of channel passed to the
// constructor in flight at any given moment,
// - subsequent "short" requests are rejected instantly with appropriate error,
// - subsequent "long" requests are handled normally,
// - we correctly recover after some "short" requests finish, i.e. we can process new ones.
// - "long" requests such as proxy or watch, identified by regexp are not accounted despite
// hanging for the long time,
// - "short" requests are correctly accounted, i.e. there can be only size of channel passed to the
// constructor in flight at any given moment,
// - subsequent "short" requests are rejected instantly with appropriate error,
// - subsequent "long" requests are handled normally,
// - we correctly recover after some "short" requests finish, i.e. we can process new ones.
func TestMaxInFlightNonMutating(t *testing.T) {
	const AllowedNonMutatingInflightRequestsNo = 3

@@ -154,7 +154,7 @@ func (d director) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	d.nonGoRestfulMux.ServeHTTP(w, req)
}

//TODO: Unify with RecoverPanics?
// TODO: Unify with RecoverPanics?
func logStackOnRecover(s runtime.NegotiatedSerializer, panicReason interface{}, w http.ResponseWriter) {
	var buffer bytes.Buffer
	buffer.WriteString(fmt.Sprintf("recover from panic situation: - %v\r\n", panicReason))

@@ -16,6 +16,7 @@ limitations under the License.

// Package healthz implements basic http server health checking.
// Usage:
// import "k8s.io/apiserver/pkg/server/healthz"
// healthz.InstallHandler(mux)
//
// import "k8s.io/apiserver/pkg/server/healthz"
// healthz.InstallHandler(mux)
package healthz // import "k8s.io/apiserver/pkg/server/healthz"
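The reformatted package comment shows the intended usage. A minimal program following it is sketched below; it assumes InstallHandler's variadic signature, which accepts any mux with a Handle method (http.ServeMux qualifies) and falls back to a default ping check when no extra checkers are supplied.

```go
package main

import (
	"log"
	"net/http"

	"k8s.io/apiserver/pkg/server/healthz"
)

func main() {
	mux := http.NewServeMux()

	// Registers the /healthz endpoint (plus per-check subpaths) on the mux.
	healthz.InstallHandler(mux)

	log.Fatal(http.ListenAndServe(":8080", mux))
}
```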
@@ -35,6 +35,7 @@ import (
// 2. conflicts between the different processes all trying to perform the same action
// 3. partially complete work (API server crashes while running your hook)
// 4. API server access **BEFORE** your hook has completed
//
// Think of it like a mini-controller that is super privileged and gets to run in-process
// If you use this feature, tag @deads2k on github who has promised to review code for anyone's PostStartHook
// until it becomes easier to use.

@@ -107,8 +107,8 @@ type lifecycleSignal interface {
// for us to write unit tests that can verify expected graceful termination behavior.
//
// GenericAPIServer can use these to either:
// - signal that a particular termination event has transpired
// - wait for a designated termination event to transpire and do some action.
// - signal that a particular termination event has transpired
// - wait for a designated termination event to transpire and do some action.
type lifecycleSignals struct {
	// ShutdownInitiated event is signaled when an apiserver shutdown has been initiated.
	// It is signaled when the `stopCh` provided by the main goroutine

@@ -70,12 +70,13 @@ type AdmissionOptions struct {

// NewAdmissionOptions creates a new instance of AdmissionOptions
// Note:
// In addition it calls RegisterAllAdmissionPlugins to register
// all generic admission plugins.
//
// Provides the list of RecommendedPluginOrder that holds sane values
// that can be used by servers that don't care about admission chain.
// Servers that do care can overwrite/append that field after creation.
// In addition it calls RegisterAllAdmissionPlugins to register
// all generic admission plugins.
//
// Provides the list of RecommendedPluginOrder that holds sane values
// that can be used by servers that don't care about admission chain.
// Servers that do care can overwrite/append that field after creation.
func NewAdmissionOptions() *AdmissionOptions {
	options := &AdmissionOptions{
		Plugins: admission.NewPlugins(),

@@ -115,7 +116,8 @@ func (a *AdmissionOptions) AddFlags(fs *pflag.FlagSet) {
// In case admission plugin names were not provided by a cluster-admin they will be prepared from the recommended/default values.
// In addition the method lazily initializes a generic plugin that is appended to the list of pluginInitializers
// note this method uses:
// genericconfig.Authorizer
//
// genericconfig.Authorizer
func (a *AdmissionOptions) ApplyTo(
	c *server.Config,
	informers informers.SharedInformerFactory,

@@ -221,7 +223,7 @@ func (a *AdmissionOptions) enabledPluginNames() []string {
	return orderedPlugins
}

//Return names of plugins which are enabled by default
// Return names of plugins which are enabled by default
func (a *AdmissionOptions) defaultEnabledPluginNames() []string {
	defaultOnPluginNames := []string{}
	for _, pluginName := range a.RecommendedPluginOrder {

@@ -38,8 +38,9 @@ import (
// DelegatingAuthorizationOptions provides an easy way for composing API servers to delegate their authorization to
// the root kube API server.
// WARNING: never assume that every authenticated incoming request already does authorization.
// The aggregator in the kube API server does this today, but this behaviour is not
// guaranteed in the future.
//
// The aggregator in the kube API server does this today, but this behaviour is not
// guaranteed in the future.
type DelegatingAuthorizationOptions struct {
	// RemoteKubeConfigFile is the file to use to connect to a "normal" kube API server which hosts the
	// SubjectAccessReview.authorization.k8s.io endpoint for checking tokens.

@@ -207,7 +207,8 @@ func (s *SecureServingInfo) Serve(handler http.Handler, shutdownTimeout time.Dur
// It returns a stoppedCh that is closed when all non-hijacked active requests have been processed.
// It returns a listenerStoppedCh that is closed when the underlying http Server has stopped listening.
// TODO: do a follow up PR to remove this function, change 'Serve' to return listenerStoppedCh
// and update all components that call 'Serve'
//
// and update all components that call 'Serve'
func (s *SecureServingInfo) ServeWithListenerStopped(handler http.Handler, shutdownTimeout time.Duration, stopCh <-chan struct{}) (<-chan struct{}, <-chan struct{}, error) {
	if s.Listener == nil {
		return nil, nil, fmt.Errorf("listener must not be nil")

@@ -591,7 +591,8 @@ func (c *Cacher) Get(ctx context.Context, key string, opts storage.GetOptions, o
}

// NOTICE: Keep in sync with shouldListFromStorage function in
// staging/src/k8s.io/apiserver/pkg/util/flowcontrol/request/list_work_estimator.go
//
// staging/src/k8s.io/apiserver/pkg/util/flowcontrol/request/list_work_estimator.go
func shouldDelegateList(opts storage.ListOptions) bool {
	resourceVersion := opts.ResourceVersion
	pred := opts.Predicate

@@ -30,13 +30,14 @@ const (

// timeBudget implements a budget of time that you can use and is
// periodically being refreshed. The pattern to use it is:
// budget := newTimeBudget(...)
// ...
// timeout := budget.takeAvailable()
// // Now you can spend at most timeout on doing stuff
// ...
// // If you didn't use all timeout, return what you didn't use
// budget.returnUnused(<unused part of timeout>)
//
// budget := newTimeBudget(...)
// ...
// timeout := budget.takeAvailable()
// // Now you can spend at most timeout on doing stuff
// ...
// // If you didn't use all timeout, return what you didn't use
// budget.returnUnused(<unused part of timeout>)
//
// NOTE: It's not recommended to be used concurrently from multiple threads -
// if first user takes the whole timeout, the second one will get 0 timeout
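The usage pattern in the comment (take what is available, spend at most that long, give back what you did not use) can be illustrated with a tiny local type. The sketch below is not the apiserver's timeBudget — that type is unexported and is also refilled by a background goroutine — it only mirrors the takeAvailable/returnUnused contract.

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// budget is a local illustration of the take/return pattern described above.
type budget struct {
	mu        sync.Mutex
	available time.Duration
	max       time.Duration
}

// takeAvailable hands the caller everything that is currently available.
func (b *budget) takeAvailable() time.Duration {
	b.mu.Lock()
	defer b.mu.Unlock()
	d := b.available
	b.available = 0
	return d
}

// returnUnused gives back the part of the timeout that was not spent,
// capped at the configured maximum.
func (b *budget) returnUnused(unused time.Duration) {
	b.mu.Lock()
	defer b.mu.Unlock()
	if unused < 0 {
		return
	}
	if b.available += unused; b.available > b.max {
		b.available = b.max
	}
}

func main() {
	b := &budget{available: 100 * time.Millisecond, max: 100 * time.Millisecond}

	timeout := b.takeAvailable()
	// ... spend at most `timeout` doing work ...
	spent := 30 * time.Millisecond
	b.returnUnused(timeout - spent)

	fmt.Println(b.takeAvailable()) // 70ms remains for the next caller
}
```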
@@ -20,13 +20,18 @@ package testingcert
// https://github.com/coreos/etcd/tree/master/hack/tls-setup for more details.
//
// ca-config.json:
// expiry was changed from 1 year to 100 years (876000h)
//
// expiry was changed from 1 year to 100 years (876000h)
//
// ca-csr.json:
// ca expiry was set to 100 years (876000h) ("ca":{"expiry":"876000h"})
// key was changed from ecdsa,384 to rsa,2048
//
// ca expiry was set to 100 years (876000h) ("ca":{"expiry":"876000h"})
// key was changed from ecdsa,384 to rsa,2048
//
// req-csr.json:
// key was changed from ecdsa,384 to rsa,2048
// hosts were changed to "localhost","127.0.0.1"
//
// key was changed from ecdsa,384 to rsa,2048
// hosts were changed to "localhost","127.0.0.1"
const CAFileContent = `
-----BEGIN CERTIFICATE-----
MIIEUDCCAzigAwIBAgIUKfV5+qwlw3JneAPdJS7JCO8xIlYwDQYJKoZIhvcNAQEL

@@ -49,11 +49,11 @@ func getAvailablePorts(count int) ([]int, error) {

// NewTestConfig returns a configuration for an embedded etcd server.
// The configuration is based on embed.NewConfig(), with the following adjustments:
// * sets UnsafeNoFsync = true to improve test performance (only reasonable in a test-only
// single-member server we never intend to restart or keep data from)
// * uses free ports for client and peer listeners
// * cleans up the data directory on test termination
// * silences server logs other than errors
// - sets UnsafeNoFsync = true to improve test performance (only reasonable in a test-only
// single-member server we never intend to restart or keep data from)
// - uses free ports for client and peer listeners
// - cleans up the data directory on test termination
// - silences server logs other than errors
func NewTestConfig(t *testing.T) *embed.Config {
	cfg := embed.NewConfig()

@@ -91,10 +91,11 @@ func findStatusCondition(conditions []v1alpha1.StorageVersionCondition,

// setStatusCondition sets the corresponding condition in conditions to newCondition.
// conditions must be non-nil.
// 1. if the condition of the specified type already exists: all fields of the existing condition are updated to
// newCondition, LastTransitionTime is set to now if the new status differs from the old status
// 2. if a condition of the specified type does not exist: LastTransitionTime is set to now() if unset,
// and newCondition is appended
// 1. if the condition of the specified type already exists: all fields of the existing condition are updated to
// newCondition, LastTransitionTime is set to now if the new status differs from the old status
// 2. if a condition of the specified type does not exist: LastTransitionTime is set to now() if unset,
// and newCondition is appended
//
// NOTE: forceTransition allows overwriting LastTransitionTime even when the status doesn't change.
func setStatusCondition(conditions *[]v1alpha1.StorageVersionCondition, newCondition v1alpha1.StorageVersionCondition,
	forceTransition bool) {

@@ -52,12 +52,12 @@ limitations under the License.
// limit. In the original paper, the partial derivative of R(t) with
// respect to t is
//
// 1 / NEQ(t) .
// 1 / NEQ(t) .
//
// To generalize from transmitting one packet at a time to executing C
// requests at a time, that derivative becomes
//
// C / NEQ(t) .
// C / NEQ(t) .
//
// However, sometimes there are fewer than C requests available to
// execute. For a given queue "q", let us also write "reqs(q, t)" for

@@ -70,7 +70,7 @@ limitations under the License.
// for server requests: at a particular time t, the partial derivative
// of R(t) with respect to t is
//
// min( C, sum[over q] reqs(q, t) ) / NEQ(t) .
// min( C, sum[over q] reqs(q, t) ) / NEQ(t) .
//
// In terms of the implementation outline, this is the rate at which
// virtual time is advancing at time t (in virtual nanoseconds per
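A small worked example of the rate formula above: with concurrency limit C and per-queue request counts reqs(q, t), the virtual clock advances at min(C, Σ reqs) / NEQ(t), where NEQ(t) is the number of non-empty queues. The function below only illustrates the arithmetic; the queue counts in main are invented.

```go
package main

import "fmt"

// virtualTimeRate computes min(C, total queued-or-executing requests) / NEQ:
// the rate (virtual seconds per real second) at which the queueset's virtual
// clock advances, as described in the comment above.
func virtualTimeRate(concurrencyLimit int, reqsPerQueue []int) float64 {
	total, neq := 0, 0
	for _, n := range reqsPerQueue {
		if n > 0 {
			neq++
			total += n
		}
	}
	if neq == 0 {
		return 0 // nothing to serve; the virtual clock does not advance
	}
	if total > concurrencyLimit {
		total = concurrencyLimit
	}
	return float64(total) / float64(neq)
}

func main() {
	// C=4 with three non-empty queues holding 1, 2 and 5 requests:
	// min(4, 8) / 3 ≈ 1.33 virtual seconds per real second.
	fmt.Println(virtualTimeRate(4, []int{1, 2, 5, 0}))
}
```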
@@ -116,5 +116,4 @@ limitations under the License.
// queue’s virtual start time is advanced by G. When a request
// finishes being served, and the actual service time was S, the
// queue’s virtual start time is decremented by G - S.
//
package queueset

@@ -970,19 +970,19 @@ func TestTimeout(t *testing.T) {

// TestContextCancel tests cancellation of a request's context.
// The outline is:
// 1. Use a concurrency limit of 1.
// 2. Start request 1.
// 3. Use a fake clock for the following logic, to insulate from scheduler noise.
// 4. The exec fn of request 1 starts request 2, which should wait
// in its queue.
// 5. The exec fn of request 1 also forks a goroutine that waits 1 second
// and then cancels the context of request 2.
// 6. The exec fn of request 1, if StartRequest 2 returns a req2 (which is the normal case),
// calls `req2.Finish`, which is expected to return after the context cancel.
// 7. The queueset interface allows StartRequest 2 to return `nil` in this situation,
// if the scheduler gets the cancel done before StartRequest finishes;
// the test handles this without regard to whether the implementation will ever do that.
// 8. Check that the above took exactly 1 second.
// 1. Use a concurrency limit of 1.
// 2. Start request 1.
// 3. Use a fake clock for the following logic, to insulate from scheduler noise.
// 4. The exec fn of request 1 starts request 2, which should wait
// in its queue.
// 5. The exec fn of request 1 also forks a goroutine that waits 1 second
// and then cancels the context of request 2.
// 6. The exec fn of request 1, if StartRequest 2 returns a req2 (which is the normal case),
// calls `req2.Finish`, which is expected to return after the context cancel.
// 7. The queueset interface allows StartRequest 2 to return `nil` in this situation,
// if the scheduler gets the cancel done before StartRequest finishes;
// the test handles this without regard to whether the implementation will ever do that.
// 8. Check that the above took exactly 1 second.
func TestContextCancel(t *testing.T) {
	metrics.Register()
	metrics.Reset()

@@ -40,7 +40,8 @@ type waitGroupCounter struct {
}

// compile time assertion that waitGroupCounter meets requirements
// of GoRoutineCounter
//
// of GoRoutineCounter
var _ counter.GoRoutineCounter = (*waitGroupCounter)(nil)

func (wgc *waitGroupCounter) Add(delta int) {

@@ -22,7 +22,8 @@ type Observer interface {
	Observe(float64)
}

// ChangeObserver extends Observer with the ability to take
// ChangeObserver extends Observer with the ability to take
//
// an observation that is relative to the previous observation.
type ChangeObserver interface {
	Observer

@@ -38,19 +38,21 @@ const (

var errMetricNotFound = errors.New("not found")

/* TestSampler does a rough behavioral test of the sampling in a
SampleAndWatermarkHistograms. The test creates one and exercises
it, checking that the count in the sampling histogram is correct at
each step. The sampling histogram is expected to get one
observation at the end of each sampling period. A fake clock is
used, and the exercise consists of repeatedly changing that fake
clock by an amount of time chosen uniformly at random from a range
that goes from a little negative to somewhat more than two sampling
periods. The negative changes are included because small negative
changes have been observed in real monotonic clock readings (see
issue #96459) and we want to test that they are properly tolerated.
The designed toleration is to pretend that the clock did not
change, until it resumes net forward progress.
/*
TestSampler does a rough behavioral test of the sampling in a

SampleAndWatermarkHistograms. The test creates one and exercises
it, checking that the count in the sampling histogram is correct at
each step. The sampling histogram is expected to get one
observation at the end of each sampling period. A fake clock is
used, and the exercise consists of repeatedly changing that fake
clock by an amount of time chosen uniformly at random from a range
that goes from a little negative to somewhat more than two sampling
periods. The negative changes are included because small negative
changes have been observed in real monotonic clock readings (see
issue #96459) and we want to test that they are properly tolerated.
The designed toleration is to pretend that the clock did not
change, until it resumes net forward progress.
*/
func TestSampler(t *testing.T) {
	t0 := time.Now()

@@ -141,7 +141,8 @@ func key(requestInfo *apirequest.RequestInfo) string {
}

// NOTICE: Keep in sync with shouldDelegateList function in
// staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher.go
//
// staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher.go
func shouldListFromStorage(query url.Values, opts *metav1.ListOptions) bool {
	resourceVersion := opts.ResourceVersion
	pagingEnabled := utilfeature.DefaultFeatureGate.Enabled(features.APIListChunking)

@@ -195,8 +195,9 @@ func (w *watchTracker) forgetWatch(identifier *watchIdentifier, index *indexValu
// GetInterestedWatchCount implements WatchTracker interface.
//
// TODO(wojtek-t): As of now, requestInfo for object creation (POST) doesn't
// contain the Name field set. Figure out if we can somehow get it for the
// more accurate cost estimation.
//
// contain the Name field set. Figure out if we can somehow get it for the
// more accurate cost estimation.
//
// TODO(wojtek-t): Figure out how to approach DELETECOLLECTION calls.
func (w *watchTracker) GetInterestedWatchCount(requestInfo *request.RequestInfo) int {

@@ -38,7 +38,8 @@ func NewDefaultServiceResolver() ServiceResolver {
// note that the name, namespace, and port are required and by default all
// created addresses use HTTPS scheme.
// for example:
// name=ross namespace=andromeda resolves to https://ross.andromeda.svc:443
//
// name=ross namespace=andromeda resolves to https://ross.andromeda.svc:443
func (sr defaultServiceResolver) ResolveEndpoint(namespace, name string, port int32) (*url.URL, error) {
	if len(name) == 0 || len(namespace) == 0 || port == 0 {
		return nil, errors.New("cannot resolve an empty service name or namespace or port")
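The example in the comment (name=ross, namespace=andromeda resolving to https://ross.andromeda.svc:443) follows directly from the in-cluster DNS naming scheme. A self-contained sketch with the same validation as the hunk's context is below; it is an approximation for illustration, not the actual defaultServiceResolver code.

```go
package main

import (
	"errors"
	"fmt"
	"net/url"
)

// resolveEndpoint mirrors the documented behavior of the default service
// resolver: name=ross namespace=andromeda port=443 resolves to
// https://ross.andromeda.svc:443.
func resolveEndpoint(namespace, name string, port int32) (*url.URL, error) {
	if len(name) == 0 || len(namespace) == 0 || port == 0 {
		return nil, errors.New("cannot resolve an empty service name or namespace or port")
	}
	return &url.URL{
		Scheme: "https",
		Host:   fmt.Sprintf("%s.%s.svc:%d", name, namespace, port),
	}, nil
}

func main() {
	u, err := resolveEndpoint("andromeda", "ross", 443)
	fmt.Println(u, err) // https://ross.andromeda.svc:443 <nil>
}
```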
@@ -40,11 +40,10 @@ import (
//
// Example client session:
//
// CONNECT http://server.com with subprotocol "channel.k8s.io"
// WRITE []byte{0, 102, 111, 111, 10} # send "foo\n" on channel 0 (STDIN)
// READ []byte{1, 10} # receive "\n" on channel 1 (STDOUT)
// CLOSE
//
// CONNECT http://server.com with subprotocol "channel.k8s.io"
// WRITE []byte{0, 102, 111, 111, 10} # send "foo\n" on channel 0 (STDIN)
// READ []byte{1, 10} # receive "\n" on channel 1 (STDOUT)
// CLOSE
const ChannelWebSocketProtocol = "channel.k8s.io"

// The Websocket subprotocol "base64.channel.k8s.io" base64 encodes each message with a character

@@ -56,11 +55,10 @@ const ChannelWebSocketProtocol = "channel.k8s.io"
//
// Example client session:
//
// CONNECT http://server.com with subprotocol "base64.channel.k8s.io"
// WRITE []byte{48, 90, 109, 57, 118, 67, 103, 111, 61} # send "foo\n" (base64: "Zm9vCgo=") on channel '0' (STDIN)
// READ []byte{49, 67, 103, 61, 61} # receive "\n" (base64: "Cg==") on channel '1' (STDOUT)
// CLOSE
//
// CONNECT http://server.com with subprotocol "base64.channel.k8s.io"
// WRITE []byte{48, 90, 109, 57, 118, 67, 103, 111, 61} # send "foo\n" (base64: "Zm9vCgo=") on channel '0' (STDIN)
// READ []byte{49, 67, 103, 61, 61} # receive "\n" (base64: "Cg==") on channel '1' (STDOUT)
// CLOSE
const Base64ChannelWebSocketProtocol = "base64.channel.k8s.io"

type codecType int
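Both subprotocol comments describe the same framing: the first byte (or the first ASCII digit, in the base64 variant) selects the channel and the rest is the payload. The sketch below reproduces the raw framing from the first example; the helper names are invented, and the base64 variant simply prefixes the channel digit to the encoded payload.

```go
package main

import (
	"encoding/base64"
	"fmt"
)

// frameChannel prefixes a message with its channel number, as the
// "channel.k8s.io" subprotocol does: the first byte selects the channel and
// the rest is the raw payload.
func frameChannel(channel byte, payload []byte) []byte {
	return append([]byte{channel}, payload...)
}

// frameBase64Channel does the same for "base64.channel.k8s.io": the first
// byte is the ASCII digit of the channel and the payload is base64 encoded.
func frameBase64Channel(channel int, payload []byte) []byte {
	encoded := base64.StdEncoding.EncodeToString(payload)
	return append([]byte{byte('0' + channel)}, encoded...)
}

func main() {
	// "foo\n" on STDIN (channel 0) -> [0 102 111 111 10], as in the comment.
	fmt.Println(frameChannel(0, []byte("foo\n")))

	// The base64 variant prefixes the ASCII channel digit '0' (byte 48).
	fmt.Println(frameBase64Channel(0, []byte("foo\n")))
}
```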
@@ -36,11 +36,11 @@ type x509MissingSANErrorMetricsRTWrapper struct {

// NewMissingSANRoundTripperWrapperConstructor returns a RoundTripper wrapper that's usable
// within ClientConfig.Wrap that increases the `metricCounter` whenever:
// 1. we get a x509.HostnameError with string `x509: certificate relies on legacy Common Name field`
// which indicates an error caused by the deprecation of Common Name field when veryfing remote
// hostname
// 2. the server certificate in response contains no SAN. This indicates that this binary run
// with the GODEBUG=x509ignoreCN=0 in env
// 1. we get a x509.HostnameError with string `x509: certificate relies on legacy Common Name field`
// which indicates an error caused by the deprecation of Common Name field when veryfing remote
// hostname
// 2. the server certificate in response contains no SAN. This indicates that this binary run
// with the GODEBUG=x509ignoreCN=0 in env
func NewMissingSANRoundTripperWrapperConstructor(metricCounter *metrics.Counter) func(rt http.RoundTripper) http.RoundTripper {
	return func(rt http.RoundTripper) http.RoundTripper {
		return &x509MissingSANErrorMetricsRTWrapper{

@@ -190,10 +190,10 @@ func (b *bufferedBackend) processIncomingEvents(stopCh <-chan struct{}) {
// The following things can cause collectEvents to stop and return the list
// of events:
//
// * Maximum number of events for a batch.
// * Timer has passed.
// * Buffer channel is closed and empty.
// * stopCh is closed.
// - Maximum number of events for a batch.
// - Timer has passed.
// - Buffer channel is closed and empty.
// - stopCh is closed.
func (b *bufferedBackend) collectEvents(timer <-chan time.Time, stopCh <-chan struct{}) []*auditinternal.Event {
	var events []*auditinternal.Event
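The four stop conditions listed above map naturally onto a select loop. The sketch below shows the shape of such a loop with plain strings instead of audit events; it illustrates the batching logic and is not the bufferedBackend code.

```go
package main

import (
	"fmt"
	"time"
)

// collectBatch drains up to maxBatchSize items from ch, stopping early when
// the timer fires, the channel is closed and empty, or stopCh is closed —
// the same four exit conditions listed in the comment above.
func collectBatch(ch <-chan string, timer <-chan time.Time, stopCh <-chan struct{}, maxBatchSize int) []string {
	var batch []string
	for len(batch) < maxBatchSize {
		select {
		case item, ok := <-ch:
			if !ok {
				return batch // buffer channel closed and drained
			}
			batch = append(batch, item)
		case <-timer:
			return batch // batch window elapsed
		case <-stopCh:
			return batch // shutting down
		}
	}
	return batch // maximum batch size reached
}

func main() {
	ch := make(chan string, 4)
	for _, e := range []string{"a", "b", "c"} {
		ch <- e
	}
	close(ch)
	fmt.Println(collectBatch(ch, time.After(time.Second), make(chan struct{}), 10))
}
```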
@@ -452,18 +452,18 @@ func (r *claimResolver) Verifier(iss string) (*oidc.IDTokenVerifier, error) {
// OIDC Connect Core 1.0, section 5.6.2.
// See: https://openid.net/specs/openid-connect-core-1_0.html#AggregatedDistributedClaims
//
// {
// ... (other normal claims)...
// "_claim_names": {
// "groups": "src1"
// },
// "_claim_sources": {
// "src1": {
// "endpoint": "https://www.example.com",
// "access_token": "f005ba11"
// },
// },
// }
// {
// ... (other normal claims)...
// "_claim_names": {
// "groups": "src1"
// },
// "_claim_sources": {
// "src1": {
// "endpoint": "https://www.example.com",
// "access_token": "f005ba11"
// },
// },
// }
func (r *claimResolver) expand(c claims) error {
	const (
		// The claim containing a map of endpoint references per claim.

@@ -77,19 +77,19 @@ func NewFromInterface(subjectAccessReview authorizationv1client.AuthorizationV1I
// New creates a new WebhookAuthorizer from the provided kubeconfig file.
// The config's cluster field is used to refer to the remote service, user refers to the returned authorizer.
//
// # clusters refers to the remote service.
// clusters:
// - name: name-of-remote-authz-service
// cluster:
// certificate-authority: /path/to/ca.pem # CA for verifying the remote service.
// server: https://authz.example.com/authorize # URL of remote service to query. Must use 'https'.
// # clusters refers to the remote service.
// clusters:
// - name: name-of-remote-authz-service
// cluster:
// certificate-authority: /path/to/ca.pem # CA for verifying the remote service.
// server: https://authz.example.com/authorize # URL of remote service to query. Must use 'https'.
//
// # users refers to the API server's webhook configuration.
// users:
// - name: name-of-api-server
// user:
// client-certificate: /path/to/cert.pem # cert for the webhook plugin to use
// client-key: /path/to/key.pem # key matching the cert
// # users refers to the API server's webhook configuration.
// users:
// - name: name-of-api-server
// user:
// client-certificate: /path/to/cert.pem # cert for the webhook plugin to use
// client-key: /path/to/key.pem # key matching the cert
//
// For additional HTTP configuration, refer to the kubeconfig documentation
// https://kubernetes.io/docs/user-guide/kubeconfig-file/.

@@ -121,45 +121,45 @@ func newWithBackoff(subjectAccessReview subjectAccessReviewer, authorizedTTL, un
// serialized api.authorization.v1beta1.SubjectAccessReview object. An example request body is
// provided below.
//
// {
// "apiVersion": "authorization.k8s.io/v1beta1",
// "kind": "SubjectAccessReview",
// "spec": {
// "resourceAttributes": {
// "namespace": "kittensandponies",
// "verb": "GET",
// "group": "group3",
// "resource": "pods"
// },
// "user": "jane",
// "group": [
// "group1",
// "group2"
// ]
// }
// }
// {
// "apiVersion": "authorization.k8s.io/v1beta1",
// "kind": "SubjectAccessReview",
// "spec": {
// "resourceAttributes": {
// "namespace": "kittensandponies",
// "verb": "GET",
// "group": "group3",
// "resource": "pods"
// },
// "user": "jane",
// "group": [
// "group1",
// "group2"
// ]
// }
// }
//
// The remote service is expected to fill the SubjectAccessReviewStatus field to either allow or
// disallow access. A permissive response would return:
//
// {
// "apiVersion": "authorization.k8s.io/v1beta1",
// "kind": "SubjectAccessReview",
// "status": {
// "allowed": true
// }
// }
// {
// "apiVersion": "authorization.k8s.io/v1beta1",
// "kind": "SubjectAccessReview",
// "status": {
// "allowed": true
// }
// }
//
// To disallow access, the remote service would return:
//
// {
// "apiVersion": "authorization.k8s.io/v1beta1",
// "kind": "SubjectAccessReview",
// "status": {
// "allowed": false,
// "reason": "user does not have read access to the namespace"
// }
// }
// {
// "apiVersion": "authorization.k8s.io/v1beta1",
// "kind": "SubjectAccessReview",
// "status": {
// "allowed": false,
// "reason": "user does not have read access to the namespace"
// }
// }
//
// TODO(mikedanese): We should eventually support failing closed when we
// encounter an error. We are failing open now to preserve backwards compatible
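The request body in the comment can be reproduced with a few struct definitions and encoding/json. The sketch below deliberately uses small local types rather than importing the k8s.io/api authorization types, so field coverage is limited to what the example shows.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Minimal local structs mirroring the JSON shown in the comment; the real
// types live in the Kubernetes authorization API group and are not imported
// here on purpose.
type resourceAttributes struct {
	Namespace string `json:"namespace,omitempty"`
	Verb      string `json:"verb,omitempty"`
	Group     string `json:"group,omitempty"`
	Resource  string `json:"resource,omitempty"`
}

type subjectAccessReviewSpec struct {
	ResourceAttributes *resourceAttributes `json:"resourceAttributes,omitempty"`
	User               string              `json:"user,omitempty"`
	Group              []string            `json:"group,omitempty"`
}

type subjectAccessReview struct {
	APIVersion string                  `json:"apiVersion"`
	Kind       string                  `json:"kind"`
	Spec       subjectAccessReviewSpec `json:"spec"`
}

func main() {
	review := subjectAccessReview{
		APIVersion: "authorization.k8s.io/v1beta1",
		Kind:       "SubjectAccessReview",
		Spec: subjectAccessReviewSpec{
			ResourceAttributes: &resourceAttributes{
				Namespace: "kittensandponies",
				Verb:      "GET",
				Group:     "group3",
				Resource:  "pods",
			},
			User:  "jane",
			Group: []string{"group1", "group2"},
		},
	}
	body, _ := json.MarshalIndent(review, "", "  ")
	fmt.Println(string(body)) // matches the example request body above
}
```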
@@ -247,7 +247,7 @@ func (w *WebhookAuthorizer) Authorize(ctx context.Context, attr authorizer.Attri

}

//TODO: need to finish the method to get the rules when using webhook mode
// TODO: need to finish the method to get the rules when using webhook mode
func (w *WebhookAuthorizer) RulesFor(user user.Info, namespace string) ([]authorizer.ResourceRuleInfo, []authorizer.NonResourceRuleInfo, bool, error) {
	var (
		resourceRules []authorizer.ResourceRuleInfo