remove apiserver dependency in resourceinterpreter

Signed-off-by: changzhen <changzhen5@huawei.com>
changzhen 2024-12-24 17:40:54 +08:00
parent c9ca6aca6a
commit 5bd3d0128a
60 changed files with 231 additions and 9287 deletions


@@ -31,7 +31,6 @@ import (
webhookutil "k8s.io/apiserver/pkg/util/webhook"
corev1 "k8s.io/client-go/listers/core/v1"
"k8s.io/klog/v2"
"k8s.io/kube-aggregator/pkg/apiserver"
utiltrace "k8s.io/utils/trace"
configv1alpha1 "github.com/karmada-io/karmada/pkg/apis/config/v1alpha1"
@@ -70,7 +69,7 @@ func NewCustomizedInterpreter(informer genericmanager.SingleClusterInformerManag
}
cm.SetAuthenticationInfoResolver(authInfoResolver)
cm.SetServiceResolver(apiserver.NewClusterIPServiceResolver(serviceLister))
cm.SetServiceResolver(NewServiceResolver(serviceLister))
return &CustomizedInterpreter{
hookManager: configmanager.NewExploreConfigManager(informer),


@@ -0,0 +1,77 @@
package webhook
import (
"fmt"
"net"
"net/url"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
webhookutil "k8s.io/apiserver/pkg/util/webhook"
corev1lister "k8s.io/client-go/listers/core/v1"
)
// ServiceResolver knows how to convert a service reference into an actual location.
type ServiceResolver interface {
ResolveEndpoint(namespace, name string, port int32) (*url.URL, error)
}
// NewServiceResolver returns a ServiceResolver that looks up the Service via the lister first;
// if the Service does not exist, it falls back to constructing a service URL from the given namespace and name.
func NewServiceResolver(services corev1lister.ServiceLister) ServiceResolver {
return &serviceResolver{
services: services,
defaultResolver: webhookutil.NewDefaultServiceResolver(),
}
}
type serviceResolver struct {
services corev1lister.ServiceLister
defaultResolver ServiceResolver
}
func (r *serviceResolver) ResolveEndpoint(namespace, name string, port int32) (*url.URL, error) {
svc, err := r.services.Services(namespace).Get(name)
if err != nil {
if apierrors.IsNotFound(err) {
return r.defaultResolver.ResolveEndpoint(namespace, name, port)
}
return nil, err
}
return resolveCluster(svc, port)
}
// resolveCluster builds a URL from the given Service and port.
func resolveCluster(svc *corev1.Service, port int32) (*url.URL, error) {
switch {
case svc.Spec.Type == corev1.ServiceTypeClusterIP && svc.Spec.ClusterIP == corev1.ClusterIPNone:
return nil, fmt.Errorf(`cannot route to service with ClusterIP "None"`)
// use IP from a clusterIP for these service types
case svc.Spec.Type == corev1.ServiceTypeClusterIP, svc.Spec.Type == corev1.ServiceTypeLoadBalancer, svc.Spec.Type == corev1.ServiceTypeNodePort:
svcPort, err := findServicePort(svc, port)
if err != nil {
return nil, err
}
return &url.URL{
Scheme: "https",
Host: net.JoinHostPort(svc.Spec.ClusterIP, fmt.Sprintf("%d", svcPort.Port)),
}, nil
case svc.Spec.Type == corev1.ServiceTypeExternalName:
return &url.URL{
Scheme: "https",
Host: net.JoinHostPort(svc.Spec.ExternalName, fmt.Sprintf("%d", port)),
}, nil
default:
return nil, fmt.Errorf("unsupported service type %q", svc.Spec.Type)
}
}
// findServicePort finds the service port matching the given port number.
func findServicePort(svc *corev1.Service, port int32) (*corev1.ServicePort, error) {
for _, svcPort := range svc.Spec.Ports {
if svcPort.Port == port {
return &svcPort, nil
}
}
return nil, apierrors.NewServiceUnavailable(fmt.Sprintf("no service port %d found for service %q", port, svc.Name))
}
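
For reference, here is a minimal, hypothetical sketch (not part of the commit) of how the new resolver behaves, assuming a fake client, an informer-backed lister, and made-up namespace/service names; when the Service is absent from the lister, resolution falls back to the default <name>.<namespace>.svc form from webhookutil.NewDefaultServiceResolver.

// Illustrative sketch only; assumes it sits alongside the resolver in the same
// webhook package so that NewServiceResolver is in scope. All names are placeholders.
package webhook

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
)

func ExampleServiceResolver() {
	// Seed a fake client with a single ClusterIP service.
	client := fake.NewSimpleClientset(&corev1.Service{
		ObjectMeta: metav1.ObjectMeta{Namespace: "karmada-system", Name: "interpreter-webhook"},
		Spec: corev1.ServiceSpec{
			Type:      corev1.ServiceTypeClusterIP,
			ClusterIP: "10.96.0.20",
			Ports:     []corev1.ServicePort{{Port: 443}},
		},
	})
	factory := informers.NewSharedInformerFactory(client, 0)
	lister := factory.Core().V1().Services().Lister()
	stopCh := make(chan struct{})
	defer close(stopCh)
	factory.Start(stopCh)
	factory.WaitForCacheSync(stopCh)

	resolver := NewServiceResolver(lister)

	// Found in the lister: resolved through the Service's ClusterIP.
	u, _ := resolver.ResolveEndpoint("karmada-system", "interpreter-webhook", 443)
	fmt.Println(u) // https://10.96.0.20:443

	// Not found in the lister: falls back to the default DNS-based form.
	u, _ = resolver.ResolveEndpoint("karmada-system", "missing", 443)
	fmt.Println(u) // https://missing.karmada-system.svc:443
}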


@@ -0,0 +1,153 @@
package webhook
import (
"fmt"
"net"
"net/url"
"testing"
"github.com/stretchr/testify/assert"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
)
func Test_resolveCluster(t *testing.T) {
type args struct {
svc *corev1.Service
port int32
}
tests := []struct {
name string
args args
want *url.URL
wantErr assert.ErrorAssertionFunc
}{
{
name: "ClusterIP service without expect port, can not be resolved",
args: args{
svc: &corev1.Service{
ObjectMeta: metav1.ObjectMeta{Namespace: "one", Name: "alfa"},
Spec: corev1.ServiceSpec{
Type: corev1.ServiceTypeClusterIP,
ClusterIP: "10.10.10.10",
Ports: []corev1.ServicePort{
{Port: 1234, TargetPort: intstr.FromInt32(1234)},
}}},
port: 443,
},
wantErr: assert.Error,
},
{
name: "ClusterIP service, can be resolved",
args: args{
svc: &corev1.Service{
ObjectMeta: metav1.ObjectMeta{Namespace: "one", Name: "alfa"},
Spec: corev1.ServiceSpec{
Type: corev1.ServiceTypeClusterIP,
ClusterIP: "10.10.10.10",
Ports: []corev1.ServicePort{
{Name: "https", Port: 443, TargetPort: intstr.FromInt32(1443)},
{Port: 1234, TargetPort: intstr.FromInt32(1234)},
}}},
port: 443,
},
want: &url.URL{
Scheme: "https",
Host: net.JoinHostPort("10.10.10.10", "443"),
},
wantErr: assert.NoError,
},
{
name: "headless service, can not be resolved",
args: args{
svc: &corev1.Service{
ObjectMeta: metav1.ObjectMeta{Namespace: "one", Name: "alfa"},
Spec: corev1.ServiceSpec{
Type: corev1.ServiceTypeClusterIP,
ClusterIP: corev1.ClusterIPNone,
}},
port: 443,
},
wantErr: assert.Error,
},
{
name: "LoadBalancer service, can be resolved",
args: args{
svc: &corev1.Service{
ObjectMeta: metav1.ObjectMeta{Namespace: "one", Name: "alfa"},
Spec: corev1.ServiceSpec{
Type: corev1.ServiceTypeLoadBalancer,
ClusterIP: "10.10.10.10",
Ports: []corev1.ServicePort{
{Name: "https", Port: 443, TargetPort: intstr.FromInt32(1443)},
{Port: 1234, TargetPort: intstr.FromInt32(1234)},
}}},
port: 443,
},
want: &url.URL{
Scheme: "https",
Host: net.JoinHostPort("10.10.10.10", "443"),
},
wantErr: assert.NoError,
},
{
name: "NodePort service, can be resolved",
args: args{
svc: &corev1.Service{
ObjectMeta: metav1.ObjectMeta{Namespace: "one", Name: "alfa"},
Spec: corev1.ServiceSpec{
Type: corev1.ServiceTypeNodePort,
ClusterIP: "10.10.10.10",
Ports: []corev1.ServicePort{
{Name: "https", Port: 443, TargetPort: intstr.FromInt32(1443)},
{Port: 1234, TargetPort: intstr.FromInt32(1234)},
}}},
port: 443,
},
want: &url.URL{
Scheme: "https",
Host: net.JoinHostPort("10.10.10.10", "443"),
},
wantErr: assert.NoError,
},
{
name: "ExternalName service, can be resolved",
args: args{
svc: &corev1.Service{
ObjectMeta: metav1.ObjectMeta{Namespace: "one", Name: "alfa"},
Spec: corev1.ServiceSpec{
Type: corev1.ServiceTypeExternalName,
ExternalName: "foo.bar.com",
}},
port: 443,
},
want: &url.URL{
Scheme: "https",
Host: net.JoinHostPort("foo.bar.com", "443"),
},
wantErr: assert.NoError,
},
{
name: "unsupported type service, can not be resolved",
args: args{
svc: &corev1.Service{
ObjectMeta: metav1.ObjectMeta{Namespace: "one", Name: "alfa"},
Spec: corev1.ServiceSpec{
Type: "unsupported service",
}},
port: 443,
},
wantErr: assert.Error,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := resolveCluster(tt.args.svc, tt.args.port)
if !tt.wantErr(t, err, fmt.Sprintf("resolveCluster(%v, %v)", tt.args.svc, tt.args.port)) {
return
}
assert.Equalf(t, tt.want, got, "resolveCluster(%v, %v)", tt.args.svc, tt.args.port)
})
}
}


@@ -1,70 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package table
import (
"time"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/duration"
)
// MetaToTableRow converts a list or object into one or more table rows. The provided rowFn is invoked for
// each accessed item, with name and age being passed to each.
func MetaToTableRow(obj runtime.Object, rowFn func(obj runtime.Object, m metav1.Object, name, age string) ([]interface{}, error)) ([]metav1.TableRow, error) {
if meta.IsListType(obj) {
rows := make([]metav1.TableRow, 0, 16)
err := meta.EachListItem(obj, func(obj runtime.Object) error {
nestedRows, err := MetaToTableRow(obj, rowFn)
if err != nil {
return err
}
rows = append(rows, nestedRows...)
return nil
})
if err != nil {
return nil, err
}
return rows, nil
}
rows := make([]metav1.TableRow, 0, 1)
m, err := meta.Accessor(obj)
if err != nil {
return nil, err
}
row := metav1.TableRow{
Object: runtime.RawExtension{Object: obj},
}
row.Cells, err = rowFn(obj, m, m.GetName(), ConvertToHumanReadableDateType(m.GetCreationTimestamp()))
if err != nil {
return nil, err
}
rows = append(rows, row)
return rows, nil
}
// ConvertToHumanReadableDateType returns the elapsed time since timestamp in
// human-readable approximation.
func ConvertToHumanReadableDateType(timestamp metav1.Time) string {
if timestamp.IsZero() {
return "<unknown>"
}
return duration.HumanDuration(time.Since(timestamp.Time))
}


@@ -1,366 +0,0 @@
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package reconcilers
import (
"context"
"fmt"
"net"
"net/http"
"path"
"strconv"
"sync"
"sync/atomic"
"time"
"k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kruntime "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
apirequest "k8s.io/apiserver/pkg/endpoints/request"
"k8s.io/apiserver/pkg/registry/rest"
"k8s.io/apiserver/pkg/storage"
"k8s.io/apiserver/pkg/storage/storagebackend"
storagefactory "k8s.io/apiserver/pkg/storage/storagebackend/factory"
)
const (
APIServerIdentityLabel = "apiserverIdentity"
)
type PeerAdvertiseAddress struct {
PeerAdvertiseIP string
PeerAdvertisePort string
}
type peerEndpointLeases struct {
storage storage.Interface
destroyFn func()
baseKey string
leaseTime time.Duration
}
type PeerEndpointLeaseReconciler interface {
// GetEndpoint retrieves the endpoint for a given apiserverId
GetEndpoint(serverId string) (string, error)
// UpdateLease updates the ip and port of peer servers
UpdateLease(serverId string, ip string, endpointPorts []corev1.EndpointPort) error
// RemoveEndpoints removes this apiserver's peer endpoint lease.
RemoveLease(serverId string) error
// Destroy cleans up everything on shutdown.
Destroy()
// StopReconciling turns any later ReconcileEndpoints call into a noop.
StopReconciling()
}
type peerEndpointLeaseReconciler struct {
serverLeases *peerEndpointLeases
stopReconcilingCalled atomic.Bool
}
// NewPeerEndpointLeaseReconciler creates a new peer endpoint lease reconciler
func NewPeerEndpointLeaseReconciler(config *storagebackend.ConfigForResource, baseKey string, leaseTime time.Duration) (PeerEndpointLeaseReconciler, error) {
// note that newFunc, newListFunc and resourcePrefix
// can be left blank unless the storage.Watch method is used
leaseStorage, destroyFn, err := storagefactory.Create(*config, nil, nil, "")
if err != nil {
return nil, fmt.Errorf("error creating storage factory: %v", err)
}
var once sync.Once
return &peerEndpointLeaseReconciler{
serverLeases: &peerEndpointLeases{
storage: leaseStorage,
destroyFn: func() { once.Do(destroyFn) },
baseKey: baseKey,
leaseTime: leaseTime,
},
}, nil
}
// PeerEndpointController is the controller manager for updating the peer endpoint leases.
// This provides a separate independent reconciliation loop for peer endpoint leases
// which ensures that the peer kube-apiservers are fetching the updated endpoint info for a given apiserver
// in the case when the peer wants to proxy the request to the given apiserver because it can not serve the
// request itself due to version mismatch.
type PeerEndpointLeaseController struct {
reconciler PeerEndpointLeaseReconciler
endpointInterval time.Duration
serverId string
// peeraddress stores the IP and port of this kube-apiserver. Used by peer kube-apiservers to
// route request to this apiserver in case of a version skew.
peeraddress string
client kubernetes.Interface
lock sync.Mutex
stopCh chan struct{} // closed by Stop()
}
func New(serverId string, peeraddress string,
reconciler PeerEndpointLeaseReconciler, endpointInterval time.Duration, client kubernetes.Interface) *PeerEndpointLeaseController {
return &PeerEndpointLeaseController{
reconciler: reconciler,
serverId: serverId,
// peeraddress stores the IP and port of this kube-apiserver. Used by peer kube-apiservers to
// route request to this apiserver in case of a version skew.
peeraddress: peeraddress,
endpointInterval: endpointInterval,
client: client,
stopCh: make(chan struct{}),
}
}
// Start begins the peer endpoint lease reconciler loop that must exist for bootstrapping
// a cluster.
func (c *PeerEndpointLeaseController) Start(stopCh <-chan struct{}) {
localStopCh := make(chan struct{})
go func() {
defer close(localStopCh)
select {
case <-stopCh: // from Start
case <-c.stopCh: // from Stop
}
}()
go c.Run(localStopCh)
}
// RunPeerEndpointReconciler periodically updates the peer endpoint leases
func (c *PeerEndpointLeaseController) Run(stopCh <-chan struct{}) {
// wait until process is ready
wait.PollImmediateUntil(100*time.Millisecond, func() (bool, error) {
var code int
c.client.CoreV1().RESTClient().Get().AbsPath("/readyz").Do(context.TODO()).StatusCode(&code)
return code == http.StatusOK, nil
}, stopCh)
wait.NonSlidingUntil(func() {
if err := c.UpdatePeerEndpointLeases(); err != nil {
runtime.HandleError(fmt.Errorf("unable to update peer endpoint leases: %v", err))
}
}, c.endpointInterval, stopCh)
}
// Stop cleans up this apiserver's peer endpoint leases.
func (c *PeerEndpointLeaseController) Stop() {
c.lock.Lock()
defer c.lock.Unlock()
select {
case <-c.stopCh:
return // only close once
default:
close(c.stopCh)
}
finishedReconciling := make(chan struct{})
go func() {
defer close(finishedReconciling)
klog.Infof("Shutting down peer endpoint lease reconciler")
// stop reconciliation
c.reconciler.StopReconciling()
// Ensure that there will be no race condition with the ReconcileEndpointLeases.
if err := c.reconciler.RemoveLease(c.serverId); err != nil {
klog.Errorf("Unable to remove peer endpoint leases: %v", err)
}
c.reconciler.Destroy()
}()
select {
case <-finishedReconciling:
// done
case <-time.After(2 * c.endpointInterval):
// don't block server shutdown forever if we can't reach etcd to remove ourselves
klog.Warning("peer_endpoint_controller's RemoveEndpoints() timed out")
}
}
// UpdatePeerEndpointLeases attempts to update the peer endpoint leases.
func (c *PeerEndpointLeaseController) UpdatePeerEndpointLeases() error {
host, port, err := net.SplitHostPort(c.peeraddress)
if err != nil {
return err
}
p, err := strconv.Atoi(port)
if err != nil {
return err
}
endpointPorts := createEndpointPortSpec(p, "https")
// Ensure that there will be no race condition with the RemoveEndpointLeases.
c.lock.Lock()
defer c.lock.Unlock()
// Refresh the TTL on our key, independently of whether any error or
// update conflict happens below. This makes sure that at least some of
// the servers will add our endpoint lease.
if err := c.reconciler.UpdateLease(c.serverId, host, endpointPorts); err != nil {
return err
}
return nil
}
// UpdateLease resets the TTL on a server IP in storage
// UpdateLease will create a new key if it doesn't exist.
// We use the first element in endpointPorts as a part of the lease's base key
// This is done to support our tests that simulate 2 apiservers running on the same ip but
// different ports
// It will also do the following if UnknownVersionInteroperabilityProxy feature is enabled
// 1. store the apiserverId as a label
// 2. store the values passed to --peer-advertise-ip and --peer-advertise-port flags to kube-apiserver as an annotation
// with value of format <ip:port>
func (r *peerEndpointLeaseReconciler) UpdateLease(serverId string, ip string, endpointPorts []corev1.EndpointPort) error {
// reconcile endpoints only if apiserver was not shutdown
if r.stopReconcilingCalled.Load() {
return nil
}
// we use the serverID as the key to avoid using the server IP, port as the key.
// note: this means that this lease doesn't enforce mutual exclusion of ip/port usage between apiserver.
key := path.Join(r.serverLeases.baseKey, serverId)
return r.serverLeases.storage.GuaranteedUpdate(apirequest.NewDefaultContext(), key, &corev1.Endpoints{}, true, nil, func(input kruntime.Object, respMeta storage.ResponseMeta) (kruntime.Object, *uint64, error) {
existing := input.(*corev1.Endpoints)
existing.Subsets = []corev1.EndpointSubset{
{
Addresses: []corev1.EndpointAddress{{IP: ip}},
Ports: endpointPorts,
},
}
// store this server's identity (serverId) as a label. This will be used by
// peers to find the IP of this server when the peer can not serve a request
// due to version skew.
if existing.Labels == nil {
existing.Labels = map[string]string{}
}
existing.Labels[APIServerIdentityLabel] = serverId
// leaseTime needs to be in seconds
leaseTime := uint64(r.serverLeases.leaseTime / time.Second)
// NB: GuaranteedUpdate does not perform the store operation unless
// something changed between load and store (not including resource
// version), meaning we can't refresh the TTL without actually
// changing a field.
existing.Generation++
klog.V(6).Infof("Resetting TTL on server IP %q listed in storage to %v", ip, leaseTime)
return existing, &leaseTime, nil
}, nil)
}
// ListLeases retrieves a list of the current server IPs from storage
func (r *peerEndpointLeaseReconciler) ListLeases() ([]string, error) {
storageOpts := storage.ListOptions{
ResourceVersion: "0",
ResourceVersionMatch: metav1.ResourceVersionMatchNotOlderThan,
Predicate: storage.Everything,
Recursive: true,
}
ipInfoList, err := r.getIpInfoList(storageOpts)
if err != nil {
return nil, err
}
ipList := make([]string, 0, len(ipInfoList.Items))
for _, ip := range ipInfoList.Items {
if len(ip.Subsets) > 0 && len(ip.Subsets[0].Addresses) > 0 && len(ip.Subsets[0].Addresses[0].IP) > 0 {
ipList = append(ipList, ip.Subsets[0].Addresses[0].IP)
}
}
klog.V(6).Infof("Current server IPs listed in storage are %v", ipList)
return ipList, nil
}
// GetLease retrieves the server IP and port for a specific server id
func (r *peerEndpointLeaseReconciler) GetLease(serverId string) (string, error) {
var fullAddr string
if serverId == "" {
return "", fmt.Errorf("error getting endpoint for serverId: empty serverId")
}
storageOpts := storage.ListOptions{
ResourceVersionMatch: metav1.ResourceVersionMatchNotOlderThan,
Predicate: storage.Everything,
Recursive: true,
}
ipInfoList, err := r.getIpInfoList(storageOpts)
if err != nil {
return "", err
}
for _, ip := range ipInfoList.Items {
if ip.Labels[APIServerIdentityLabel] == serverId {
if len(ip.Subsets) > 0 {
var ipStr, portStr string
if len(ip.Subsets[0].Addresses) > 0 {
if len(ip.Subsets[0].Addresses[0].IP) > 0 {
ipStr = ip.Subsets[0].Addresses[0].IP
}
}
if len(ip.Subsets[0].Ports) > 0 {
portStr = fmt.Sprint(ip.Subsets[0].Ports[0].Port)
}
fullAddr = net.JoinHostPort(ipStr, portStr)
break
}
}
}
klog.V(6).Infof("Fetched this server IP for the specified apiserverId %v, %v", serverId, fullAddr)
return fullAddr, nil
}
func (r *peerEndpointLeaseReconciler) StopReconciling() {
r.stopReconcilingCalled.Store(true)
}
// RemoveLease removes the lease on a server IP in storage
// We use the first element in endpointPorts as a part of the lease's base key
// This is done to support our tests that simulate 2 apiservers running on the same ip but
// different ports
func (r *peerEndpointLeaseReconciler) RemoveLease(serverId string) error {
key := path.Join(r.serverLeases.baseKey, serverId)
return r.serverLeases.storage.Delete(apirequest.NewDefaultContext(), key, &corev1.Endpoints{}, nil, rest.ValidateAllObjectFunc, nil)
}
func (r *peerEndpointLeaseReconciler) Destroy() {
r.serverLeases.destroyFn()
}
func (r *peerEndpointLeaseReconciler) GetEndpoint(serverId string) (string, error) {
return r.GetLease(serverId)
}
func (r *peerEndpointLeaseReconciler) getIpInfoList(storageOpts storage.ListOptions) (*corev1.EndpointsList, error) {
ipInfoList := &corev1.EndpointsList{}
if err := r.serverLeases.storage.GetList(apirequest.NewDefaultContext(), r.serverLeases.baseKey, storageOpts, ipInfoList); err != nil {
return nil, err
}
return ipInfoList, nil
}
// createEndpointPortSpec creates the endpoint ports
func createEndpointPortSpec(endpointPort int, endpointPortName string) []corev1.EndpointPort {
return []corev1.EndpointPort{{
Protocol: corev1.ProtocolTCP,
Port: int32(endpointPort),
Name: endpointPortName,
}}
}


@@ -1,36 +0,0 @@
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Among other files, this directory contains functionality for two
// stream proxies: streamtranslator.go and streamtunnel.go. Both of
// these proxies allow the inter-connection of WebSocket and SPDY
// streaming connections.
//
// The stream translator proxy is used for the RemoteCommand
// subprotocol (e.g. kubectl exec, cp, and attach), and it connects
// the output streams of a WebSocket connection (e.g. STDIN, STDOUT,
// STDERR, TTY resize, and error streams) to the input streams of a
// SPDY connection.
//
// The stream tunnel proxy tunnels SPDY frames through a WebSocket
// connection, and it is used for the PortForward subprotocol (e.g.
// kubectl port-forward). This proxy implements tunneling by transparently
// encoding and decoding SPDY framed data into and out of the payload of a
// WebSocket data frame. The primary structure for this tunneling is
// the TunnelingConnection. A lot of the other code in streamtunnel.go
// is for properly upgrading both the upstream SPDY connection and the
// downstream WebSocket connection before streaming begins.
package proxy // import "k8s.io/apiserver/pkg/util/proxy"
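
As a rough, hypothetical sketch (not part of the commit, with placeholder URLs and values throughout), the two proxies described above can be wired together using the constructors defined in this removed package plus the upgrade-aware handler from k8s.io/apimachinery/pkg/util/proxy:

// Rough sketch only: all endpoints, transports, and handler names are assumptions.
package main

import (
	"net/http"
	"net/url"

	utilproxy "k8s.io/apimachinery/pkg/util/proxy"
	"k8s.io/apiserver/pkg/util/proxy"
)

type stubResponder struct{}

func (stubResponder) Error(w http.ResponseWriter, r *http.Request, err error) {
	http.Error(w, err.Error(), http.StatusInternalServerError)
}

func main() {
	// Placeholder upstream SPDY location, e.g. a kubelet exec endpoint.
	upstream := &url.URL{Scheme: "https", Host: "10.0.0.1:10250", Path: "/exec/ns/pod/ctr"}

	// Stream translator: WebSocket (client) <-> SPDY (upstream), used for exec/attach.
	translator := proxy.NewStreamTranslatorHandler(
		upstream,
		http.DefaultTransport, // placeholder round tripper for the upstream connection
		0,                     // MaxBytesPerSec; zero disables throttling
		proxy.Options{Stdin: true, Stdout: true, Stderr: true},
	)

	// Stream tunnel: SPDY frames of a PortForward session carried inside a WebSocket
	// connection, built on an upgrade-aware reverse proxy to the upstream endpoint.
	upgradeProxy := utilproxy.NewUpgradeAwareHandler(upstream, http.DefaultTransport, false, true, stubResponder{})
	tunnel := proxy.NewTunnelingHandler(upgradeProxy)

	mux := http.NewServeMux()
	mux.Handle("/exec", translator)
	mux.Handle("/portForward", tunnel)
	_ = http.ListenAndServe(":8443", mux) // illustrative only
}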


@@ -1,79 +0,0 @@
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metrics
import (
"context"
"sync"
"k8s.io/component-base/metrics"
"k8s.io/component-base/metrics/legacyregistry"
)
const (
subsystem = "apiserver"
statuscode = "code"
)
var registerMetricsOnce sync.Once
var (
// streamTranslatorRequestsTotal counts the number of requests that were handled by
// the StreamTranslatorProxy (RemoteCommand subprotocol).
streamTranslatorRequestsTotal = metrics.NewCounterVec(
&metrics.CounterOpts{
Subsystem: subsystem,
Name: "stream_translator_requests_total",
Help: "Total number of requests that were handled by the StreamTranslatorProxy, which processes streaming RemoteCommand/V5",
StabilityLevel: metrics.ALPHA,
},
[]string{statuscode},
)
// streamTunnelRequestsTotal counts the number of requests that were handled by
// the StreamTunnelProxy (PortForward subprotocol).
streamTunnelRequestsTotal = metrics.NewCounterVec(
&metrics.CounterOpts{
Subsystem: subsystem,
Name: "stream_tunnel_requests_total",
Help: "Total number of requests that were handled by the StreamTunnelProxy, which processes streaming PortForward/V2",
StabilityLevel: metrics.ALPHA,
},
[]string{statuscode},
)
)
func Register() {
registerMetricsOnce.Do(func() {
legacyregistry.MustRegister(streamTranslatorRequestsTotal)
legacyregistry.MustRegister(streamTunnelRequestsTotal)
})
}
func ResetForTest() {
streamTranslatorRequestsTotal.Reset()
streamTunnelRequestsTotal.Reset()
}
// IncStreamTranslatorRequest increments the # of requests handled by the StreamTranslatorProxy.
func IncStreamTranslatorRequest(ctx context.Context, status string) {
streamTranslatorRequestsTotal.WithContext(ctx).WithLabelValues(status).Add(1)
}
// IncStreamTunnelRequest increments the # of requests handled by the StreamTunnelProxy.
func IncStreamTunnelRequest(ctx context.Context, status string) {
streamTunnelRequestsTotal.WithContext(ctx).WithLabelValues(status).Add(1)
}


@@ -1,163 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package proxy
import (
"context"
"fmt"
"math/rand"
"net"
"net/http"
"net/url"
"strconv"
"strings"
"time"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
utilnet "k8s.io/apimachinery/pkg/util/net"
auditinternal "k8s.io/apiserver/pkg/apis/audit"
"k8s.io/apiserver/pkg/audit"
genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
listersv1 "k8s.io/client-go/listers/core/v1"
)
const (
// taken from https://github.com/kubernetes/kubernetes/blob/release-1.27/staging/src/k8s.io/kube-aggregator/pkg/apiserver/handler_proxy.go#L47
aggregatedDiscoveryTimeout = 5 * time.Second
)
// findServicePort finds the service port by name or numerically.
func findServicePort(svc *v1.Service, port int32) (*v1.ServicePort, error) {
for _, svcPort := range svc.Spec.Ports {
if svcPort.Port == port {
return &svcPort, nil
}
}
return nil, errors.NewServiceUnavailable(fmt.Sprintf("no service port %d found for service %q", port, svc.Name))
}
// ResolveEndpoint returns a URL to which one can send traffic for the specified service.
func ResolveEndpoint(services listersv1.ServiceLister, endpoints listersv1.EndpointsLister, namespace, id string, port int32) (*url.URL, error) {
svc, err := services.Services(namespace).Get(id)
if err != nil {
return nil, err
}
switch {
case svc.Spec.Type == v1.ServiceTypeClusterIP, svc.Spec.Type == v1.ServiceTypeLoadBalancer, svc.Spec.Type == v1.ServiceTypeNodePort:
// these are fine
default:
return nil, fmt.Errorf("unsupported service type %q", svc.Spec.Type)
}
svcPort, err := findServicePort(svc, port)
if err != nil {
return nil, err
}
eps, err := endpoints.Endpoints(namespace).Get(svc.Name)
if err != nil {
return nil, err
}
if len(eps.Subsets) == 0 {
return nil, errors.NewServiceUnavailable(fmt.Sprintf("no endpoints available for service %q", svc.Name))
}
// Pick a random Subset to start searching from.
ssSeed := rand.Intn(len(eps.Subsets))
// Find a Subset that has the port.
for ssi := 0; ssi < len(eps.Subsets); ssi++ {
ss := &eps.Subsets[(ssSeed+ssi)%len(eps.Subsets)]
if len(ss.Addresses) == 0 {
continue
}
for i := range ss.Ports {
if ss.Ports[i].Name == svcPort.Name {
// Pick a random address.
ip := ss.Addresses[rand.Intn(len(ss.Addresses))].IP
port := int(ss.Ports[i].Port)
return &url.URL{
Scheme: "https",
Host: net.JoinHostPort(ip, strconv.Itoa(port)),
}, nil
}
}
}
return nil, errors.NewServiceUnavailable(fmt.Sprintf("no endpoints available for service %q", id))
}
func ResolveCluster(services listersv1.ServiceLister, namespace, id string, port int32) (*url.URL, error) {
svc, err := services.Services(namespace).Get(id)
if err != nil {
return nil, err
}
switch {
case svc.Spec.Type == v1.ServiceTypeClusterIP && svc.Spec.ClusterIP == v1.ClusterIPNone:
return nil, fmt.Errorf(`cannot route to service with ClusterIP "None"`)
// use IP from a clusterIP for these service types
case svc.Spec.Type == v1.ServiceTypeClusterIP, svc.Spec.Type == v1.ServiceTypeLoadBalancer, svc.Spec.Type == v1.ServiceTypeNodePort:
svcPort, err := findServicePort(svc, port)
if err != nil {
return nil, err
}
return &url.URL{
Scheme: "https",
Host: net.JoinHostPort(svc.Spec.ClusterIP, fmt.Sprintf("%d", svcPort.Port)),
}, nil
case svc.Spec.Type == v1.ServiceTypeExternalName:
return &url.URL{
Scheme: "https",
Host: net.JoinHostPort(svc.Spec.ExternalName, fmt.Sprintf("%d", port)),
}, nil
default:
return nil, fmt.Errorf("unsupported service type %q", svc.Spec.Type)
}
}
// NewRequestForProxy returns a shallow copy of the original request with a context that may include a timeout for discovery requests
func NewRequestForProxy(location *url.URL, req *http.Request) (*http.Request, context.CancelFunc) {
newCtx := req.Context()
cancelFn := func() {}
if requestInfo, ok := genericapirequest.RequestInfoFrom(req.Context()); ok {
// trim leading and trailing slashes. Then "/apis/group/version" requests are for discovery, so if we have exactly three
// segments that we are going to proxy, we have a discovery request.
if !requestInfo.IsResourceRequest && len(strings.Split(strings.Trim(requestInfo.Path, "/"), "/")) == 3 {
// discovery requests are used by kubectl and others to determine which resources a server has. This is a cheap call that
// should be fast for every aggregated apiserver. Latency for aggregation is expected to be low (as for all extensions)
// so forcing a short timeout here helps responsiveness of all clients.
newCtx, cancelFn = context.WithTimeout(newCtx, aggregatedDiscoveryTimeout)
}
}
// WithContext creates a shallow clone of the request with the same context.
newReq := req.WithContext(newCtx)
newReq.Header = utilnet.CloneHeader(req.Header)
newReq.URL = location
newReq.Host = location.Host
// If the original request has an audit ID, let's make sure we propagate this
// to the aggregated server.
if auditID, found := audit.AuditIDFrom(req.Context()); found {
newReq.Header.Set(auditinternal.HeaderAuditID, string(auditID))
}
return newReq, cancelFn
}


@@ -1,180 +0,0 @@
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package proxy
import (
"fmt"
"net/http"
"net/url"
"strconv"
"github.com/mxk/go-flowrate/flowrate"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/httpstream/spdy"
constants "k8s.io/apimachinery/pkg/util/remotecommand"
"k8s.io/apiserver/pkg/util/proxy/metrics"
"k8s.io/client-go/tools/remotecommand"
"k8s.io/client-go/util/exec"
)
// StreamTranslatorHandler is a handler which translates WebSocket stream data
// to SPDY to proxy to kubelet (and ContainerRuntime).
type StreamTranslatorHandler struct {
// Location is the location of the upstream proxy. It is used as the location to Dial on the upstream server
// for upgrade requests.
Location *url.URL
// Transport provides an optional round tripper to use to proxy. If nil, the default proxy transport is used
Transport http.RoundTripper
// MaxBytesPerSec throttles stream Reader/Writer if necessary
MaxBytesPerSec int64
// Options define the requested streams (e.g. stdin, stdout).
Options Options
}
// NewStreamTranslatorHandler creates a new proxy handler. Responder is required for returning
// errors to the caller.
func NewStreamTranslatorHandler(location *url.URL, transport http.RoundTripper, maxBytesPerSec int64, opts Options) *StreamTranslatorHandler {
return &StreamTranslatorHandler{
Location: location,
Transport: transport,
MaxBytesPerSec: maxBytesPerSec,
Options: opts,
}
}
func (h *StreamTranslatorHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
// Create WebSocket server, including particular streams requested. If this websocket
// endpoint is not able to be upgraded, the websocket library will return errors
// to the client.
websocketStreams, err := webSocketServerStreams(req, w, h.Options)
if err != nil {
// Client error increments bad request status code.
metrics.IncStreamTranslatorRequest(req.Context(), strconv.Itoa(http.StatusBadRequest))
return
}
defer websocketStreams.conn.Close()
// Creating SPDY executor, ensuring redirects are not followed.
spdyRoundTripper, err := spdy.NewRoundTripperWithConfig(spdy.RoundTripperConfig{UpgradeTransport: h.Transport})
if err != nil {
websocketStreams.writeStatus(apierrors.NewInternalError(err)) //nolint:errcheck
metrics.IncStreamTranslatorRequest(req.Context(), strconv.Itoa(http.StatusInternalServerError))
return
}
spdyExecutor, err := remotecommand.NewSPDYExecutorRejectRedirects(spdyRoundTripper, spdyRoundTripper, "POST", h.Location)
if err != nil {
websocketStreams.writeStatus(apierrors.NewInternalError(err)) //nolint:errcheck
metrics.IncStreamTranslatorRequest(req.Context(), strconv.Itoa(http.StatusInternalServerError))
return
}
// Wire the WebSocket server streams output to the SPDY client input. The stdin/stdout/stderr streams
// can be throttled if the transfer rate exceeds the "MaxBytesPerSec" (zero means unset). Throttling
// the streams instead of the underlying connection *may* not perform the same if two streams
// traveling the same direction (e.g. stdout, stderr) are being maxed out.
opts := remotecommand.StreamOptions{}
if h.Options.Stdin {
stdin := websocketStreams.stdinStream
if h.MaxBytesPerSec > 0 {
stdin = flowrate.NewReader(stdin, h.MaxBytesPerSec)
}
opts.Stdin = stdin
}
if h.Options.Stdout {
stdout := websocketStreams.stdoutStream
if h.MaxBytesPerSec > 0 {
stdout = flowrate.NewWriter(stdout, h.MaxBytesPerSec)
}
opts.Stdout = stdout
}
if h.Options.Stderr {
stderr := websocketStreams.stderrStream
if h.MaxBytesPerSec > 0 {
stderr = flowrate.NewWriter(stderr, h.MaxBytesPerSec)
}
opts.Stderr = stderr
}
if h.Options.Tty {
opts.Tty = true
opts.TerminalSizeQueue = &translatorSizeQueue{resizeChan: websocketStreams.resizeChan}
}
// Start the SPDY client with connected streams. Output from the WebSocket server
// streams will be forwarded into the SPDY client. Report SPDY execution errors
// through the websocket error stream.
err = spdyExecutor.StreamWithContext(req.Context(), opts)
if err != nil {
//nolint:errcheck // Ignore writeStatus returned error
if statusErr, ok := err.(*apierrors.StatusError); ok {
websocketStreams.writeStatus(statusErr)
// Increment status code returned within status error.
metrics.IncStreamTranslatorRequest(req.Context(), strconv.Itoa(int(statusErr.Status().Code)))
} else if exitErr, ok := err.(exec.CodeExitError); ok && exitErr.Exited() {
websocketStreams.writeStatus(codeExitToStatusError(exitErr))
// Returned an exit code from the container, so not an error in
// stream translator--add StatusOK to metrics.
metrics.IncStreamTranslatorRequest(req.Context(), strconv.Itoa(http.StatusOK))
} else {
websocketStreams.writeStatus(apierrors.NewInternalError(err))
metrics.IncStreamTranslatorRequest(req.Context(), strconv.Itoa(http.StatusInternalServerError))
}
return
}
// Write the success status back to the WebSocket client.
//nolint:errcheck
websocketStreams.writeStatus(&apierrors.StatusError{ErrStatus: metav1.Status{
Status: metav1.StatusSuccess,
}})
metrics.IncStreamTranslatorRequest(req.Context(), strconv.Itoa(http.StatusOK))
}
// translatorSizeQueue feeds the size events from the WebSocket
// resizeChan into the SPDY client input. Implements TerminalSizeQueue
// interface.
type translatorSizeQueue struct {
resizeChan chan remotecommand.TerminalSize
}
func (t *translatorSizeQueue) Next() *remotecommand.TerminalSize {
size, ok := <-t.resizeChan
if !ok {
return nil
}
return &size
}
// codeExitToStatusError converts a passed CodeExitError to the type necessary
// to send through an error stream using "writeStatus".
func codeExitToStatusError(exitErr exec.CodeExitError) *apierrors.StatusError {
rc := exitErr.ExitStatus()
return &apierrors.StatusError{
ErrStatus: metav1.Status{
Status: metav1.StatusFailure,
Reason: constants.NonZeroExitCodeReason,
Details: &metav1.StatusDetails{
Causes: []metav1.StatusCause{
{
Type: constants.ExitCodeCauseType,
Message: fmt.Sprintf("%d", rc),
},
},
},
Message: fmt.Sprintf("command terminated with non-zero exit code: %v", exitErr),
},
}
}


@@ -1,470 +0,0 @@
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package proxy
import (
"bufio"
"bytes"
"context"
"errors"
"fmt"
"net"
"net/http"
"strconv"
"strings"
"sync"
"time"
gwebsocket "github.com/gorilla/websocket"
"k8s.io/apimachinery/pkg/util/httpstream"
"k8s.io/apimachinery/pkg/util/httpstream/spdy"
"k8s.io/apimachinery/pkg/util/httpstream/wsstream"
utilnet "k8s.io/apimachinery/pkg/util/net"
constants "k8s.io/apimachinery/pkg/util/portforward"
"k8s.io/apiserver/pkg/util/proxy/metrics"
"k8s.io/client-go/tools/portforward"
"k8s.io/klog/v2"
)
// TunnelingHandler is a handler which tunnels SPDY through WebSockets.
type TunnelingHandler struct {
// Used to communicate between upstream SPDY and downstream tunnel.
upgradeHandler http.Handler
}
// NewTunnelingHandler is used to create the tunnel between an upstream
// SPDY connection and a downstream tunneling connection through the stored
// UpgradeAwareProxy.
func NewTunnelingHandler(upgradeHandler http.Handler) *TunnelingHandler {
return &TunnelingHandler{upgradeHandler: upgradeHandler}
}
// ServeHTTP uses the upgradeHandler to tunnel between a downstream tunneling
// connection and an upstream SPDY connection. The tunneling connection is
// a wrapped WebSockets connection which communicates SPDY framed data. In the
// case the upstream upgrade fails, we delegate communication to the passed
// in "w" ResponseWriter.
func (h *TunnelingHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
klog.V(4).Infoln("TunnelingHandler ServeHTTP")
spdyProtocols := spdyProtocolsFromWebsocketProtocols(req)
if len(spdyProtocols) == 0 {
metrics.IncStreamTunnelRequest(req.Context(), strconv.Itoa(http.StatusBadRequest))
http.Error(w, "unable to upgrade: no tunneling spdy protocols provided", http.StatusBadRequest)
return
}
spdyRequest := createSPDYRequest(req, spdyProtocols...)
// The fields "w" and "conn" are mutually exclusive. Either a successful upgrade occurs
// and the "conn" is hijacked and used in the subsequent upgradeHandler, or
// the upgrade failed, and "w" is the delegate used for the non-upgrade response.
writer := &tunnelingResponseWriter{
// "w" is used in the non-upgrade error cases called in the upgradeHandler.
w: w,
// "conn" is returned in the successful upgrade case when hijacked in the upgradeHandler.
conn: &headerInterceptingConn{
initializableConn: &tunnelingWebsocketUpgraderConn{
w: w,
req: req,
},
},
}
klog.V(4).Infoln("Tunnel spdy through websockets using the UpgradeAwareProxy")
h.upgradeHandler.ServeHTTP(writer, spdyRequest)
}
// createSPDYRequest modifies the passed request to remove
// WebSockets headers and add SPDY upgrade information, including
// spdy protocols acceptable to the client.
func createSPDYRequest(req *http.Request, spdyProtocols ...string) *http.Request {
clone := utilnet.CloneRequest(req)
// Clean up the websocket headers from the http request.
clone.Header.Del(wsstream.WebSocketProtocolHeader)
clone.Header.Del("Sec-Websocket-Key")
clone.Header.Del("Sec-Websocket-Version")
clone.Header.Del(httpstream.HeaderUpgrade)
// Update the http request for an upstream SPDY upgrade.
clone.Method = "POST"
clone.Body = nil // Remove the request body which is unused.
clone.Header.Set(httpstream.HeaderUpgrade, spdy.HeaderSpdy31)
clone.Header.Del(httpstream.HeaderProtocolVersion)
for i := range spdyProtocols {
clone.Header.Add(httpstream.HeaderProtocolVersion, spdyProtocols[i])
}
return clone
}
// spdyProtocolsFromWebsocketProtocols returns a list of spdy protocols by filtering
// to Kubernetes websocket subprotocols prefixed with "SPDY/3.1+", then removing the prefix
func spdyProtocolsFromWebsocketProtocols(req *http.Request) []string {
var spdyProtocols []string
for _, protocol := range gwebsocket.Subprotocols(req) {
if strings.HasPrefix(protocol, constants.WebsocketsSPDYTunnelingPrefix) && strings.HasSuffix(protocol, constants.KubernetesSuffix) {
spdyProtocols = append(spdyProtocols, strings.TrimPrefix(protocol, constants.WebsocketsSPDYTunnelingPrefix))
}
}
return spdyProtocols
}
var _ http.ResponseWriter = &tunnelingResponseWriter{}
var _ http.Hijacker = &tunnelingResponseWriter{}
// tunnelingResponseWriter implements the http.ResponseWriter and http.Hijacker interfaces.
// Only non-upgrade responses can be written using WriteHeader() and Write().
// Once Write or WriteHeader is called, Hijack returns an error.
// Once Hijack is called, Write, WriteHeader, and Hijack return errors.
type tunnelingResponseWriter struct {
// w is used to delegate Header(), WriteHeader(), and Write() calls
w http.ResponseWriter
// conn is returned from Hijack()
conn net.Conn
// mu guards writes
mu sync.Mutex
// wrote tracks whether WriteHeader or Write has been called
written bool
// hijacked tracks whether Hijack has been called
hijacked bool
}
// Hijack returns a delegate "net.Conn".
// An error is returned if Write(), WriteHeader(), or Hijack() was previously called.
// The returned bufio.ReadWriter is always nil.
func (w *tunnelingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
w.mu.Lock()
defer w.mu.Unlock()
if w.written {
klog.Errorf("Hijack called after write")
return nil, nil, errors.New("connection has already been written to")
}
if w.hijacked {
klog.Errorf("Hijack called after hijack")
return nil, nil, errors.New("connection has already been hijacked")
}
w.hijacked = true
klog.V(6).Infof("Hijack returning websocket tunneling net.Conn")
return w.conn, nil, nil
}
// Header is delegated to the stored "http.ResponseWriter".
func (w *tunnelingResponseWriter) Header() http.Header {
return w.w.Header()
}
// Write is delegated to the stored "http.ResponseWriter".
func (w *tunnelingResponseWriter) Write(p []byte) (int, error) {
w.mu.Lock()
defer w.mu.Unlock()
if w.hijacked {
klog.Errorf("Write called after hijack")
return 0, http.ErrHijacked
}
w.written = true
return w.w.Write(p)
}
// WriteHeader is delegated to the stored "http.ResponseWriter".
func (w *tunnelingResponseWriter) WriteHeader(statusCode int) {
w.mu.Lock()
defer w.mu.Unlock()
if w.written {
klog.Errorf("WriteHeader called after write")
return
}
if w.hijacked {
klog.Errorf("WriteHeader called after hijack")
return
}
w.written = true
if statusCode == http.StatusSwitchingProtocols {
// 101 upgrade responses must come via the hijacked connection, not WriteHeader
klog.Errorf("WriteHeader called with 101 upgrade")
http.Error(w.w, "unexpected upgrade", http.StatusInternalServerError)
return
}
// pass through non-upgrade responses we don't need to translate
w.w.WriteHeader(statusCode)
}
// headerInterceptingConn wraps the tunneling "net.Conn" to drain the
// HTTP response status/headers from the upstream SPDY connection, then use
// that to decide how to initialize the delegate connection for writes.
type headerInterceptingConn struct {
// initializableConn is delegated to for all net.Conn methods.
// initializableConn.Write() is not called until response headers have been read
// and initializableConn#InitializeWrite() has been called with the result.
initializableConn
lock sync.Mutex
headerBuffer []byte
initialized bool
initializeErr error
}
// initializableConn is a connection that will be initialized before any calls to Write are made
type initializableConn interface {
net.Conn
// InitializeWrite is called when the backend response headers have been read.
// backendResponse contains the parsed headers.
// backendResponseBytes are the raw bytes the headers were parsed from.
InitializeWrite(backendResponse *http.Response, backendResponseBytes []byte) error
}
const maxHeaderBytes = 1 << 20
// token for normal header / body separation (\r\n\r\n, but go tolerates the leading \r being absent)
var lfCRLF = []byte("\n\r\n")
// token for header / body separation without \r (which go tolerates)
var lfLF = []byte("\n\n")
// Write intercepts to initially swallow the HTTP response, then
// delegate to the tunneling "net.Conn" once the response has been
// seen and processed.
func (h *headerInterceptingConn) Write(b []byte) (int, error) {
h.lock.Lock()
defer h.lock.Unlock()
if h.initializeErr != nil {
return 0, h.initializeErr
}
if h.initialized {
return h.initializableConn.Write(b)
}
// Guard against excessive buffering
if len(h.headerBuffer)+len(b) > maxHeaderBytes {
return 0, fmt.Errorf("header size limit exceeded")
}
// Accumulate into headerBuffer
h.headerBuffer = append(h.headerBuffer, b...)
// Attempt to parse http response headers
var headerBytes, bodyBytes []byte
if i := bytes.Index(h.headerBuffer, lfCRLF); i != -1 {
// headers terminated with \n\r\n
headerBytes = h.headerBuffer[0 : i+len(lfCRLF)]
bodyBytes = h.headerBuffer[i+len(lfCRLF):]
} else if i := bytes.Index(h.headerBuffer, lfLF); i != -1 {
// headers terminated with \n\n (which go tolerates)
headerBytes = h.headerBuffer[0 : i+len(lfLF)]
bodyBytes = h.headerBuffer[i+len(lfLF):]
} else {
// don't have a complete set of headers yet
return len(b), nil
}
resp, err := http.ReadResponse(bufio.NewReader(bytes.NewReader(headerBytes)), nil)
if err != nil {
klog.Errorf("invalid headers: %v", err)
h.initializeErr = err
return len(b), err
}
resp.Body.Close() //nolint:errcheck
h.headerBuffer = nil
h.initialized = true
h.initializeErr = h.initializableConn.InitializeWrite(resp, headerBytes)
if h.initializeErr != nil {
return len(b), h.initializeErr
}
if len(bodyBytes) > 0 {
_, err = h.initializableConn.Write(bodyBytes)
}
return len(b), err
}
type tunnelingWebsocketUpgraderConn struct {
// req is the websocket request, used for upgrading
req *http.Request
// w is the websocket writer, used for upgrading and writing error responses
w http.ResponseWriter
// lock guards conn and err
lock sync.RWMutex
// if conn is non-nil, InitializeWrite succeeded
conn net.Conn
// if err is non-nil, InitializeWrite failed or Close was called before InitializeWrite
err error
}
func (u *tunnelingWebsocketUpgraderConn) InitializeWrite(backendResponse *http.Response, backendResponseBytes []byte) (err error) {
// make sure we close a connection we open in error cases
var conn net.Conn
defer func() {
if err != nil && conn != nil {
conn.Close() //nolint:errcheck
}
}()
u.lock.Lock()
defer u.lock.Unlock()
if u.conn != nil {
return fmt.Errorf("InitializeWrite already called")
}
if u.err != nil {
return u.err
}
if backendResponse.StatusCode == http.StatusSwitchingProtocols {
connectionHeader := strings.ToLower(backendResponse.Header.Get(httpstream.HeaderConnection))
upgradeHeader := strings.ToLower(backendResponse.Header.Get(httpstream.HeaderUpgrade))
if !strings.Contains(connectionHeader, strings.ToLower(httpstream.HeaderUpgrade)) || !strings.Contains(upgradeHeader, strings.ToLower(spdy.HeaderSpdy31)) {
klog.Errorf("unable to upgrade: missing upgrade headers in response: %#v", backendResponse.Header)
u.err = fmt.Errorf("unable to upgrade: missing upgrade headers in response")
metrics.IncStreamTunnelRequest(context.Background(), strconv.Itoa(http.StatusInternalServerError))
http.Error(u.w, u.err.Error(), http.StatusInternalServerError)
return u.err
}
// Translate the server's chosen SPDY protocol into the tunneled websocket protocol for the handshake
var serverWebsocketProtocols []string
if backendSPDYProtocol := strings.TrimSpace(backendResponse.Header.Get(httpstream.HeaderProtocolVersion)); backendSPDYProtocol != "" {
serverWebsocketProtocols = []string{constants.WebsocketsSPDYTunnelingPrefix + backendSPDYProtocol}
} else {
serverWebsocketProtocols = []string{}
}
// Try to upgrade the websocket connection.
// Beyond this point, we don't need to write errors to the response.
var upgrader = gwebsocket.Upgrader{
CheckOrigin: func(r *http.Request) bool { return true },
Subprotocols: serverWebsocketProtocols,
}
conn, err := upgrader.Upgrade(u.w, u.req, nil)
if err != nil {
klog.Errorf("error upgrading websocket connection: %v", err)
metrics.IncStreamTunnelRequest(context.Background(), strconv.Itoa(http.StatusInternalServerError))
u.err = err
return u.err
}
klog.V(4).Infof("websocket connection created: %s", conn.Subprotocol())
metrics.IncStreamTunnelRequest(context.Background(), strconv.Itoa(http.StatusSwitchingProtocols))
u.conn = portforward.NewTunnelingConnection("server", conn)
return nil
}
// anything other than an upgrade should pass through the backend response
klog.Errorf("SPDY upgrade failed: %s", backendResponse.Status)
metrics.IncStreamTunnelRequest(context.Background(), strconv.Itoa(backendResponse.StatusCode))
// try to hijack
conn, _, err = u.w.(http.Hijacker).Hijack()
if err != nil {
klog.Errorf("Unable to hijack response: %v", err)
u.err = err
return u.err
}
// replay the backend response bytes to the hijacked conn
conn.SetWriteDeadline(time.Now().Add(10 * time.Second)) //nolint:errcheck
_, err = conn.Write(backendResponseBytes)
if err != nil {
u.err = err
return u.err
}
u.conn = conn
return nil
}
func (u *tunnelingWebsocketUpgraderConn) Read(b []byte) (n int, err error) {
u.lock.RLock()
defer u.lock.RUnlock()
if u.conn != nil {
return u.conn.Read(b)
}
if u.err != nil {
return 0, u.err
}
// return empty read without blocking until we are initialized
return 0, nil
}
func (u *tunnelingWebsocketUpgraderConn) Write(b []byte) (n int, err error) {
u.lock.RLock()
defer u.lock.RUnlock()
if u.conn != nil {
return u.conn.Write(b)
}
if u.err != nil {
return 0, u.err
}
return 0, fmt.Errorf("Write called before Initialize")
}
func (u *tunnelingWebsocketUpgraderConn) Close() error {
u.lock.Lock()
defer u.lock.Unlock()
if u.conn != nil {
return u.conn.Close()
}
if u.err != nil {
return u.err
}
// record that we closed so we don't write again or try to initialize
u.err = fmt.Errorf("connection closed")
// write a response
http.Error(u.w, u.err.Error(), http.StatusInternalServerError)
return nil
}
func (u *tunnelingWebsocketUpgraderConn) LocalAddr() net.Addr {
u.lock.RLock()
defer u.lock.RUnlock()
if u.conn != nil {
return u.conn.LocalAddr()
}
return noopAddr{}
}
func (u *tunnelingWebsocketUpgraderConn) RemoteAddr() net.Addr {
u.lock.RLock()
defer u.lock.RUnlock()
if u.conn != nil {
return u.conn.RemoteAddr()
}
return noopAddr{}
}
func (u *tunnelingWebsocketUpgraderConn) SetDeadline(t time.Time) error {
u.lock.RLock()
defer u.lock.RUnlock()
if u.conn != nil {
return u.conn.SetDeadline(t)
}
return nil
}
func (u *tunnelingWebsocketUpgraderConn) SetReadDeadline(t time.Time) error {
u.lock.RLock()
defer u.lock.RUnlock()
if u.conn != nil {
return u.conn.SetReadDeadline(t)
}
return nil
}
func (u *tunnelingWebsocketUpgraderConn) SetWriteDeadline(t time.Time) error {
u.lock.RLock()
defer u.lock.RUnlock()
if u.conn != nil {
return u.conn.SetWriteDeadline(t)
}
return nil
}
type noopAddr struct{}
func (n noopAddr) Network() string { return "" }
func (n noopAddr) String() string { return "" }


@@ -1,51 +0,0 @@
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package proxy
import (
"net/http"
"k8s.io/klog/v2"
)
// translatingHandler wraps the delegate handler, implementing the
// http.Handler interface. The delegate handles all requests unless
// the request satisfies the passed "shouldTranslate" function
// (currently only for WebSocket/V5 request), in which case the translator
// handles the request.
type translatingHandler struct {
delegate http.Handler
translator http.Handler
shouldTranslate func(*http.Request) bool
}
func NewTranslatingHandler(delegate http.Handler, translator http.Handler, shouldTranslate func(*http.Request) bool) http.Handler {
return &translatingHandler{
delegate: delegate,
translator: translator,
shouldTranslate: shouldTranslate,
}
}
func (t *translatingHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
if t.shouldTranslate(req) {
klog.V(4).Infof("request handled by translator proxy")
t.translator.ServeHTTP(w, req)
return
}
t.delegate.ServeHTTP(w, req)
}


@@ -1,200 +0,0 @@
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package proxy
import (
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"time"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/util/httpstream/wsstream"
constants "k8s.io/apimachinery/pkg/util/remotecommand"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/tools/remotecommand"
)
const (
// idleTimeout is the read/write deadline set for websocket server connection. Reading
// or writing the connection will return an i/o timeout if this deadline is exceeded.
// Currently, we use the same value as the kubelet websocket server.
defaultIdleConnectionTimeout = 4 * time.Hour
// Deadline for writing errors to the websocket connection before io/timeout.
writeErrorDeadline = 10 * time.Second
)
// Options contains details about which streams are required for
// remote command execution.
type Options struct {
Stdin bool
Stdout bool
Stderr bool
Tty bool
}
// conns contains the connection and streams used when
// forwarding an attach or execute session into a container.
type conns struct {
conn io.Closer
stdinStream io.ReadCloser
stdoutStream io.WriteCloser
stderrStream io.WriteCloser
writeStatus func(status *apierrors.StatusError) error
resizeStream io.ReadCloser
resizeChan chan remotecommand.TerminalSize
tty bool
}
// Create WebSocket server streams to respond to a WebSocket client. Creates the streams passed
// in the stream options.
func webSocketServerStreams(req *http.Request, w http.ResponseWriter, opts Options) (*conns, error) {
ctx, err := createWebSocketStreams(req, w, opts)
if err != nil {
return nil, err
}
if ctx.resizeStream != nil {
ctx.resizeChan = make(chan remotecommand.TerminalSize)
go func() {
// Resize channel closes in panic case, and panic does not take down caller.
defer func() {
if p := recover(); p != nil {
// Standard panic logging.
for _, fn := range runtime.PanicHandlers {
fn(req.Context(), p)
}
}
}()
handleResizeEvents(req.Context(), ctx.resizeStream, ctx.resizeChan)
}()
}
return ctx, nil
}
// Read terminal resize events off of passed stream and queue into passed channel.
func handleResizeEvents(ctx context.Context, stream io.Reader, channel chan<- remotecommand.TerminalSize) {
defer close(channel)
decoder := json.NewDecoder(stream)
for {
size := remotecommand.TerminalSize{}
if err := decoder.Decode(&size); err != nil {
break
}
select {
case channel <- size:
case <-ctx.Done():
// To avoid leaking this routine, exit if the http request finishes. This path
// would generally be hit if starting the process fails and nothing is started to
// ingest these resize events.
return
}
}
}
// createChannels returns the standard channel types for a shell connection (STDIN 0, STDOUT 1, STDERR 2)
// along with the approximate duplex value. It also creates the error (3) and resize (4) channels.
func createChannels(opts Options) []wsstream.ChannelType {
// open the requested channels, and always open the error channel
channels := make([]wsstream.ChannelType, 5)
channels[constants.StreamStdIn] = readChannel(opts.Stdin)
channels[constants.StreamStdOut] = writeChannel(opts.Stdout)
channels[constants.StreamStdErr] = writeChannel(opts.Stderr)
channels[constants.StreamErr] = wsstream.WriteChannel
channels[constants.StreamResize] = wsstream.ReadChannel
return channels
}
// readChannel returns wsstream.ReadChannel if real is true, or wsstream.IgnoreChannel.
func readChannel(real bool) wsstream.ChannelType {
if real {
return wsstream.ReadChannel
}
return wsstream.IgnoreChannel
}
// writeChannel returns wsstream.WriteChannel if real is true, or wsstream.IgnoreChannel.
func writeChannel(real bool) wsstream.ChannelType {
if real {
return wsstream.WriteChannel
}
return wsstream.IgnoreChannel
}
// createWebSocketStreams returns a "conns" struct containing the websocket connection and
// streams needed to perform an exec or an attach.
func createWebSocketStreams(req *http.Request, w http.ResponseWriter, opts Options) (*conns, error) {
channels := createChannels(opts)
conn := wsstream.NewConn(map[string]wsstream.ChannelProtocolConfig{
// WebSocket server only supports remote command version 5.
constants.StreamProtocolV5Name: {
Binary: true,
Channels: channels,
},
})
conn.SetIdleTimeout(defaultIdleConnectionTimeout)
// Opening the connection responds to WebSocket client, negotiating
// the WebSocket upgrade connection and the subprotocol.
_, streams, err := conn.Open(w, req)
if err != nil {
return nil, err
}
// Send an empty message to the lowest writable channel to notify the client the connection is established
switch {
case opts.Stdout:
_, err = streams[constants.StreamStdOut].Write([]byte{})
case opts.Stderr:
_, err = streams[constants.StreamStdErr].Write([]byte{})
default:
_, err = streams[constants.StreamErr].Write([]byte{})
}
if err != nil {
conn.Close()
return nil, fmt.Errorf("write error during websocket server creation: %v", err)
}
ctx := &conns{
conn: conn,
stdinStream: streams[constants.StreamStdIn],
stdoutStream: streams[constants.StreamStdOut],
stderrStream: streams[constants.StreamStdErr],
tty: opts.Tty,
resizeStream: streams[constants.StreamResize],
}
// writeStatus returns a WriteStatusFunc that marshals a given api Status
// as json in the error channel.
ctx.writeStatus = func(status *apierrors.StatusError) error {
bs, err := json.Marshal(status.Status())
if err != nil {
return err
}
// Write status error to error stream with deadline.
conn.SetWriteDeadline(writeErrorDeadline)
_, err = streams[constants.StreamErr].Write(bs)
return err
}
return ctx, nil
}
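// For orientation, a minimal usage sketch (same package assumed; the handler
// name and wiring are illustrative): an HTTP handler negotiates the V5
// websocket streams for an exec-style session and then hands the streams to
// whatever runs the process.
func handleExecExample(w http.ResponseWriter, req *http.Request) {
	opts := Options{Stdin: true, Stdout: true, Tty: true}
	streams, err := webSocketServerStreams(req, w, opts)
	if err != nil {
		// The upgrade failed; nothing more can be written on this connection.
		return
	}
	defer streams.conn.Close()
	// streams.stdinStream and streams.stdoutStream would now be wired to the
	// attached or executed process; streams.resizeChan carries terminal resizes.
}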

View File

@ -1,10 +0,0 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- aojea
- liggitt
- seans3
reviewers:
- aojea
- liggitt
- seans3

View File

@ -1,19 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package portforward adds support for SSH-like port forwarding from the client's
// local host to remote containers.
package portforward // import "k8s.io/client-go/tools/portforward"

View File

@ -1,57 +0,0 @@
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package portforward
import (
"k8s.io/apimachinery/pkg/util/httpstream"
"k8s.io/klog/v2"
)
var _ httpstream.Dialer = &FallbackDialer{}
// FallbackDialer encapsulates a primary and secondary dialer, including
// the boolean function to determine if the primary dialer failed. Implements
// the httpstream.Dialer interface.
type FallbackDialer struct {
primary httpstream.Dialer
secondary httpstream.Dialer
shouldFallback func(error) bool
}
// NewFallbackDialer creates the FallbackDialer with the primary and secondary dialers,
// as well as the boolean function to determine if the primary dialer failed.
func NewFallbackDialer(primary, secondary httpstream.Dialer, shouldFallback func(error) bool) httpstream.Dialer {
return &FallbackDialer{
primary: primary,
secondary: secondary,
shouldFallback: shouldFallback,
}
}
// Dial is the single function necessary to implement the "httpstream.Dialer" interface.
// It takes the protocol version strings to request, returning the upgraded
// httpstream.Connection and the negotiated protocol version accepted. If the initial
// primary dialer fails, this function attempts the secondary dialer. Returns an error
// if one occurs.
func (f *FallbackDialer) Dial(protocols ...string) (httpstream.Connection, string, error) {
conn, version, err := f.primary.Dial(protocols...)
if err != nil && f.shouldFallback(err) {
klog.V(4).Infof("fallback to secondary dialer from primary dialer err: %v", err)
return f.secondary.Dial(protocols...)
}
return conn, version, err
}
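// A minimal usage sketch (the dialers and the fallback predicate are
// assumptions for illustration): prefer the primary dialer and fall back to
// the secondary whenever the primary returns an error.
func newDialerWithFallbackExample(primary, secondary httpstream.Dialer) httpstream.Dialer {
	// In practice the predicate would inspect the error for an upgrade failure;
	// falling back on any error keeps this sketch simple.
	shouldFallback := func(err error) bool { return err != nil }
	return NewFallbackDialer(primary, secondary, shouldFallback)
}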

View File

@ -1,443 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package portforward
import (
"errors"
"fmt"
"io"
"net"
"net/http"
"sort"
"strconv"
"strings"
"sync"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/httpstream"
"k8s.io/apimachinery/pkg/util/runtime"
netutils "k8s.io/utils/net"
)
// PortForwardProtocolV1Name is the subprotocol used for port forwarding.
// TODO move to API machinery and re-unify with kubelet/server/portforward
const PortForwardProtocolV1Name = "portforward.k8s.io"
var ErrLostConnectionToPod = errors.New("lost connection to pod")
// PortForwarder knows how to listen for local connections and forward them to
// a remote pod via an upgraded HTTP request.
type PortForwarder struct {
addresses []listenAddress
ports []ForwardedPort
stopChan <-chan struct{}
dialer httpstream.Dialer
streamConn httpstream.Connection
listeners []io.Closer
Ready chan struct{}
requestIDLock sync.Mutex
requestID int
out io.Writer
errOut io.Writer
}
// ForwardedPort contains a Local:Remote port pairing.
type ForwardedPort struct {
Local uint16
Remote uint16
}
/*
valid port specifications:
5000
- forwards from localhost:5000 to pod:5000
8888:5000
- forwards from localhost:8888 to pod:5000
0:5000
:5000
- selects a random available local port,
forwards from localhost:<random port> to pod:5000
*/
func parsePorts(ports []string) ([]ForwardedPort, error) {
var forwards []ForwardedPort
for _, portString := range ports {
parts := strings.Split(portString, ":")
var localString, remoteString string
if len(parts) == 1 {
localString = parts[0]
remoteString = parts[0]
} else if len(parts) == 2 {
localString = parts[0]
if localString == "" {
// support :5000
localString = "0"
}
remoteString = parts[1]
} else {
return nil, fmt.Errorf("invalid port format '%s'", portString)
}
localPort, err := strconv.ParseUint(localString, 10, 16)
if err != nil {
return nil, fmt.Errorf("error parsing local port '%s': %s", localString, err)
}
remotePort, err := strconv.ParseUint(remoteString, 10, 16)
if err != nil {
return nil, fmt.Errorf("error parsing remote port '%s': %s", remoteString, err)
}
if remotePort == 0 {
return nil, fmt.Errorf("remote port must be > 0")
}
forwards = append(forwards, ForwardedPort{uint16(localPort), uint16(remotePort)})
}
return forwards, nil
}
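// Worked example of the accepted spellings (illustrative, matching the
// comment above):
//   parsePorts([]string{"5000"})      -> [{Local: 5000, Remote: 5000}]
//   parsePorts([]string{"8888:5000"}) -> [{Local: 8888, Remote: 5000}]
//   parsePorts([]string{":5000"})     -> [{Local: 0, Remote: 5000}] // a random local port is chosen when listening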
type listenAddress struct {
address string
protocol string
failureMode string
}
func parseAddresses(addressesToParse []string) ([]listenAddress, error) {
var addresses []listenAddress
parsed := make(map[string]listenAddress)
for _, address := range addressesToParse {
if address == "localhost" {
if _, exists := parsed["127.0.0.1"]; !exists {
ip := listenAddress{address: "127.0.0.1", protocol: "tcp4", failureMode: "all"}
parsed[ip.address] = ip
}
if _, exists := parsed["::1"]; !exists {
ip := listenAddress{address: "::1", protocol: "tcp6", failureMode: "all"}
parsed[ip.address] = ip
}
} else if netutils.ParseIPSloppy(address).To4() != nil {
parsed[address] = listenAddress{address: address, protocol: "tcp4", failureMode: "any"}
} else if netutils.ParseIPSloppy(address) != nil {
parsed[address] = listenAddress{address: address, protocol: "tcp6", failureMode: "any"}
} else {
return nil, fmt.Errorf("%s is not a valid IP", address)
}
}
addresses = make([]listenAddress, len(parsed))
id := 0
for _, v := range parsed {
addresses[id] = v
id++
}
// Sort addresses before returning to get a stable order
sort.Slice(addresses, func(i, j int) bool { return addresses[i].address < addresses[j].address })
return addresses, nil
}
// New creates a new PortForwarder with localhost listen addresses.
func New(dialer httpstream.Dialer, ports []string, stopChan <-chan struct{}, readyChan chan struct{}, out, errOut io.Writer) (*PortForwarder, error) {
return NewOnAddresses(dialer, []string{"localhost"}, ports, stopChan, readyChan, out, errOut)
}
// NewOnAddresses creates a new PortForwarder with custom listen addresses.
func NewOnAddresses(dialer httpstream.Dialer, addresses []string, ports []string, stopChan <-chan struct{}, readyChan chan struct{}, out, errOut io.Writer) (*PortForwarder, error) {
if len(addresses) == 0 {
return nil, errors.New("you must specify at least 1 address")
}
parsedAddresses, err := parseAddresses(addresses)
if err != nil {
return nil, err
}
if len(ports) == 0 {
return nil, errors.New("you must specify at least 1 port")
}
parsedPorts, err := parsePorts(ports)
if err != nil {
return nil, err
}
return &PortForwarder{
dialer: dialer,
addresses: parsedAddresses,
ports: parsedPorts,
stopChan: stopChan,
Ready: readyChan,
out: out,
errOut: errOut,
}, nil
}
// ForwardPorts formats and executes a port forwarding request. The connection will remain
// open until stopChan is closed.
func (pf *PortForwarder) ForwardPorts() error {
defer pf.Close()
var err error
var protocol string
pf.streamConn, protocol, err = pf.dialer.Dial(PortForwardProtocolV1Name)
if err != nil {
return fmt.Errorf("error upgrading connection: %s", err)
}
defer pf.streamConn.Close()
if protocol != PortForwardProtocolV1Name {
return fmt.Errorf("unable to negotiate protocol: client supports %q, server returned %q", PortForwardProtocolV1Name, protocol)
}
return pf.forward()
}
// forward dials the remote host specific in req, upgrades the request, starts
// listeners for each port specified in ports, and forwards local connections
// to the remote host via streams.
func (pf *PortForwarder) forward() error {
var err error
listenSuccess := false
for i := range pf.ports {
port := &pf.ports[i]
err = pf.listenOnPort(port)
switch {
case err == nil:
listenSuccess = true
default:
if pf.errOut != nil {
fmt.Fprintf(pf.errOut, "Unable to listen on port %d: %v\n", port.Local, err)
}
}
}
if !listenSuccess {
return fmt.Errorf("unable to listen on any of the requested ports: %v", pf.ports)
}
if pf.Ready != nil {
close(pf.Ready)
}
// wait for interrupt or conn closure
select {
case <-pf.stopChan:
case <-pf.streamConn.CloseChan():
return ErrLostConnectionToPod
}
return nil
}
// listenOnPort delegates listener creation and waits for connections on requested bind addresses.
// An error is raised based on address groups (default and localhost) and their failure modes
func (pf *PortForwarder) listenOnPort(port *ForwardedPort) error {
var errors []error
failCounters := make(map[string]int, 2)
successCounters := make(map[string]int, 2)
for _, addr := range pf.addresses {
err := pf.listenOnPortAndAddress(port, addr.protocol, addr.address)
if err != nil {
errors = append(errors, err)
failCounters[addr.failureMode]++
} else {
successCounters[addr.failureMode]++
}
}
if successCounters["all"] == 0 && failCounters["all"] > 0 {
return fmt.Errorf("%s: %v", "Listeners failed to create with the following errors", errors)
}
if failCounters["any"] > 0 {
return fmt.Errorf("%s: %v", "Listeners failed to create with the following errors", errors)
}
return nil
}
// listenOnPortAndAddress delegates listener creation and waits for new connections
// in the background.
func (pf *PortForwarder) listenOnPortAndAddress(port *ForwardedPort, protocol string, address string) error {
listener, err := pf.getListener(protocol, address, port)
if err != nil {
return err
}
pf.listeners = append(pf.listeners, listener)
go pf.waitForConnection(listener, *port)
return nil
}
// getListener creates a listener on the interface targeted by the given hostname on the given port with
// the given protocol. protocol is in net.Listen style which basically admits values like tcp, tcp4, tcp6
func (pf *PortForwarder) getListener(protocol string, hostname string, port *ForwardedPort) (net.Listener, error) {
listener, err := net.Listen(protocol, net.JoinHostPort(hostname, strconv.Itoa(int(port.Local))))
if err != nil {
return nil, fmt.Errorf("unable to create listener: Error %s", err)
}
listenerAddress := listener.Addr().String()
host, localPort, _ := net.SplitHostPort(listenerAddress)
localPortUInt, err := strconv.ParseUint(localPort, 10, 16)
if err != nil {
fmt.Fprintf(pf.out, "Failed to forward from %s:%d -> %d\n", hostname, localPortUInt, port.Remote)
return nil, fmt.Errorf("error parsing local port: %s from %s (%s)", err, listenerAddress, host)
}
port.Local = uint16(localPortUInt)
if pf.out != nil {
fmt.Fprintf(pf.out, "Forwarding from %s -> %d\n", net.JoinHostPort(hostname, strconv.Itoa(int(localPortUInt))), port.Remote)
}
return listener, nil
}
// waitForConnection waits for new connections to listener and handles them in
// the background.
func (pf *PortForwarder) waitForConnection(listener net.Listener, port ForwardedPort) {
for {
select {
case <-pf.streamConn.CloseChan():
return
default:
conn, err := listener.Accept()
if err != nil {
// TODO consider using something like https://github.com/hydrogen18/stoppableListener?
if !strings.Contains(strings.ToLower(err.Error()), "use of closed network connection") {
runtime.HandleError(fmt.Errorf("error accepting connection on port %d: %v", port.Local, err))
}
return
}
go pf.handleConnection(conn, port)
}
}
}
func (pf *PortForwarder) nextRequestID() int {
pf.requestIDLock.Lock()
defer pf.requestIDLock.Unlock()
id := pf.requestID
pf.requestID++
return id
}
// handleConnection copies data between the local connection and the stream to
// the remote server.
func (pf *PortForwarder) handleConnection(conn net.Conn, port ForwardedPort) {
defer conn.Close()
if pf.out != nil {
fmt.Fprintf(pf.out, "Handling connection for %d\n", port.Local)
}
requestID := pf.nextRequestID()
// create error stream
headers := http.Header{}
headers.Set(v1.StreamType, v1.StreamTypeError)
headers.Set(v1.PortHeader, fmt.Sprintf("%d", port.Remote))
headers.Set(v1.PortForwardRequestIDHeader, strconv.Itoa(requestID))
errorStream, err := pf.streamConn.CreateStream(headers)
if err != nil {
runtime.HandleError(fmt.Errorf("error creating error stream for port %d -> %d: %v", port.Local, port.Remote, err))
return
}
// we're not writing to this stream
errorStream.Close()
defer pf.streamConn.RemoveStreams(errorStream)
errorChan := make(chan error)
go func() {
message, err := io.ReadAll(errorStream)
switch {
case err != nil:
errorChan <- fmt.Errorf("error reading from error stream for port %d -> %d: %v", port.Local, port.Remote, err)
case len(message) > 0:
errorChan <- fmt.Errorf("an error occurred forwarding %d -> %d: %v", port.Local, port.Remote, string(message))
}
close(errorChan)
}()
// create data stream
headers.Set(v1.StreamType, v1.StreamTypeData)
dataStream, err := pf.streamConn.CreateStream(headers)
if err != nil {
runtime.HandleError(fmt.Errorf("error creating forwarding stream for port %d -> %d: %v", port.Local, port.Remote, err))
return
}
defer pf.streamConn.RemoveStreams(dataStream)
localError := make(chan struct{})
remoteDone := make(chan struct{})
go func() {
// Copy from the remote side to the local port.
if _, err := io.Copy(conn, dataStream); err != nil && !strings.Contains(err.Error(), "use of closed network connection") {
runtime.HandleError(fmt.Errorf("error copying from remote stream to local connection: %v", err))
}
// inform the select below that the remote copy is done
close(remoteDone)
}()
go func() {
// inform server we're not sending any more data after copy unblocks
defer dataStream.Close()
// Copy from the local port to the remote side.
if _, err := io.Copy(dataStream, conn); err != nil && !strings.Contains(err.Error(), "use of closed network connection") {
runtime.HandleError(fmt.Errorf("error copying from local connection to remote stream: %v", err))
// break out of the select below without waiting for the other copy to finish
close(localError)
}
}()
// wait for either a local->remote error or for copying from remote->local to finish
select {
case <-remoteDone:
case <-localError:
}
// always expect something on errorChan (it may be nil)
err = <-errorChan
if err != nil {
runtime.HandleError(err)
pf.streamConn.Close()
}
}
// Close stops all listeners of PortForwarder.
func (pf *PortForwarder) Close() {
// stop all listeners
for _, l := range pf.listeners {
if err := l.Close(); err != nil {
runtime.HandleError(fmt.Errorf("error closing listener: %v", err))
}
}
}
// GetPorts will return the ports that were forwarded; this can be used to
// retrieve the locally-bound port in cases where the input was port 0. This
// function will signal an error if the Ready channel is nil or if the
// listeners are not ready yet; this function will succeed after the Ready
// channel has been closed.
func (pf *PortForwarder) GetPorts() ([]ForwardedPort, error) {
if pf.Ready == nil {
return nil, fmt.Errorf("no Ready channel provided")
}
select {
case <-pf.Ready:
return pf.ports, nil
default:
return nil, fmt.Errorf("listeners not ready")
}
}
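// A minimal usage sketch (the dialer is an assumption; in practice it is
// built from the pod's port-forward URL and a rest.Config, and the snippet
// assumes the "os" import): forward localhost:8080 to port 80 in the pod
// until stopCh is closed.
func forwardExample(dialer httpstream.Dialer, stopCh chan struct{}) error {
	readyCh := make(chan struct{})
	pf, err := New(dialer, []string{"8080:80"}, stopCh, readyCh, os.Stdout, os.Stderr)
	if err != nil {
		return err
	}
	// ForwardPorts blocks until stopCh is closed or the connection to the pod is lost.
	return pf.ForwardPorts()
}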

View File

@ -1,158 +0,0 @@
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package portforward
import (
"errors"
"fmt"
"io"
"net"
"sync"
"time"
gwebsocket "github.com/gorilla/websocket"
"k8s.io/klog/v2"
)
var _ net.Conn = &TunnelingConnection{}
// TunnelingConnection implements the "net.Conn" interface, wrapping
// a websocket connection that tunnels SPDY.
type TunnelingConnection struct {
name string
conn *gwebsocket.Conn
inProgressMessage io.Reader
closeOnce sync.Once
}
// NewTunnelingConnection wraps the passed gorilla/websockets connection
// with the TunnelingConnection struct (implementing net.Conn).
func NewTunnelingConnection(name string, conn *gwebsocket.Conn) *TunnelingConnection {
return &TunnelingConnection{
name: name,
conn: conn,
}
}
// Read implements "io.Reader" interface, reading from the stored connection
// into the passed buffer "p". Returns the number of bytes read and an error.
// Keeps track of the "inProgress" message from the tunneled connection.
func (c *TunnelingConnection) Read(p []byte) (int, error) {
klog.V(7).Infof("%s: tunneling connection read...", c.name)
defer klog.V(7).Infof("%s: tunneling connection read...complete", c.name)
for {
if c.inProgressMessage == nil {
klog.V(8).Infof("%s: tunneling connection read before NextReader()...", c.name)
messageType, nextReader, err := c.conn.NextReader()
if err != nil {
closeError := &gwebsocket.CloseError{}
if errors.As(err, &closeError) && closeError.Code == gwebsocket.CloseNormalClosure {
return 0, io.EOF
}
klog.V(4).Infof("%s:tunneling connection NextReader() error: %v", c.name, err)
return 0, err
}
if messageType != gwebsocket.BinaryMessage {
return 0, fmt.Errorf("invalid message type received")
}
c.inProgressMessage = nextReader
}
klog.V(8).Infof("%s: tunneling connection read in progress message...", c.name)
i, err := c.inProgressMessage.Read(p)
if i == 0 && err == io.EOF {
c.inProgressMessage = nil
} else {
klog.V(8).Infof("%s: read %d bytes, error=%v, bytes=% X", c.name, i, err, p[:i])
return i, err
}
}
}
// Write implements "io.Writer" interface, copying the data in the passed
// byte array "p" into the stored tunneled connection. Returns the number
// of bytes written and an error.
func (c *TunnelingConnection) Write(p []byte) (n int, err error) {
klog.V(7).Infof("%s: write: %d bytes, bytes=% X", c.name, len(p), p)
defer klog.V(7).Infof("%s: tunneling connection write...complete", c.name)
w, err := c.conn.NextWriter(gwebsocket.BinaryMessage)
if err != nil {
return 0, err
}
defer func() {
// close, which flushes the message
closeErr := w.Close()
if closeErr != nil && err == nil {
// if closing/flushing errored and we weren't already returning an error, return the close error
err = closeErr
}
}()
n, err = w.Write(p)
return
}
// Close implements "io.Closer" interface, signaling the other tunneled connection
// endpoint, and closing the tunneled connection only once.
func (c *TunnelingConnection) Close() error {
var err error
c.closeOnce.Do(func() {
klog.V(7).Infof("%s: tunneling connection Close()...", c.name)
// Signal other endpoint that websocket connection is closing; ignore error.
normalCloseMsg := gwebsocket.FormatCloseMessage(gwebsocket.CloseNormalClosure, "")
writeControlErr := c.conn.WriteControl(gwebsocket.CloseMessage, normalCloseMsg, time.Now().Add(time.Second))
closeErr := c.conn.Close()
if closeErr != nil {
err = closeErr
} else if writeControlErr != nil {
err = writeControlErr
}
})
return err
}
// LocalAddr implements part of the "net.Conn" interface, returning the local
// endpoint network address of the tunneled connection.
func (c *TunnelingConnection) LocalAddr() net.Addr {
return c.conn.LocalAddr()
}
// RemoteAddr implements part of the "net.Conn" interface, returning the remote
// endpoint network address of the tunneled connection.
func (c *TunnelingConnection) RemoteAddr() net.Addr {
return c.conn.RemoteAddr()
}
// SetDeadline sets the *absolute* time in the future for both
// read and write deadlines. Returns an error if one occurs.
func (c *TunnelingConnection) SetDeadline(t time.Time) error {
rerr := c.SetReadDeadline(t)
werr := c.SetWriteDeadline(t)
return errors.Join(rerr, werr)
}
// SetReadDeadline sets the *absolute* time in the future for the
// read deadline. Returns an error if one occurs.
func (c *TunnelingConnection) SetReadDeadline(t time.Time) error {
return c.conn.SetReadDeadline(t)
}
// SetWriteDeadline sets the *absolute* time in the future for the
// write deadline. Returns an error if one occurs.
func (c *TunnelingConnection) SetWriteDeadline(t time.Time) error {
return c.conn.SetWriteDeadline(t)
}
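// A minimal sketch (the websocket connection is assumed to be already
// negotiated): wrap it so a SPDY client can treat it as an ordinary net.Conn.
func wrapForSPDYExample(wsConn *gwebsocket.Conn) net.Conn {
	return NewTunnelingConnection("client", wsConn)
}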

View File

@ -1,93 +0,0 @@
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package portforward
import (
"fmt"
"net/http"
"net/url"
"strings"
"time"
"k8s.io/apimachinery/pkg/util/httpstream"
"k8s.io/apimachinery/pkg/util/httpstream/spdy"
constants "k8s.io/apimachinery/pkg/util/portforward"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/transport/websocket"
"k8s.io/klog/v2"
)
const PingPeriod = 10 * time.Second
// tunnelingDialer implements "httpstream.Dial" interface
type tunnelingDialer struct {
url *url.URL
transport http.RoundTripper
holder websocket.ConnectionHolder
}
// NewSPDYOverWebsocketDialer creates and returns a tunnelingDialer, which implements the "httpstream.Dialer"
// interface. The dialer can upgrade a websocket request, creating a websocket connection. This function
// returns an error if one occurs.
func NewSPDYOverWebsocketDialer(url *url.URL, config *restclient.Config) (httpstream.Dialer, error) {
transport, holder, err := websocket.RoundTripperFor(config)
if err != nil {
return nil, err
}
return &tunnelingDialer{
url: url,
transport: transport,
holder: holder,
}, nil
}
// Dial upgrades to a tunneling streaming connection, returning a SPDY connection
// containing a WebSockets connection (which implements "net.Conn"). Also
// returns the protocol negotiated, or an error.
func (d *tunnelingDialer) Dial(protocols ...string) (httpstream.Connection, string, error) {
// There is no passed context, so skip the context when creating request for now.
// Websockets requires "GET" method: RFC 6455 Sec. 4.1 (page 17).
req, err := http.NewRequest("GET", d.url.String(), nil)
if err != nil {
return nil, "", err
}
// Add the spdy tunneling prefix to the requested protocols. The tunneling
// handler will know how to negotiate these protocols.
tunnelingProtocols := []string{}
for _, protocol := range protocols {
tunnelingProtocol := constants.WebsocketsSPDYTunnelingPrefix + protocol
tunnelingProtocols = append(tunnelingProtocols, tunnelingProtocol)
}
klog.V(4).Infoln("Before WebSocket Upgrade Connection...")
conn, err := websocket.Negotiate(d.transport, d.holder, req, tunnelingProtocols...)
if err != nil {
return nil, "", err
}
if conn == nil {
return nil, "", fmt.Errorf("negotiated websocket connection is nil")
}
protocol := conn.Subprotocol()
protocol = strings.TrimPrefix(protocol, constants.WebsocketsSPDYTunnelingPrefix)
klog.V(4).Infof("negotiated protocol: %s", protocol)
// Wrap the websocket connection which implements "net.Conn".
tConn := NewTunnelingConnection("client", conn)
// Create SPDY connection injecting the previously created tunneling connection.
spdyConn, err := spdy.NewClientConnectionWithPings(tConn, PingPeriod)
return spdyConn, protocol, err
}
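// A minimal usage sketch (the URL and rest config are assumptions for
// illustration): build the dialer and negotiate the port-forward subprotocol;
// the returned connection is the SPDY connection tunneled over websockets.
func dialTunnelExample(u *url.URL, config *restclient.Config) (httpstream.Connection, string, error) {
	dialer, err := NewSPDYOverWebsocketDialer(u, config)
	if err != nil {
		return nil, "", err
	}
	return dialer.Dial(PortForwardProtocolV1Name)
}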

View File

@ -1,33 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package install
import (
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/kube-aggregator/pkg/apis/apiregistration"
"k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
"k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1"
)
// Install registers the API group and adds types to a scheme
func Install(scheme *runtime.Scheme) {
utilruntime.Must(apiregistration.AddToScheme(scheme))
utilruntime.Must(v1.AddToScheme(scheme))
utilruntime.Must(v1beta1.AddToScheme(scheme))
utilruntime.Must(scheme.SetVersionPriority(v1.SchemeGroupVersion, v1beta1.SchemeGroupVersion))
}
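// A minimal usage sketch: register the apiregistration API group into a fresh
// scheme, e.g. for a standalone decoder (the helper name is illustrative).
func newAPIRegistrationSchemeExample() *runtime.Scheme {
	scheme := runtime.NewScheme()
	Install(scheme)
	return scheme
}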

View File

@ -1,125 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package validation
import (
"fmt"
"strings"
"k8s.io/apimachinery/pkg/api/validation"
"k8s.io/apimachinery/pkg/api/validation/path"
utilvalidation "k8s.io/apimachinery/pkg/util/validation"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/kube-aggregator/pkg/apis/apiregistration"
)
// ValidateAPIService validates that the APIService is correctly defined.
func ValidateAPIService(apiService *apiregistration.APIService) field.ErrorList {
requiredName := apiService.Spec.Version + "." + apiService.Spec.Group
allErrs := validation.ValidateObjectMeta(&apiService.ObjectMeta, false,
func(name string, prefix bool) []string {
if minimalFailures := path.IsValidPathSegmentName(name); len(minimalFailures) > 0 {
return minimalFailures
}
// the name *must* be version.group
if name != requiredName {
return []string{fmt.Sprintf("must be `spec.version+\".\"+spec.group`: %q", requiredName)}
}
return []string{}
},
field.NewPath("metadata"))
// in this case we allow empty group
if len(apiService.Spec.Group) == 0 && apiService.Spec.Version != "v1" {
allErrs = append(allErrs, field.Required(field.NewPath("spec", "group"), "only v1 may have an empty group and it better be legacy kube"))
}
if len(apiService.Spec.Group) > 0 {
for _, errString := range utilvalidation.IsDNS1123Subdomain(apiService.Spec.Group) {
allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "group"), apiService.Spec.Group, errString))
}
}
for _, errString := range utilvalidation.IsDNS1035Label(apiService.Spec.Version) {
allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "version"), apiService.Spec.Version, errString))
}
if apiService.Spec.GroupPriorityMinimum <= 0 || apiService.Spec.GroupPriorityMinimum > 20000 {
allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "groupPriorityMinimum"), apiService.Spec.GroupPriorityMinimum, "must be positive and less than 20000"))
}
if apiService.Spec.VersionPriority <= 0 || apiService.Spec.VersionPriority > 1000 {
allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "versionPriority"), apiService.Spec.VersionPriority, "must be positive and less than 1000"))
}
if apiService.Spec.Service == nil {
if len(apiService.Spec.CABundle) != 0 {
allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "caBundle"), fmt.Sprintf("%d bytes", len(apiService.Spec.CABundle)), "local APIServices may not have a caBundle"))
}
if apiService.Spec.InsecureSkipTLSVerify {
allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "insecureSkipTLSVerify"), apiService.Spec.InsecureSkipTLSVerify, "local APIServices may not have insecureSkipTLSVerify"))
}
return allErrs
}
if len(apiService.Spec.Service.Namespace) == 0 {
allErrs = append(allErrs, field.Required(field.NewPath("spec", "service", "namespace"), ""))
}
if len(apiService.Spec.Service.Name) == 0 {
allErrs = append(allErrs, field.Required(field.NewPath("spec", "service", "name"), ""))
}
if errs := utilvalidation.IsValidPortNum(int(apiService.Spec.Service.Port)); errs != nil {
allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "service", "port"), apiService.Spec.Service.Port, "port is not valid: "+strings.Join(errs, ", ")))
}
if apiService.Spec.InsecureSkipTLSVerify && len(apiService.Spec.CABundle) > 0 {
allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "insecureSkipTLSVerify"), apiService.Spec.InsecureSkipTLSVerify, "may not be true if caBundle is present"))
}
return allErrs
}
// ValidateAPIServiceUpdate validates an update of APIService.
func ValidateAPIServiceUpdate(newAPIService *apiregistration.APIService, oldAPIService *apiregistration.APIService) field.ErrorList {
allErrs := validation.ValidateObjectMetaUpdate(&newAPIService.ObjectMeta, &oldAPIService.ObjectMeta, field.NewPath("metadata"))
allErrs = append(allErrs, ValidateAPIService(newAPIService)...)
return allErrs
}
// ValidateAPIServiceStatus validates that the APIService status is one of 'True', 'False' or 'Unknown'.
func ValidateAPIServiceStatus(status *apiregistration.APIServiceStatus, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
for i, condition := range status.Conditions {
if condition.Status != apiregistration.ConditionTrue &&
condition.Status != apiregistration.ConditionFalse &&
condition.Status != apiregistration.ConditionUnknown {
allErrs = append(allErrs, field.NotSupported(fldPath.Child("conditions").Index(i).Child("status"), condition.Status, []string{
string(apiregistration.ConditionTrue), string(apiregistration.ConditionFalse), string(apiregistration.ConditionUnknown)}))
}
}
return allErrs
}
// ValidateAPIServiceStatusUpdate validates an update of the status field of APIService.
func ValidateAPIServiceStatusUpdate(newAPIService *apiregistration.APIService, oldAPIService *apiregistration.APIService) field.ErrorList {
allErrs := validation.ValidateObjectMetaUpdate(&newAPIService.ObjectMeta, &oldAPIService.ObjectMeta, field.NewPath("metadata"))
allErrs = append(allErrs, ValidateAPIServiceStatus(&newAPIService.Status, field.NewPath("status"))...)
return allErrs
}
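// A minimal usage sketch (the object below is a hypothetical, minimal
// APIService and the snippet assumes the usual metav1 import): the name must
// be "<version>.<group>", and a non-empty ErrorList signals validation failures.
func validateExample() field.ErrorList {
	apiService := &apiregistration.APIService{
		ObjectMeta: metav1.ObjectMeta{Name: "v1alpha1.example.io"},
		Spec: apiregistration.APIServiceSpec{
			Group:                "example.io",
			Version:              "v1alpha1",
			GroupPriorityMinimum: 100,
			VersionPriority:      10,
			Service:              &apiregistration.ServiceReference{Namespace: "default", Name: "example-svc", Port: 443},
		},
	}
	return ValidateAPIService(apiService)
}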

View File

@ -1,647 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apiserver
import (
"context"
"fmt"
"net/http"
"sync"
"time"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/endpoints/discovery/aggregated"
genericfeatures "k8s.io/apiserver/pkg/features"
peerreconcilers "k8s.io/apiserver/pkg/reconcilers"
genericapiserver "k8s.io/apiserver/pkg/server"
"k8s.io/apiserver/pkg/server/dynamiccertificates"
"k8s.io/apiserver/pkg/server/egressselector"
serverstorage "k8s.io/apiserver/pkg/server/storage"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/transport"
"k8s.io/component-base/metrics/legacyregistry"
"k8s.io/component-base/tracing"
v1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
v1helper "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/helper"
"k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1"
aggregatorscheme "k8s.io/kube-aggregator/pkg/apiserver/scheme"
"k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
informers "k8s.io/kube-aggregator/pkg/client/informers/externalversions"
listers "k8s.io/kube-aggregator/pkg/client/listers/apiregistration/v1"
openapicontroller "k8s.io/kube-aggregator/pkg/controllers/openapi"
openapiaggregator "k8s.io/kube-aggregator/pkg/controllers/openapi/aggregator"
openapiv3controller "k8s.io/kube-aggregator/pkg/controllers/openapiv3"
openapiv3aggregator "k8s.io/kube-aggregator/pkg/controllers/openapiv3/aggregator"
localavailability "k8s.io/kube-aggregator/pkg/controllers/status/local"
availabilitymetrics "k8s.io/kube-aggregator/pkg/controllers/status/metrics"
remoteavailability "k8s.io/kube-aggregator/pkg/controllers/status/remote"
apiservicerest "k8s.io/kube-aggregator/pkg/registry/apiservice/rest"
openapicommon "k8s.io/kube-openapi/pkg/common"
)
// making sure we only register metrics once into legacy registry
var registerIntoLegacyRegistryOnce sync.Once
func init() {
// we need to add the options (like ListOptions) to empty v1
metav1.AddToGroupVersion(aggregatorscheme.Scheme, schema.GroupVersion{Group: "", Version: "v1"})
unversioned := schema.GroupVersion{Group: "", Version: "v1"}
aggregatorscheme.Scheme.AddUnversionedTypes(unversioned,
&metav1.Status{},
&metav1.APIVersions{},
&metav1.APIGroupList{},
&metav1.APIGroup{},
&metav1.APIResourceList{},
)
}
const (
// legacyAPIServiceName is the fixed name of the only non-groupified API version
legacyAPIServiceName = "v1."
// StorageVersionPostStartHookName is the name of the storage version updater post start hook.
StorageVersionPostStartHookName = "built-in-resources-storage-version-updater"
)
// ExtraConfig represents APIServices-specific configuration
type ExtraConfig struct {
// PeerAdvertiseAddress is the IP for this kube-apiserver which is used by peer apiservers to route a request
// to this apiserver. This happens in cases where the peer is not able to serve the request due to
// version skew. If unset, AdvertiseAddress/BindAddress will be used.
PeerAdvertiseAddress peerreconcilers.PeerAdvertiseAddress
// ProxyClientCert/Key are the client cert used to identify this proxy. Backing APIServices use
// this to confirm the proxy's identity
ProxyClientCertFile string
ProxyClientKeyFile string
// If present, the Dial method will be used for dialing out to delegate
// apiservers.
ProxyTransport *http.Transport
// Mechanism by which the Aggregator will resolve services. Required.
ServiceResolver ServiceResolver
RejectForwardingRedirects bool
// DisableRemoteAvailableConditionController disables the controller that updates the Available conditions for
// remote APIServices via querying endpoints of the referenced services. In generic controlplane use-cases,
// the concept of services and endpoints might differ, and might require another implementation of this
// controller. Local APIService are reconciled nevertheless.
DisableRemoteAvailableConditionController bool
}
// Config represents the configuration needed to create an APIAggregator.
type Config struct {
GenericConfig *genericapiserver.RecommendedConfig
ExtraConfig ExtraConfig
}
type completedConfig struct {
GenericConfig genericapiserver.CompletedConfig
ExtraConfig *ExtraConfig
}
// CompletedConfig same as Config, just to swap private object.
type CompletedConfig struct {
// Embed a private pointer that cannot be instantiated outside of this package.
*completedConfig
}
type runnable interface {
RunWithContext(ctx context.Context) error
}
// preparedAPIAggregator is a private wrapper that enforces a call of PrepareRun() before Run can be invoked.
type preparedAPIAggregator struct {
*APIAggregator
runnable runnable
}
// APIAggregator contains state for a Kubernetes cluster master/api server.
type APIAggregator struct {
GenericAPIServer *genericapiserver.GenericAPIServer
// provided for easier embedding
APIRegistrationInformers informers.SharedInformerFactory
delegateHandler http.Handler
// proxyCurrentCertKeyContent holds the client cert used to identify this proxy. Backing APIServices use this to confirm the proxy's identity
proxyCurrentCertKeyContent certKeyFunc
proxyTransportDial *transport.DialHolder
// proxyHandlers are the proxy handlers that are currently registered, keyed by apiservice.name
proxyHandlers map[string]*proxyHandler
// handledGroupVersions contain the groups that already have routes. The key is the name of the group and the value
// is the versions for the group.
handledGroupVersions map[string]sets.Set[string]
// lister is used to add group handling for /apis/<group> aggregator lookups based on
// controller state
lister listers.APIServiceLister
// Information needed to determine routing for the aggregator
serviceResolver ServiceResolver
// Enable swagger and/or OpenAPI if these configs are non-nil.
openAPIConfig *openapicommon.Config
// Enable OpenAPI V3 if these configs are non-nil
openAPIV3Config *openapicommon.OpenAPIV3Config
// openAPIAggregationController downloads and merges OpenAPI v2 specs.
openAPIAggregationController *openapicontroller.AggregationController
// openAPIV3AggregationController downloads and caches OpenAPI v3 specs.
openAPIV3AggregationController *openapiv3controller.AggregationController
// discoveryAggregationController downloads and caches discovery documents
// from all aggregated apiservices so they are available from /apis endpoint
// when discovery with resources are requested
discoveryAggregationController DiscoveryAggregationController
// rejectForwardingRedirects controls whether redirect responses from backing APIServices are rejected instead of forwarded
rejectForwardingRedirects bool
// tracerProvider is used to wrap the proxy transport and handler with tracing
tracerProvider tracing.TracerProvider
}
// Complete fills in any fields not set that are required to have valid data. It's mutating the receiver.
func (cfg *Config) Complete() CompletedConfig {
c := completedConfig{
cfg.GenericConfig.Complete(),
&cfg.ExtraConfig,
}
// the kube aggregator wires its own discovery mechanism
// TODO eventually collapse this by extracting all of the discovery out
c.GenericConfig.EnableDiscovery = false
return CompletedConfig{&c}
}
// NewWithDelegate returns a new instance of APIAggregator from the given config.
func (c completedConfig) NewWithDelegate(delegationTarget genericapiserver.DelegationTarget) (*APIAggregator, error) {
genericServer, err := c.GenericConfig.New("kube-aggregator", delegationTarget)
if err != nil {
return nil, err
}
apiregistrationClient, err := clientset.NewForConfig(c.GenericConfig.LoopbackClientConfig)
if err != nil {
return nil, err
}
informerFactory := informers.NewSharedInformerFactory(
apiregistrationClient,
5*time.Minute, // this is effectively used as a refresh interval right now. Might want to do something nicer later on.
)
// apiServiceRegistrationControllerInitiated is closed when APIServiceRegistrationController has finished "installing" all known APIServices.
// At this point we know that the proxy handler knows about APIServices and can handle client requests.
// Before that, requests might have resulted in a 404 response, which could have serious consequences for some controllers like GC and NS
//
// Note that the APIServiceRegistrationController waits for APIServiceInformer to synced before doing its work.
apiServiceRegistrationControllerInitiated := make(chan struct{})
if err := genericServer.RegisterMuxAndDiscoveryCompleteSignal("APIServiceRegistrationControllerInitiated", apiServiceRegistrationControllerInitiated); err != nil {
return nil, err
}
var proxyTransportDial *transport.DialHolder
if c.GenericConfig.EgressSelector != nil {
egressDialer, err := c.GenericConfig.EgressSelector.Lookup(egressselector.Cluster.AsNetworkContext())
if err != nil {
return nil, err
}
if egressDialer != nil {
proxyTransportDial = &transport.DialHolder{Dial: egressDialer}
}
} else if c.ExtraConfig.ProxyTransport != nil && c.ExtraConfig.ProxyTransport.DialContext != nil {
proxyTransportDial = &transport.DialHolder{Dial: c.ExtraConfig.ProxyTransport.DialContext}
}
s := &APIAggregator{
GenericAPIServer: genericServer,
delegateHandler: delegationTarget.UnprotectedHandler(),
proxyTransportDial: proxyTransportDial,
proxyHandlers: map[string]*proxyHandler{},
handledGroupVersions: map[string]sets.Set[string]{},
lister: informerFactory.Apiregistration().V1().APIServices().Lister(),
APIRegistrationInformers: informerFactory,
serviceResolver: c.ExtraConfig.ServiceResolver,
openAPIConfig: c.GenericConfig.OpenAPIConfig,
openAPIV3Config: c.GenericConfig.OpenAPIV3Config,
proxyCurrentCertKeyContent: func() (bytes []byte, bytes2 []byte) { return nil, nil },
rejectForwardingRedirects: c.ExtraConfig.RejectForwardingRedirects,
tracerProvider: c.GenericConfig.TracerProvider,
}
// used later to filter the served resources by those that have expired.
resourceExpirationEvaluator, err := genericapiserver.NewResourceExpirationEvaluator(s.GenericAPIServer.EffectiveVersion.EmulationVersion())
if err != nil {
return nil, err
}
apiGroupInfo := apiservicerest.NewRESTStorage(c.GenericConfig.MergedResourceConfig, c.GenericConfig.RESTOptionsGetter, resourceExpirationEvaluator.ShouldServeForVersion(1, 22))
if err := s.GenericAPIServer.InstallAPIGroup(&apiGroupInfo); err != nil {
return nil, err
}
enabledVersions := sets.NewString()
for v := range apiGroupInfo.VersionedResourcesStorageMap {
enabledVersions.Insert(v)
}
if !enabledVersions.Has(v1.SchemeGroupVersion.Version) {
return nil, fmt.Errorf("API group/version %s must be enabled", v1.SchemeGroupVersion.String())
}
apisHandler := &apisHandler{
codecs: aggregatorscheme.Codecs,
lister: s.lister,
discoveryGroup: discoveryGroup(enabledVersions),
}
if utilfeature.DefaultFeatureGate.Enabled(genericfeatures.AggregatedDiscoveryEndpoint) {
apisHandlerWithAggregationSupport := aggregated.WrapAggregatedDiscoveryToHandler(apisHandler, s.GenericAPIServer.AggregatedDiscoveryGroupManager)
s.GenericAPIServer.Handler.NonGoRestfulMux.Handle("/apis", apisHandlerWithAggregationSupport)
} else {
s.GenericAPIServer.Handler.NonGoRestfulMux.Handle("/apis", apisHandler)
}
s.GenericAPIServer.Handler.NonGoRestfulMux.UnlistedHandle("/apis/", apisHandler)
apiserviceRegistrationController := NewAPIServiceRegistrationController(informerFactory.Apiregistration().V1().APIServices(), s)
if len(c.ExtraConfig.ProxyClientCertFile) > 0 && len(c.ExtraConfig.ProxyClientKeyFile) > 0 {
aggregatorProxyCerts, err := dynamiccertificates.NewDynamicServingContentFromFiles("aggregator-proxy-cert", c.ExtraConfig.ProxyClientCertFile, c.ExtraConfig.ProxyClientKeyFile)
if err != nil {
return nil, err
}
// We pass the context to aggregatorProxyCerts.RunOnce because its signature requires RunOnce(ctx); however,
// the context is not used at all, so passing an empty context shouldn't be a problem
if err := aggregatorProxyCerts.RunOnce(context.Background()); err != nil {
return nil, err
}
aggregatorProxyCerts.AddListener(apiserviceRegistrationController)
s.proxyCurrentCertKeyContent = aggregatorProxyCerts.CurrentCertKeyContent
s.GenericAPIServer.AddPostStartHookOrDie("aggregator-reload-proxy-client-cert", func(postStartHookContext genericapiserver.PostStartHookContext) error {
go aggregatorProxyCerts.Run(postStartHookContext, 1)
return nil
})
}
s.GenericAPIServer.AddPostStartHookOrDie("start-kube-aggregator-informers", func(context genericapiserver.PostStartHookContext) error {
informerFactory.Start(context.Done())
c.GenericConfig.SharedInformerFactory.Start(context.Done())
return nil
})
// create shared (remote and local) availability metrics
// TODO: decouple from legacyregistry
metrics := availabilitymetrics.New()
registerIntoLegacyRegistryOnce.Do(func() { err = metrics.Register(legacyregistry.Register, legacyregistry.CustomRegister) })
if err != nil {
return nil, err
}
// always run local availability controller
local, err := localavailability.New(
informerFactory.Apiregistration().V1().APIServices(),
apiregistrationClient.ApiregistrationV1(),
metrics,
)
if err != nil {
return nil, err
}
s.GenericAPIServer.AddPostStartHookOrDie("apiservice-status-local-available-controller", func(context genericapiserver.PostStartHookContext) error {
// if we end up blocking for long periods of time, we may need to increase workers.
go local.Run(5, context.Done())
return nil
})
// conditionally run remote availability controller. This could be replaced in certain
// generic controlplane use-cases where there is another concept of services and/or endpoints.
if !c.ExtraConfig.DisableRemoteAvailableConditionController {
remote, err := remoteavailability.New(
informerFactory.Apiregistration().V1().APIServices(),
c.GenericConfig.SharedInformerFactory.Core().V1().Services(),
c.GenericConfig.SharedInformerFactory.Core().V1().Endpoints(),
apiregistrationClient.ApiregistrationV1(),
proxyTransportDial,
(func() ([]byte, []byte))(s.proxyCurrentCertKeyContent),
s.serviceResolver,
metrics,
)
if err != nil {
return nil, err
}
s.GenericAPIServer.AddPostStartHookOrDie("apiservice-status-remote-available-controller", func(context genericapiserver.PostStartHookContext) error {
// if we end up blocking for long periods of time, we may need to increase workers.
go remote.Run(5, context.Done())
return nil
})
}
s.GenericAPIServer.AddPostStartHookOrDie("apiservice-registration-controller", func(context genericapiserver.PostStartHookContext) error {
go apiserviceRegistrationController.Run(context.Done(), apiServiceRegistrationControllerInitiated)
select {
case <-context.Done():
case <-apiServiceRegistrationControllerInitiated:
}
return nil
})
if utilfeature.DefaultFeatureGate.Enabled(genericfeatures.AggregatedDiscoveryEndpoint) {
s.discoveryAggregationController = NewDiscoveryManager(
// Use aggregator as the source name to avoid overwriting native/CRD
// groups
s.GenericAPIServer.AggregatedDiscoveryGroupManager.WithSource(aggregated.AggregatorSource),
)
// Setup discovery endpoint
s.GenericAPIServer.AddPostStartHookOrDie("apiservice-discovery-controller", func(context genericapiserver.PostStartHookContext) error {
// Discovery aggregation depends on the apiservice registration controller
// having the full list of APIServices already synced
select {
case <-context.Done():
return nil
// Context cancelled, should abort/clean goroutines
case <-apiServiceRegistrationControllerInitiated:
}
// Run the discovery manager's worker to watch for new/removed/updated
// APIServices so that the discovery document can be updated at runtime
// When discovery is ready, all APIServices will be present, with APIServices
// that have not successfully synced discovery to be present but marked as Stale.
discoverySyncedCh := make(chan struct{})
go s.discoveryAggregationController.Run(context.Done(), discoverySyncedCh)
select {
case <-context.Done():
return nil
// Context cancelled, should abort/clean goroutines
case <-discoverySyncedCh:
// API services successfully sync
}
return nil
})
}
if utilfeature.DefaultFeatureGate.Enabled(genericfeatures.StorageVersionAPI) &&
utilfeature.DefaultFeatureGate.Enabled(genericfeatures.APIServerIdentity) {
// Spawn a goroutine in aggregator apiserver to update storage version for
// all built-in resources
s.GenericAPIServer.AddPostStartHookOrDie(StorageVersionPostStartHookName, func(hookContext genericapiserver.PostStartHookContext) error {
// Wait for apiserver-identity to exist first before updating storage
// versions, to avoid storage version GC accidentally garbage-collecting
// storage versions.
kubeClient, err := kubernetes.NewForConfig(hookContext.LoopbackClientConfig)
if err != nil {
return err
}
if err := wait.PollImmediateUntil(100*time.Millisecond, func() (bool, error) {
_, err := kubeClient.CoordinationV1().Leases(metav1.NamespaceSystem).Get(
context.TODO(), s.GenericAPIServer.APIServerID, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
return false, nil
}
if err != nil {
return false, err
}
return true, nil
}, hookContext.Done()); err != nil {
return fmt.Errorf("failed to wait for apiserver-identity lease %s to be created: %v",
s.GenericAPIServer.APIServerID, err)
}
// Technically an apiserver only needs to update storage version once during bootstrap.
// Reconcile StorageVersion objects every 10 minutes will help in the case that the
// StorageVersion objects get accidentally modified/deleted by a different agent. In that
// case, the reconciliation ensures future storage migration still works. If nothing gets
// changed, the reconciliation update is a noop and gets short-circuited by the apiserver,
// therefore won't change the resource version and trigger storage migration.
go wait.PollImmediateUntil(10*time.Minute, func() (bool, error) {
// All apiservers (aggregator-apiserver, kube-apiserver, apiextensions-apiserver)
// share the same generic apiserver config. The same StorageVersion manager is used
// to register all built-in resources when the generic apiservers install APIs.
s.GenericAPIServer.StorageVersionManager.UpdateStorageVersions(hookContext.LoopbackClientConfig, s.GenericAPIServer.APIServerID)
return false, nil
}, hookContext.Done())
// Once the storage version updater finishes the first round of update,
// the PostStartHook will return to unblock /healthz. The handler chain
// won't block write requests anymore. Check every second since it's not
// expensive.
wait.PollImmediateUntil(1*time.Second, func() (bool, error) {
return s.GenericAPIServer.StorageVersionManager.Completed(), nil
}, hookContext.Done())
return nil
})
}
return s, nil
}
// PrepareRun prepares the aggregator to run, by setting up the OpenAPI spec &
// aggregated discovery document and calling the generic PrepareRun.
func (s *APIAggregator) PrepareRun() (preparedAPIAggregator, error) {
// add post start hook before generic PrepareRun in order to be before /healthz installation
if s.openAPIConfig != nil {
s.GenericAPIServer.AddPostStartHookOrDie("apiservice-openapi-controller", func(context genericapiserver.PostStartHookContext) error {
go s.openAPIAggregationController.Run(context.Done())
return nil
})
}
if s.openAPIV3Config != nil {
s.GenericAPIServer.AddPostStartHookOrDie("apiservice-openapiv3-controller", func(context genericapiserver.PostStartHookContext) error {
go s.openAPIV3AggregationController.Run(context.Done())
return nil
})
}
prepared := s.GenericAPIServer.PrepareRun()
// delay OpenAPI setup until the delegate had a chance to setup their OpenAPI handlers
if s.openAPIConfig != nil {
specDownloader := openapiaggregator.NewDownloader()
openAPIAggregator, err := openapiaggregator.BuildAndRegisterAggregator(
&specDownloader,
s.GenericAPIServer.NextDelegate(),
s.GenericAPIServer.Handler.GoRestfulContainer.RegisteredWebServices(),
s.openAPIConfig,
s.GenericAPIServer.Handler.NonGoRestfulMux)
if err != nil {
return preparedAPIAggregator{}, err
}
s.openAPIAggregationController = openapicontroller.NewAggregationController(&specDownloader, openAPIAggregator)
}
if s.openAPIV3Config != nil {
specDownloaderV3 := openapiv3aggregator.NewDownloader()
openAPIV3Aggregator, err := openapiv3aggregator.BuildAndRegisterAggregator(
specDownloaderV3,
s.GenericAPIServer.NextDelegate(),
s.GenericAPIServer.Handler.GoRestfulContainer,
s.openAPIV3Config,
s.GenericAPIServer.Handler.NonGoRestfulMux)
if err != nil {
return preparedAPIAggregator{}, err
}
s.openAPIV3AggregationController = openapiv3controller.NewAggregationController(openAPIV3Aggregator)
}
return preparedAPIAggregator{APIAggregator: s, runnable: prepared}, nil
}
func (s preparedAPIAggregator) Run(ctx context.Context) error {
return s.runnable.RunWithContext(ctx)
}
// AddAPIService adds an API service. It is not thread-safe, so only call it on one thread at a time please.
// It's a slow-moving API, so it's ok to run the controller on a single thread
func (s *APIAggregator) AddAPIService(apiService *v1.APIService) error {
// if the proxyHandler already exists, it needs to be updated. The aggregation bits do not
// since they are wired against listers because they require multiple resources to respond
if proxyHandler, exists := s.proxyHandlers[apiService.Name]; exists {
proxyHandler.updateAPIService(apiService)
if s.openAPIAggregationController != nil {
s.openAPIAggregationController.UpdateAPIService(proxyHandler, apiService)
}
if s.openAPIV3AggregationController != nil {
s.openAPIV3AggregationController.UpdateAPIService(proxyHandler, apiService)
}
// Forward calls to discovery manager to update discovery document
if s.discoveryAggregationController != nil {
handlerCopy := *proxyHandler
handlerCopy.setServiceAvailable()
s.discoveryAggregationController.AddAPIService(apiService, &handlerCopy)
}
return nil
}
proxyPath := "/apis/" + apiService.Spec.Group + "/" + apiService.Spec.Version
// v1. is a special case for the legacy API. It proxies to a wider set of endpoints.
if apiService.Name == legacyAPIServiceName {
proxyPath = "/api"
}
// register the proxy handler
proxyHandler := &proxyHandler{
localDelegate: s.delegateHandler,
proxyCurrentCertKeyContent: s.proxyCurrentCertKeyContent,
proxyTransportDial: s.proxyTransportDial,
serviceResolver: s.serviceResolver,
rejectForwardingRedirects: s.rejectForwardingRedirects,
tracerProvider: s.tracerProvider,
}
proxyHandler.updateAPIService(apiService)
if s.openAPIAggregationController != nil {
s.openAPIAggregationController.AddAPIService(proxyHandler, apiService)
}
if s.openAPIV3AggregationController != nil {
s.openAPIV3AggregationController.AddAPIService(proxyHandler, apiService)
}
if s.discoveryAggregationController != nil {
s.discoveryAggregationController.AddAPIService(apiService, proxyHandler)
}
s.proxyHandlers[apiService.Name] = proxyHandler
s.GenericAPIServer.Handler.NonGoRestfulMux.Handle(proxyPath, proxyHandler)
s.GenericAPIServer.Handler.NonGoRestfulMux.UnlistedHandlePrefix(proxyPath+"/", proxyHandler)
// if we're dealing with the legacy group, we're done here
if apiService.Name == legacyAPIServiceName {
return nil
}
// if we've already registered the path with the handler, we don't want to do it again.
versions, exist := s.handledGroupVersions[apiService.Spec.Group]
if exist {
versions.Insert(apiService.Spec.Version)
return nil
}
// it's time to register the group aggregation endpoint
groupPath := "/apis/" + apiService.Spec.Group
groupDiscoveryHandler := &apiGroupHandler{
codecs: aggregatorscheme.Codecs,
groupName: apiService.Spec.Group,
lister: s.lister,
delegate: s.delegateHandler,
}
// aggregation is protected
s.GenericAPIServer.Handler.NonGoRestfulMux.Handle(groupPath, groupDiscoveryHandler)
s.GenericAPIServer.Handler.NonGoRestfulMux.UnlistedHandle(groupPath+"/", groupDiscoveryHandler)
s.handledGroupVersions[apiService.Spec.Group] = sets.New[string](apiService.Spec.Version)
return nil
}
// RemoveAPIService removes the APIService from being handled. It is not thread-safe, so only call it on one thread at a time please.
// It's a slow moving API, so it's ok to run the controller on a single thread.
func (s *APIAggregator) RemoveAPIService(apiServiceName string) {
// Forward calls to discovery manager to update discovery document
if s.discoveryAggregationController != nil {
s.discoveryAggregationController.RemoveAPIService(apiServiceName)
}
version := v1helper.APIServiceNameToGroupVersion(apiServiceName)
proxyPath := "/apis/" + version.Group + "/" + version.Version
// v1. is a special case for the legacy API. It proxies to a wider set of endpoints.
if apiServiceName == legacyAPIServiceName {
proxyPath = "/api"
}
s.GenericAPIServer.Handler.NonGoRestfulMux.Unregister(proxyPath)
s.GenericAPIServer.Handler.NonGoRestfulMux.Unregister(proxyPath + "/")
if s.openAPIAggregationController != nil {
s.openAPIAggregationController.RemoveAPIService(apiServiceName)
}
if s.openAPIV3AggregationController != nil {
s.openAPIV3AggregationController.RemoveAPIService(apiServiceName)
}
delete(s.proxyHandlers, apiServiceName)
versions, exist := s.handledGroupVersions[version.Group]
if !exist {
return
}
versions.Delete(version.Version)
if versions.Len() > 0 {
return
}
delete(s.handledGroupVersions, version.Group)
groupPath := "/apis/" + version.Group
s.GenericAPIServer.Handler.NonGoRestfulMux.Unregister(groupPath)
s.GenericAPIServer.Handler.NonGoRestfulMux.Unregister(groupPath + "/")
}
// DefaultAPIResourceConfigSource returns default configuration for an APIResource.
func DefaultAPIResourceConfigSource() *serverstorage.ResourceConfig {
ret := serverstorage.NewResourceConfig()
// NOTE: GroupVersions listed here will be enabled by default. Don't put alpha versions in the list.
ret.EnableVersions(
v1.SchemeGroupVersion,
v1beta1.SchemeGroupVersion,
)
return ret
}

View File

@ -1,212 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apiserver
import (
"fmt"
"time"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/server/dynamiccertificates"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog/v2"
v1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
informers "k8s.io/kube-aggregator/pkg/client/informers/externalversions/apiregistration/v1"
listers "k8s.io/kube-aggregator/pkg/client/listers/apiregistration/v1"
"k8s.io/kube-aggregator/pkg/controllers"
)
// APIHandlerManager defines the behaviour that an API handler should have.
type APIHandlerManager interface {
AddAPIService(apiService *v1.APIService) error
RemoveAPIService(apiServiceName string)
}
// APIServiceRegistrationController is responsible for registering and removing API services.
type APIServiceRegistrationController struct {
apiHandlerManager APIHandlerManager
apiServiceLister listers.APIServiceLister
apiServiceSynced cache.InformerSynced
// To allow injection for testing.
syncFn func(key string) error
queue workqueue.TypedRateLimitingInterface[string]
}
var _ dynamiccertificates.Listener = &APIServiceRegistrationController{}
// NewAPIServiceRegistrationController returns a new APIServiceRegistrationController.
func NewAPIServiceRegistrationController(apiServiceInformer informers.APIServiceInformer, apiHandlerManager APIHandlerManager) *APIServiceRegistrationController {
c := &APIServiceRegistrationController{
apiHandlerManager: apiHandlerManager,
apiServiceLister: apiServiceInformer.Lister(),
apiServiceSynced: apiServiceInformer.Informer().HasSynced,
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "APIServiceRegistrationController"},
),
}
apiServiceInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: c.addAPIService,
UpdateFunc: c.updateAPIService,
DeleteFunc: c.deleteAPIService,
})
c.syncFn = c.sync
return c
}
func (c *APIServiceRegistrationController) sync(key string) error {
apiService, err := c.apiServiceLister.Get(key)
if apierrors.IsNotFound(err) {
c.apiHandlerManager.RemoveAPIService(key)
return nil
}
if err != nil {
return err
}
return c.apiHandlerManager.AddAPIService(apiService)
}
// Run starts APIServiceRegistrationController which will process all registration requests until stopCh is closed.
func (c *APIServiceRegistrationController) Run(stopCh <-chan struct{}, handlerSyncedCh chan<- struct{}) {
defer utilruntime.HandleCrash()
defer c.queue.ShutDown()
klog.Info("Starting APIServiceRegistrationController")
defer klog.Info("Shutting down APIServiceRegistrationController")
if !controllers.WaitForCacheSync("APIServiceRegistrationController", stopCh, c.apiServiceSynced) {
return
}
// initially sync all APIServices to make sure the proxy handler is complete
if err := wait.PollImmediateUntil(time.Second, func() (bool, error) {
services, err := c.apiServiceLister.List(labels.Everything())
if err != nil {
utilruntime.HandleError(fmt.Errorf("failed to initially list APIServices: %v", err))
return false, nil
}
for _, s := range services {
if err := c.apiHandlerManager.AddAPIService(s); err != nil {
utilruntime.HandleError(fmt.Errorf("failed to initially sync APIService %s: %v", s.Name, err))
return false, nil
}
}
return true, nil
}, stopCh); err == wait.ErrWaitTimeout {
utilruntime.HandleError(fmt.Errorf("timed out waiting for proxy handler to initialize"))
return
} else if err != nil {
panic(fmt.Errorf("unexpected error: %v", err))
}
close(handlerSyncedCh)
// only start one worker thread since it's a slow-moving API and the aggregation server's adding bits
// aren't thread-safe
go wait.Until(c.runWorker, time.Second, stopCh)
<-stopCh
}
func (c *APIServiceRegistrationController) runWorker() {
for c.processNextWorkItem() {
}
}
// processNextWorkItem deals with one key off the queue. It returns false when it's time to quit.
func (c *APIServiceRegistrationController) processNextWorkItem() bool {
key, quit := c.queue.Get()
if quit {
return false
}
defer c.queue.Done(key)
err := c.syncFn(key)
if err == nil {
c.queue.Forget(key)
return true
}
utilruntime.HandleError(fmt.Errorf("%v failed with : %v", key, err))
c.queue.AddRateLimited(key)
return true
}
func (c *APIServiceRegistrationController) enqueueInternal(obj *v1.APIService) {
key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
if err != nil {
klog.Errorf("Couldn't get key for object %#v: %v", obj, err)
return
}
c.queue.Add(key)
}
func (c *APIServiceRegistrationController) addAPIService(obj interface{}) {
castObj := obj.(*v1.APIService)
klog.V(4).Infof("Adding %s", castObj.Name)
c.enqueueInternal(castObj)
}
func (c *APIServiceRegistrationController) updateAPIService(obj, _ interface{}) {
castObj := obj.(*v1.APIService)
klog.V(4).Infof("Updating %s", castObj.Name)
c.enqueueInternal(castObj)
}
func (c *APIServiceRegistrationController) deleteAPIService(obj interface{}) {
castObj, ok := obj.(*v1.APIService)
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
klog.Errorf("Couldn't get object from tombstone %#v", obj)
return
}
castObj, ok = tombstone.Obj.(*v1.APIService)
if !ok {
klog.Errorf("Tombstone contained object that is not expected %#v", obj)
return
}
}
klog.V(4).Infof("Deleting %q", castObj.Name)
c.enqueueInternal(castObj)
}
// Enqueue queues all apiservices to be rehandled.
// This method is used by the controller to notify when the proxy cert content changes.
func (c *APIServiceRegistrationController) Enqueue() {
apiServices, err := c.apiServiceLister.List(labels.Everything())
if err != nil {
utilruntime.HandleError(err)
return
}
for _, apiService := range apiServices {
c.addAPIService(apiService)
}
}
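A minimal wiring sketch for this controller (hypothetical names: apiServiceInformer comes from the kube-aggregator informer factory, handler is anything implementing APIHandlerManager such as the *APIAggregator above, and stopCh is the server's stop channel):

c := NewAPIServiceRegistrationController(apiServiceInformer, handler)
handlerSyncedCh := make(chan struct{})
go c.Run(stopCh, handlerSyncedCh)
<-handlerSyncedCh // every pre-existing APIService has been handed to AddAPIService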

View File

@ -1,166 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apiserver
import (
"net/http"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apiserver/pkg/endpoints/handlers/negotiation"
"k8s.io/apiserver/pkg/endpoints/handlers/responsewriters"
apiregistrationv1api "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
apiregistrationv1apihelper "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/helper"
apiregistrationv1beta1api "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1"
listers "k8s.io/kube-aggregator/pkg/client/listers/apiregistration/v1"
)
// apisHandler serves the `/apis` endpoint.
// This is registered as a filter so that it never collides with any explicitly registered endpoints
type apisHandler struct {
codecs serializer.CodecFactory
lister listers.APIServiceLister
discoveryGroup metav1.APIGroup
}
func discoveryGroup(enabledVersions sets.String) metav1.APIGroup {
retval := metav1.APIGroup{
Name: apiregistrationv1api.GroupName,
Versions: []metav1.GroupVersionForDiscovery{
{
GroupVersion: apiregistrationv1api.SchemeGroupVersion.String(),
Version: apiregistrationv1api.SchemeGroupVersion.Version,
},
},
PreferredVersion: metav1.GroupVersionForDiscovery{
GroupVersion: apiregistrationv1api.SchemeGroupVersion.String(),
Version: apiregistrationv1api.SchemeGroupVersion.Version,
},
}
if enabledVersions.Has(apiregistrationv1beta1api.SchemeGroupVersion.Version) {
retval.Versions = append(retval.Versions, metav1.GroupVersionForDiscovery{
GroupVersion: apiregistrationv1beta1api.SchemeGroupVersion.String(),
Version: apiregistrationv1beta1api.SchemeGroupVersion.Version,
})
}
return retval
}
func (r *apisHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
discoveryGroupList := &metav1.APIGroupList{
// always add OUR api group to the list first. Since we'll never have a registered APIService for it
// and since this is the crux of the API, having this first will give our names priority. It's good to be king.
Groups: []metav1.APIGroup{r.discoveryGroup},
}
apiServices, err := r.lister.List(labels.Everything())
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
apiServicesByGroup := apiregistrationv1apihelper.SortedByGroupAndVersion(apiServices)
for _, apiGroupServers := range apiServicesByGroup {
// skip the legacy group
if len(apiGroupServers[0].Spec.Group) == 0 {
continue
}
discoveryGroup := convertToDiscoveryAPIGroup(apiGroupServers)
if discoveryGroup != nil {
discoveryGroupList.Groups = append(discoveryGroupList.Groups, *discoveryGroup)
}
}
responsewriters.WriteObjectNegotiated(r.codecs, negotiation.DefaultEndpointRestrictions, schema.GroupVersion{}, w, req, http.StatusOK, discoveryGroupList, false)
}
// convertToDiscoveryAPIGroup takes apiservices in a single group and returns a discovery compatible object.
// if none of the services are available, it will return nil.
func convertToDiscoveryAPIGroup(apiServices []*apiregistrationv1api.APIService) *metav1.APIGroup {
apiServicesByGroup := apiregistrationv1apihelper.SortedByGroupAndVersion(apiServices)[0]
var discoveryGroup *metav1.APIGroup
for _, apiService := range apiServicesByGroup {
// the first APIService which is valid becomes the default
if discoveryGroup == nil {
discoveryGroup = &metav1.APIGroup{
Name: apiService.Spec.Group,
PreferredVersion: metav1.GroupVersionForDiscovery{
GroupVersion: apiService.Spec.Group + "/" + apiService.Spec.Version,
Version: apiService.Spec.Version,
},
}
}
discoveryGroup.Versions = append(discoveryGroup.Versions,
metav1.GroupVersionForDiscovery{
GroupVersion: apiService.Spec.Group + "/" + apiService.Spec.Version,
Version: apiService.Spec.Version,
},
)
}
return discoveryGroup
}
// apiGroupHandler serves the `/apis/<group>` endpoint.
type apiGroupHandler struct {
codecs serializer.CodecFactory
groupName string
lister listers.APIServiceLister
delegate http.Handler
}
func (r *apiGroupHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
apiServices, err := r.lister.List(labels.Everything())
if statusErr, ok := err.(*apierrors.StatusError); ok {
responsewriters.WriteRawJSON(int(statusErr.Status().Code), statusErr.Status(), w)
return
}
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
apiServicesForGroup := []*apiregistrationv1api.APIService{}
for _, apiService := range apiServices {
if apiService.Spec.Group == r.groupName {
apiServicesForGroup = append(apiServicesForGroup, apiService)
}
}
if len(apiServicesForGroup) == 0 {
r.delegate.ServeHTTP(w, req)
return
}
discoveryGroup := convertToDiscoveryAPIGroup(apiServicesForGroup)
if discoveryGroup == nil {
http.Error(w, "", http.StatusNotFound)
return
}
responsewriters.WriteObjectNegotiated(r.codecs, negotiation.DefaultEndpointRestrictions, schema.GroupVersion{}, w, req, http.StatusOK, discoveryGroup, false)
}
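As an illustration, the apisHandler's own discovery entry can be prebuilt with the helper above; a sketch with an assumed APIService lister and the package's shared codec factory (sets.NewString comes from the sets package already imported here):

handler := &apisHandler{
	codecs:         aggregatorscheme.Codecs,  // shared codec factory, assumed imported
	lister:         apiServiceLister,         // assumed listers.APIServiceLister
	discoveryGroup: discoveryGroup(sets.NewString("v1", "v1beta1")),
}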

View File

@ -1,664 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apiserver
import (
"errors"
"fmt"
"net/http"
"sync"
"time"
apidiscoveryv2 "k8s.io/api/apidiscovery/v2"
apidiscoveryv2beta1 "k8s.io/api/apidiscovery/v2beta1"
apidiscoveryv2conversion "k8s.io/apiserver/pkg/apis/apidiscovery/v2"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/authentication/user"
"k8s.io/apiserver/pkg/endpoints"
discoveryendpoint "k8s.io/apiserver/pkg/endpoints/discovery/aggregated"
"k8s.io/apiserver/pkg/endpoints/request"
"k8s.io/client-go/discovery"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog/v2"
apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
"k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/helper"
"k8s.io/kube-aggregator/pkg/apiserver/scheme"
)
var APIRegistrationGroupVersion metav1.GroupVersion = metav1.GroupVersion{Group: "apiregistration.k8s.io", Version: "v1"}
// Maximum is 20000. Set to higher than that so apiregistration always is listed
// first (mirrors v1 discovery behavior)
var APIRegistrationGroupPriority int = 20001
// Aggregated discovery content-type GVK.
var v2Beta1GVK = schema.GroupVersionKind{
Group: "apidiscovery.k8s.io",
Version: "v2beta1",
Kind: "APIGroupDiscoveryList",
}
var v2GVK = schema.GroupVersionKind{
Group: "apidiscovery.k8s.io",
Version: "v2",
Kind: "APIGroupDiscoveryList",
}
// Given a list of APIServices and proxyHandlers for contacting them,
// DiscoveryManager caches a list of discovery documents for each server
type DiscoveryAggregationController interface {
// Adds or Updates an APIService from the Aggregated Discovery Controller's
// knowledge base
// Thread-safe
AddAPIService(apiService *apiregistrationv1.APIService, handler http.Handler)
// Removes an APIService from the Aggregated Discovery Controller's knowledge
// base.
// Thread-safe
RemoveAPIService(apiServiceName string)
// Spawns a worker which waits for added/updated apiservices and updates
// the unified discovery document by contacting the aggregated api services
Run(stopCh <-chan struct{}, discoverySyncedCh chan<- struct{})
}
type discoveryManager struct {
// Locks `apiServices`
servicesLock sync.RWMutex
// Map from APIService's name (or a unique string for local servers)
// to information about contacting that API Service
apiServices map[string]groupVersionInfo
// Locks cachedResults
resultsLock sync.RWMutex
// Map from APIService.Spec.Service to the previously fetched value
// (Note that many APIServices might use the same APIService.Spec.Service)
cachedResults map[serviceKey]cachedResult
// Queue of dirty apiServiceKey which need to be refreshed
// It is important that the reconciler for this queue does not excessively
// contact the apiserver if a key was enqueued before the server was last
// contacted.
dirtyAPIServiceQueue workqueue.TypedRateLimitingInterface[string]
// Merged handler which stores all known groupversions
mergedDiscoveryHandler discoveryendpoint.ResourceManager
// Codecs is the serializer used for decoding aggregated apiserver responses
codecs serializer.CodecFactory
}
// Version of Service/Spec with relevant fields for use as a cache key
type serviceKey struct {
Namespace string
Name string
Port int32
}
// Human-readable String representation used for logs
func (s serviceKey) String() string {
return fmt.Sprintf("%v/%v:%v", s.Namespace, s.Name, s.Port)
}
func newServiceKey(service apiregistrationv1.ServiceReference) serviceKey {
// Docs say it defaults to 443 for compatibility reasons.
// BETA: Should this be a shared constant to avoid drifting with the
// implementation?
port := int32(443)
if service.Port != nil {
port = *service.Port
}
return serviceKey{
Name: service.Name,
Namespace: service.Namespace,
Port: port,
}
}
type cachedResult struct {
// Currently cached discovery document for this service
// Map from group-version to that version's APIVersionDiscovery
discovery map[metav1.GroupVersion]apidiscoveryv2.APIVersionDiscovery
// ETag hash of the cached discoveryDocument
etag string
// Guaranteed to be a time less than the time the server responded with the
// discovery data.
lastUpdated time.Time
}
// Information about a specific APIService/GroupVersion
type groupVersionInfo struct {
// Date this APIService was marked dirty.
// Guaranteed to be a time greater than the most recent time the APIService
// was known to be modified.
//
// Used for request deduplication to ensure the data used to reconcile each
// apiservice was retrieved after the time of the APIService change:
// real_apiservice_change_time < groupVersionInfo.lastMarkedDirty < cachedResult.lastUpdated < real_document_fresh_time
//
// This ensures that if the apiservice was changed after the last cached entry
// was stored, the discovery document will always be re-fetched.
lastMarkedDirty time.Time
// ServiceReference of this GroupVersion. This identifies the Service which
// describes how to contact the server responsible for this GroupVersion.
service serviceKey
// groupPriority describes the priority of the APIService's group for sorting
groupPriority int
// versionPriority describes the priority of the APIService version for sorting
versionPriority int
// Method for contacting the service
handler http.Handler
}
var _ DiscoveryAggregationController = &discoveryManager{}
func NewDiscoveryManager(
target discoveryendpoint.ResourceManager,
) DiscoveryAggregationController {
discoveryScheme := runtime.NewScheme()
utilruntime.Must(apidiscoveryv2.AddToScheme(discoveryScheme))
utilruntime.Must(apidiscoveryv2beta1.AddToScheme(discoveryScheme))
// Register conversion for apidiscovery
utilruntime.Must(apidiscoveryv2conversion.RegisterConversions(discoveryScheme))
codecs := serializer.NewCodecFactory(discoveryScheme)
return &discoveryManager{
mergedDiscoveryHandler: target,
apiServices: make(map[string]groupVersionInfo),
cachedResults: make(map[serviceKey]cachedResult),
dirtyAPIServiceQueue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "discovery-manager"},
),
codecs: codecs,
}
}
// Returns discovery data for the given apiservice.
// Caches the result.
// Returns the cached result if it was retrieved after the apiservice was last
// marked dirty.
// If there was an error in fetching, returns the stale cached result if it exists,
// and a non-nil error.
// If the result is current, returns a nil error and a non-nil result.
func (dm *discoveryManager) fetchFreshDiscoveryForService(gv metav1.GroupVersion, info groupVersionInfo) (*cachedResult, error) {
// Lookup last cached result for this apiservice's service.
cached, exists := dm.getCacheEntryForService(info.service)
// If entry exists and was updated after the given time, just stop now
if exists && cached.lastUpdated.After(info.lastMarkedDirty) {
return &cached, nil
}
// If we have a handler to contact the server for this APIService, and
// the cache entry is too old to use, refresh the cache entry now.
handler := http.TimeoutHandler(info.handler, 5*time.Second, "request timed out")
req, err := http.NewRequest("GET", "/apis", nil)
if err != nil {
// NewRequest should not fail, but if it does for some reason,
// surface the error to the caller
return &cached, fmt.Errorf("failed to create http.Request: %v", err)
}
// Apply aggregator user to request
req = req.WithContext(
request.WithUser(
req.Context(), &user.DefaultInfo{Name: "system:kube-aggregator", Groups: []string{"system:masters"}}))
req = req.WithContext(request.WithRequestInfo(req.Context(), &request.RequestInfo{
Path: req.URL.Path,
IsResourceRequest: false,
}))
req.Header.Add("Accept", discovery.AcceptV2+","+discovery.AcceptV2Beta1)
if exists && len(cached.etag) > 0 {
req.Header.Add("If-None-Match", cached.etag)
}
// Important that the time recorded in the data's "lastUpdated" is conservatively
// from BEFORE the request is dispatched so that lastUpdated can be used to
// de-duplicate requests.
now := time.Now()
writer := newInMemoryResponseWriter()
handler.ServeHTTP(writer, req)
isV2Beta1GVK, _ := discovery.ContentTypeIsGVK(writer.Header().Get("Content-Type"), v2Beta1GVK)
isV2GVK, _ := discovery.ContentTypeIsGVK(writer.Header().Get("Content-Type"), v2GVK)
switch {
case writer.respCode == http.StatusNotModified:
// Keep old entry, update timestamp
cached = cachedResult{
discovery: cached.discovery,
etag: cached.etag,
lastUpdated: now,
}
dm.setCacheEntryForService(info.service, cached)
return &cached, nil
case writer.respCode == http.StatusServiceUnavailable:
return nil, fmt.Errorf("service %s returned non-success response code: %v",
info.service.String(), writer.respCode)
case writer.respCode == http.StatusOK && (isV2GVK || isV2Beta1GVK):
parsed := &apidiscoveryv2.APIGroupDiscoveryList{}
if err := runtime.DecodeInto(dm.codecs.UniversalDecoder(), writer.data, parsed); err != nil {
return nil, err
}
klog.V(3).Infof("DiscoveryManager: Successfully downloaded discovery for %s", info.service.String())
// Convert discovery info into a map for convenient lookup later
discoMap := map[metav1.GroupVersion]apidiscoveryv2.APIVersionDiscovery{}
for _, g := range parsed.Items {
for _, v := range g.Versions {
discoMap[metav1.GroupVersion{Group: g.Name, Version: v.Version}] = v
for i := range v.Resources {
// avoid nil panics in v0.26.0-v0.26.3 client-go clients
// see https://github.com/kubernetes/kubernetes/issues/118361
if v.Resources[i].ResponseKind == nil {
v.Resources[i].ResponseKind = &metav1.GroupVersionKind{}
}
for j := range v.Resources[i].Subresources {
if v.Resources[i].Subresources[j].ResponseKind == nil {
v.Resources[i].Subresources[j].ResponseKind = &metav1.GroupVersionKind{}
}
}
}
}
}
// Save cached result
cached = cachedResult{
discovery: discoMap,
etag: writer.Header().Get("Etag"),
lastUpdated: now,
}
dm.setCacheEntryForService(info.service, cached)
return &cached, nil
default:
// Could not get acceptable response for Aggregated Discovery.
// Fall back to legacy discovery information
if len(gv.Version) == 0 {
return nil, errors.New("not found")
}
var path string
if len(gv.Group) == 0 {
path = "/api/" + gv.Version
} else {
path = "/apis/" + gv.Group + "/" + gv.Version
}
req, err := http.NewRequest("GET", path, nil)
if err != nil {
// NewRequest should not fail, but if it does for some reason,
// surface the error to the caller
return nil, fmt.Errorf("failed to create http.Request: %v", err)
}
// Apply aggregator user to request
req = req.WithContext(
request.WithUser(
req.Context(), &user.DefaultInfo{Name: "system:kube-aggregator"}))
// req.Header.Add("Accept", runtime.ContentTypeProtobuf)
req.Header.Add("Accept", runtime.ContentTypeJSON)
if exists && len(cached.etag) > 0 {
req.Header.Add("If-None-Match", cached.etag)
}
writer := newInMemoryResponseWriter()
handler.ServeHTTP(writer, req)
if writer.respCode != http.StatusOK {
return nil, fmt.Errorf("failed to download legacy discovery for %s: %v", path, writer.String())
}
parsed := &metav1.APIResourceList{}
if err := runtime.DecodeInto(scheme.Codecs.UniversalDecoder(), writer.data, parsed); err != nil {
return nil, err
}
// Create a discomap with single group-version
resources, err := endpoints.ConvertGroupVersionIntoToDiscovery(parsed.APIResources)
if err != nil {
return nil, err
}
klog.V(3).Infof("DiscoveryManager: Successfully downloaded legacy discovery for %s", info.service.String())
discoMap := map[metav1.GroupVersion]apidiscoveryv2.APIVersionDiscovery{
// Convert old-style APIGroupList to new information
gv: {
Version: gv.Version,
Resources: resources,
},
}
cached = cachedResult{
discovery: discoMap,
lastUpdated: now,
}
// Do not save the result as the legacy fallback only fetches
// one group version and an API Service may serve multiple
// group versions.
return &cached, nil
}
}
// Try to sync a single APIService.
func (dm *discoveryManager) syncAPIService(apiServiceName string) error {
info, exists := dm.getInfoForAPIService(apiServiceName)
gv := helper.APIServiceNameToGroupVersion(apiServiceName)
mgv := metav1.GroupVersion{Group: gv.Group, Version: gv.Version}
if !exists {
// apiservice was removed. remove it from merged discovery
dm.mergedDiscoveryHandler.RemoveGroupVersion(mgv)
return nil
}
// Lookup last cached result for this apiservice's service.
cached, err := dm.fetchFreshDiscoveryForService(mgv, info)
var entry apidiscoveryv2.APIVersionDiscovery
// Extract the APIService's specific resource information from the
// groupversion
if cached == nil {
// There was an error fetching discovery for this APIService, and
// there is nothing in the cache for this GV.
//
// Just use empty GV to mark that GV exists, but no resources.
// Also mark that it is stale to indicate the fetch failed
// TODO: Maybe also stick in a status for the version the error?
entry = apidiscoveryv2.APIVersionDiscovery{
Version: gv.Version,
}
} else {
// Find our specific groupversion within the discovery document
entry, exists = cached.discovery[mgv]
if exists {
// The stale/fresh entry has our GV, so we can include it in the doc
} else {
// Successfully fetched discovery information from the server, but
// the server did not include this groupversion?
entry = apidiscoveryv2.APIVersionDiscovery{
Version: gv.Version,
}
}
}
// The entry's staleness depends upon if `fetchFreshDiscoveryForService`
// returned an error or not.
if err == nil {
entry.Freshness = apidiscoveryv2.DiscoveryFreshnessCurrent
} else {
entry.Freshness = apidiscoveryv2.DiscoveryFreshnessStale
}
dm.mergedDiscoveryHandler.AddGroupVersion(gv.Group, entry)
dm.mergedDiscoveryHandler.SetGroupVersionPriority(metav1.GroupVersion(gv), info.groupPriority, info.versionPriority)
return nil
}
func (dm *discoveryManager) getAPIServiceKeys() []string {
dm.servicesLock.RLock()
defer dm.servicesLock.RUnlock()
keys := []string{}
for key := range dm.apiServices {
keys = append(keys, key)
}
return keys
}
// Spawns a goroutine which waits for added/updated apiservices and updates
// the discovery document accordingly
func (dm *discoveryManager) Run(stopCh <-chan struct{}, discoverySyncedCh chan<- struct{}) {
klog.Info("Starting ResourceDiscoveryManager")
// Shutdown the queue since stopCh was signalled
defer dm.dirtyAPIServiceQueue.ShutDown()
// Ensure that apiregistration.k8s.io is the first group in the discovery group.
dm.mergedDiscoveryHandler.WithSource(discoveryendpoint.BuiltinSource).SetGroupVersionPriority(APIRegistrationGroupVersion, APIRegistrationGroupPriority, 0)
// Ensure that all APIServices are present before readiness check succeeds
var wg sync.WaitGroup
// Iterate on a copy of the keys to be thread safe with syncAPIService
keys := dm.getAPIServiceKeys()
for _, key := range keys {
wg.Add(1)
go func(k string) {
defer wg.Done()
// If an error was returned, the APIService will still have been
// added but marked as stale. Ignore the return value here
_ = dm.syncAPIService(k)
}(key)
}
wg.Wait()
if discoverySyncedCh != nil {
close(discoverySyncedCh)
}
// Spawn workers
// These workers wait for APIServices to be marked dirty.
// Worker ensures the cached discovery document hosted by the ServiceReference of
// the APIService is at least as fresh as the APIService, then includes the
// APIService's groupversion into the merged document
for i := 0; i < 2; i++ {
go func() {
for {
next, shutdown := dm.dirtyAPIServiceQueue.Get()
if shutdown {
return
}
func() {
defer dm.dirtyAPIServiceQueue.Done(next)
if err := dm.syncAPIService(next); err != nil {
dm.dirtyAPIServiceQueue.AddRateLimited(next)
} else {
dm.dirtyAPIServiceQueue.Forget(next)
}
}()
}
}()
}
wait.PollUntil(1*time.Minute, func() (done bool, err error) {
dm.servicesLock.Lock()
defer dm.servicesLock.Unlock()
now := time.Now()
// Mark all non-local APIServices as dirty
for key, info := range dm.apiServices {
info.lastMarkedDirty = now
dm.apiServices[key] = info
dm.dirtyAPIServiceQueue.Add(key)
}
return false, nil
}, stopCh)
}
// Takes a snapshot of all currently used services by known APIServices and
// purges the cache entries of those not present in the snapshot.
func (dm *discoveryManager) removeUnusedServices() {
usedServiceKeys := sets.Set[serviceKey]{}
func() {
dm.servicesLock.Lock()
defer dm.servicesLock.Unlock()
// Collect the service keys referenced by all known APIServices
for _, info := range dm.apiServices {
usedServiceKeys.Insert(info.service)
}
}()
// Avoids double lock. It is okay if a service is added/removed between these
// functions. This is just a cache and that should be infrequent.
func() {
dm.resultsLock.Lock()
defer dm.resultsLock.Unlock()
for key := range dm.cachedResults {
if !usedServiceKeys.Has(key) {
delete(dm.cachedResults, key)
}
}
}()
}
// Adds an APIService to be tracked by the discovery manager. If the APIService
// is already known, its record is updated and it is marked dirty.
func (dm *discoveryManager) AddAPIService(apiService *apiregistrationv1.APIService, handler http.Handler) {
// If service is nil then its information is contained by a local APIService
// which has already been added to the manager.
if apiService.Spec.Service == nil {
return
}
// Add or update APIService record and mark it as dirty
dm.setInfoForAPIService(apiService.Name, &groupVersionInfo{
groupPriority: int(apiService.Spec.GroupPriorityMinimum),
versionPriority: int(apiService.Spec.VersionPriority),
handler: handler,
lastMarkedDirty: time.Now(),
service: newServiceKey(*apiService.Spec.Service),
})
dm.removeUnusedServices()
dm.dirtyAPIServiceQueue.Add(apiService.Name)
}
func (dm *discoveryManager) RemoveAPIService(apiServiceName string) {
if dm.setInfoForAPIService(apiServiceName, nil) != nil {
// mark dirty if there was actually something deleted
dm.removeUnusedServices()
dm.dirtyAPIServiceQueue.Add(apiServiceName)
}
}
//
// Lock-protected accessors
//
func (dm *discoveryManager) getCacheEntryForService(key serviceKey) (cachedResult, bool) {
dm.resultsLock.RLock()
defer dm.resultsLock.RUnlock()
result, ok := dm.cachedResults[key]
return result, ok
}
func (dm *discoveryManager) setCacheEntryForService(key serviceKey, result cachedResult) {
dm.resultsLock.Lock()
defer dm.resultsLock.Unlock()
dm.cachedResults[key] = result
}
func (dm *discoveryManager) getInfoForAPIService(name string) (groupVersionInfo, bool) {
dm.servicesLock.RLock()
defer dm.servicesLock.RUnlock()
result, ok := dm.apiServices[name]
return result, ok
}
func (dm *discoveryManager) setInfoForAPIService(name string, result *groupVersionInfo) (oldValueIfExisted *groupVersionInfo) {
dm.servicesLock.Lock()
defer dm.servicesLock.Unlock()
if oldValue, exists := dm.apiServices[name]; exists {
oldValueIfExisted = &oldValue
}
if result != nil {
dm.apiServices[name] = *result
} else {
delete(dm.apiServices, name)
}
return oldValueIfExisted
}
// !TODO: This was copied from staging/src/k8s.io/kube-aggregator/pkg/controllers/openapi/aggregator/downloader.go
// which was copied from staging/src/k8s.io/kube-aggregator/pkg/controllers/openapiv3/aggregator/downloader.go
// so we should find a home for this
// inMemoryResponseWriter is an http.ResponseWriter that keeps the response in memory.
type inMemoryResponseWriter struct {
writeHeaderCalled bool
header http.Header
respCode int
data []byte
}
func newInMemoryResponseWriter() *inMemoryResponseWriter {
return &inMemoryResponseWriter{header: http.Header{}}
}
func (r *inMemoryResponseWriter) Header() http.Header {
return r.header
}
func (r *inMemoryResponseWriter) WriteHeader(code int) {
r.writeHeaderCalled = true
r.respCode = code
}
func (r *inMemoryResponseWriter) Write(in []byte) (int, error) {
if !r.writeHeaderCalled {
r.WriteHeader(http.StatusOK)
}
r.data = append(r.data, in...)
return len(in), nil
}
func (r *inMemoryResponseWriter) String() string {
s := fmt.Sprintf("ResponseCode: %d", r.respCode)
if r.data != nil {
s += fmt.Sprintf(", Body: %s", string(r.data))
}
if r.header != nil {
s += fmt.Sprintf(", Header: %s", r.header)
}
return s
}
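A small sketch of how this in-memory writer lets the manager call an APIService's handler without a network round trip (mirroring fetchFreshDiscoveryForService above; handler is the assumed proxy handler for that APIService):

writer := newInMemoryResponseWriter()
req, _ := http.NewRequest("GET", "/apis", nil)
handler.ServeHTTP(writer, req)
if writer.respCode == http.StatusOK {
	_ = writer.data // raw discovery payload, decoded via dm.codecs by the caller
}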

View File

@ -1,245 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apiserver
import (
"net/http"
"net/url"
"sync/atomic"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/httpstream"
"k8s.io/apimachinery/pkg/util/proxy"
"k8s.io/apiserver/pkg/endpoints/handlers/responsewriters"
endpointmetrics "k8s.io/apiserver/pkg/endpoints/metrics"
genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
genericfeatures "k8s.io/apiserver/pkg/features"
utilfeature "k8s.io/apiserver/pkg/util/feature"
utilflowcontrol "k8s.io/apiserver/pkg/util/flowcontrol"
apiserverproxyutil "k8s.io/apiserver/pkg/util/proxy"
"k8s.io/apiserver/pkg/util/x509metrics"
"k8s.io/client-go/transport"
"k8s.io/component-base/tracing"
"k8s.io/klog/v2"
apiregistrationv1api "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
apiregistrationv1apihelper "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/helper"
)
const (
aggregatorComponent string = "aggregator"
)
type certKeyFunc func() ([]byte, []byte)
// proxyHandler provides a http.Handler which will proxy traffic to locations
// specified by items implementing Redirector.
type proxyHandler struct {
// localDelegate is used to satisfy local APIServices
localDelegate http.Handler
// proxyCurrentCertKeyContent holds the client cert used to identify this proxy. Backing APIServices use this to confirm the proxy's identity
proxyCurrentCertKeyContent certKeyFunc
proxyTransportDial *transport.DialHolder
// Endpoints based routing to map from cluster IP to routable IP
serviceResolver ServiceResolver
handlingInfo atomic.Value
// rejectForwardingRedirects indicates whether to reject forwarding a redirect response
rejectForwardingRedirects bool
// tracerProvider is used to wrap the proxy transport and handler with tracing
tracerProvider tracing.TracerProvider
}
type proxyHandlingInfo struct {
// local indicates that this APIService is locally satisfied
local bool
// name is the name of the APIService
name string
// transportConfig holds the information for building a roundtripper
transportConfig *transport.Config
// transportBuildingError is an error produced while building the transport. If this
// is non-nil, it will be reported to clients.
transportBuildingError error
// proxyRoundTripper is the reusable portion of the transport. It does not vary with any request.
proxyRoundTripper http.RoundTripper
// serviceName is the name of the service this handler proxies to
serviceName string
// namespace is the namespace the service lives in
serviceNamespace string
// serviceAvailable indicates whether this APIService is available
serviceAvailable bool
// servicePort is the port of the service this handler proxies to
servicePort int32
}
func proxyError(w http.ResponseWriter, req *http.Request, error string, code int) {
http.Error(w, error, code)
ctx := req.Context()
info, ok := genericapirequest.RequestInfoFrom(ctx)
if !ok {
klog.Warning("no RequestInfo found in the context")
return
}
// TODO: record long-running request differently? The long-running check func does not necessarily match the one of the aggregated apiserver
endpointmetrics.RecordRequestTermination(req, info, aggregatorComponent, code)
}
func (r *proxyHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
value := r.handlingInfo.Load()
if value == nil {
r.localDelegate.ServeHTTP(w, req)
return
}
handlingInfo := value.(proxyHandlingInfo)
if handlingInfo.local {
if r.localDelegate == nil {
http.Error(w, "", http.StatusNotFound)
return
}
r.localDelegate.ServeHTTP(w, req)
return
}
if !handlingInfo.serviceAvailable {
proxyError(w, req, "service unavailable", http.StatusServiceUnavailable)
return
}
if handlingInfo.transportBuildingError != nil {
proxyError(w, req, handlingInfo.transportBuildingError.Error(), http.StatusInternalServerError)
return
}
user, ok := genericapirequest.UserFrom(req.Context())
if !ok {
proxyError(w, req, "missing user", http.StatusInternalServerError)
return
}
// write a new location based on the existing request pointed at the target service
location := &url.URL{}
location.Scheme = "https"
rloc, err := r.serviceResolver.ResolveEndpoint(handlingInfo.serviceNamespace, handlingInfo.serviceName, handlingInfo.servicePort)
if err != nil {
klog.Errorf("error resolving %s/%s: %v", handlingInfo.serviceNamespace, handlingInfo.serviceName, err)
proxyError(w, req, "service unavailable", http.StatusServiceUnavailable)
return
}
location.Host = rloc.Host
location.Path = req.URL.Path
location.RawQuery = req.URL.Query().Encode()
newReq, cancelFn := apiserverproxyutil.NewRequestForProxy(location, req)
defer cancelFn()
if handlingInfo.proxyRoundTripper == nil {
proxyError(w, req, "", http.StatusNotFound)
return
}
proxyRoundTripper := handlingInfo.proxyRoundTripper
upgrade := httpstream.IsUpgradeRequest(req)
proxyRoundTripper = transport.NewAuthProxyRoundTripper(user.GetName(), user.GetGroups(), user.GetExtra(), proxyRoundTripper)
if utilfeature.DefaultFeatureGate.Enabled(genericfeatures.APIServerTracing) && !upgrade {
tracingWrapper := tracing.WrapperFor(r.tracerProvider)
proxyRoundTripper = tracingWrapper(proxyRoundTripper)
}
// If we are upgrading, then the upgrade path tries to use this request with the TLS config we provide, but it does
// NOT use the proxyRoundTripper. It's a direct dial that bypasses the proxyRoundTripper. This means that we have to
// attach the "correct" user headers to the request ahead of time.
if upgrade {
transport.SetAuthProxyHeaders(newReq, user.GetName(), user.GetGroups(), user.GetExtra())
}
handler := proxy.NewUpgradeAwareHandler(location, proxyRoundTripper, true, upgrade, &responder{w: w})
if r.rejectForwardingRedirects {
handler.RejectForwardingRedirects = true
}
utilflowcontrol.RequestDelegated(req.Context())
handler.ServeHTTP(w, newReq)
}
// responder implements rest.Responder for assisting a connector in writing objects or errors.
type responder struct {
w http.ResponseWriter
}
// TODO: this should properly handle content type negotiation;
// if the caller asked for protobuf and you write JSON, bad things happen.
func (r *responder) Object(statusCode int, obj runtime.Object) {
responsewriters.WriteRawJSON(statusCode, obj, r.w)
}
func (r *responder) Error(_ http.ResponseWriter, _ *http.Request, err error) {
http.Error(r.w, err.Error(), http.StatusServiceUnavailable)
}
// these methods provide locked access to fields
// setServiceAvailable sets the serviceAvailable value in the proxyHandler's handlingInfo.
// It is not thread-safe.
func (r *proxyHandler) setServiceAvailable() {
info := r.handlingInfo.Load().(proxyHandlingInfo)
info.serviceAvailable = true
r.handlingInfo.Store(info)
}
func (r *proxyHandler) updateAPIService(apiService *apiregistrationv1api.APIService) {
if apiService.Spec.Service == nil {
r.handlingInfo.Store(proxyHandlingInfo{local: true})
return
}
proxyClientCert, proxyClientKey := r.proxyCurrentCertKeyContent()
transportConfig := &transport.Config{
TLS: transport.TLSConfig{
Insecure: apiService.Spec.InsecureSkipTLSVerify,
ServerName: apiService.Spec.Service.Name + "." + apiService.Spec.Service.Namespace + ".svc",
CertData: proxyClientCert,
KeyData: proxyClientKey,
CAData: apiService.Spec.CABundle,
},
DialHolder: r.proxyTransportDial,
}
transportConfig.Wrap(x509metrics.NewDeprecatedCertificateRoundTripperWrapperConstructor(
x509MissingSANCounter,
x509InsecureSHA1Counter,
))
newInfo := proxyHandlingInfo{
name: apiService.Name,
transportConfig: transportConfig,
serviceName: apiService.Spec.Service.Name,
serviceNamespace: apiService.Spec.Service.Namespace,
servicePort: *apiService.Spec.Service.Port,
serviceAvailable: apiregistrationv1apihelper.IsAPIServiceConditionTrue(apiService, apiregistrationv1api.Available),
}
newInfo.proxyRoundTripper, newInfo.transportBuildingError = transport.New(newInfo.transportConfig)
if newInfo.transportBuildingError != nil {
klog.Warning(newInfo.transportBuildingError.Error())
}
r.handlingInfo.Store(newInfo)
}
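A construction sketch mirroring how AddAPIService wires one of these handlers (delegateHandler, certKeyFn, resolver and mux are assumed; a nil Spec.Service makes updateAPIService mark the APIService as locally served):

ph := &proxyHandler{
	localDelegate:              delegateHandler, // assumed http.Handler fallback
	proxyCurrentCertKeyContent: certKeyFn,       // assumed certKeyFunc returning the proxy client cert/key
	serviceResolver:            resolver,        // assumed ServiceResolver
}
ph.updateAPIService(apiService)
mux.Handle("/apis/"+apiService.Spec.Group+"/"+apiService.Spec.Version, ph)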

View File

@ -1,52 +0,0 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apiserver
import (
"k8s.io/component-base/metrics"
"k8s.io/component-base/metrics/legacyregistry"
)
var x509MissingSANCounter = metrics.NewCounter(
&metrics.CounterOpts{
Subsystem: "kube_aggregator",
Namespace: "apiserver",
Name: "x509_missing_san_total",
Help: "Counts the number of requests to servers missing SAN extension " +
"in their serving certificate OR the number of connection failures " +
"due to the lack of x509 certificate SAN extension missing " +
"(either/or, based on the runtime environment)",
StabilityLevel: metrics.ALPHA,
},
)
var x509InsecureSHA1Counter = metrics.NewCounter(
&metrics.CounterOpts{
Subsystem: "kube_aggregator",
Namespace: "apiserver",
Name: "x509_insecure_sha1_total",
Help: "Counts the number of requests to servers with insecure SHA1 signatures " +
"in their serving certificate OR the number of connection failures " +
"due to the insecure SHA1 signatures (either/or, based on the runtime environment)",
StabilityLevel: metrics.ALPHA,
},
)
func init() {
legacyregistry.MustRegister(x509MissingSANCounter)
legacyregistry.MustRegister(x509InsecureSHA1Counter)
}

View File

@ -1,84 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apiserver
import (
"net/url"
"k8s.io/apiserver/pkg/util/proxy"
listersv1 "k8s.io/client-go/listers/core/v1"
)
// A ServiceResolver knows how to get a URL given a service.
type ServiceResolver interface {
ResolveEndpoint(namespace, name string, port int32) (*url.URL, error)
}
// NewEndpointServiceResolver returns a ServiceResolver that chooses one of the
// service's endpoints.
func NewEndpointServiceResolver(services listersv1.ServiceLister, endpoints listersv1.EndpointsLister) ServiceResolver {
return &aggregatorEndpointRouting{
services: services,
endpoints: endpoints,
}
}
type aggregatorEndpointRouting struct {
services listersv1.ServiceLister
endpoints listersv1.EndpointsLister
}
func (r *aggregatorEndpointRouting) ResolveEndpoint(namespace, name string, port int32) (*url.URL, error) {
return proxy.ResolveEndpoint(r.services, r.endpoints, namespace, name, port)
}
// NewClusterIPServiceResolver returns a ServiceResolver that directly calls the
// service's cluster IP.
func NewClusterIPServiceResolver(services listersv1.ServiceLister) ServiceResolver {
return &aggregatorClusterRouting{
services: services,
}
}
type aggregatorClusterRouting struct {
services listersv1.ServiceLister
}
func (r *aggregatorClusterRouting) ResolveEndpoint(namespace, name string, port int32) (*url.URL, error) {
return proxy.ResolveCluster(r.services, namespace, name, port)
}
// NewLoopbackServiceResolver returns a ServiceResolver that routes
// the kubernetes/default service with port 443 to loopback.
func NewLoopbackServiceResolver(delegate ServiceResolver, host *url.URL) ServiceResolver {
return &loopbackResolver{
delegate: delegate,
host: host,
}
}
type loopbackResolver struct {
delegate ServiceResolver
host *url.URL
}
func (r *loopbackResolver) ResolveEndpoint(namespace, name string, port int32) (*url.URL, error) {
if namespace == "default" && name == "kubernetes" && port == 443 {
return r.host, nil
}
return r.delegate.ResolveEndpoint(namespace, name, port)
}
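A composition sketch: a cluster-IP resolver wrapped so that the kubernetes/default:443 service routes to an assumed loopback address (serviceLister is an assumed listersv1.ServiceLister; the second lookup uses a hypothetical service name):

resolver := NewLoopbackServiceResolver(
	NewClusterIPServiceResolver(serviceLister),
	&url.URL{Scheme: "https", Host: "127.0.0.1:6443"},
)
fmt.Println(resolver.ResolveEndpoint("default", "kubernetes", 443))           // the loopback URL
fmt.Println(resolver.ResolveEndpoint("karmada-system", "some-service", 443))  // resolved via its ClusterIP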

View File

@ -1,36 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheme
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/kube-aggregator/pkg/apis/apiregistration/install"
)
var (
// Scheme defines methods for serializing and deserializing API objects.
Scheme = runtime.NewScheme()
// Codecs provides methods for retrieving codecs and serializers for specific
// versions and content types.
Codecs = serializer.NewCodecFactory(Scheme)
)
func init() {
install.Install(Scheme)
}

View File

@ -1,54 +0,0 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package apiregistration
import (
v1 "k8s.io/kube-aggregator/pkg/client/informers/externalversions/apiregistration/v1"
v1beta1 "k8s.io/kube-aggregator/pkg/client/informers/externalversions/apiregistration/v1beta1"
internalinterfaces "k8s.io/kube-aggregator/pkg/client/informers/externalversions/internalinterfaces"
)
// Interface provides access to each of this group's versions.
type Interface interface {
// V1 provides access to shared informers for resources in V1.
V1() v1.Interface
// V1beta1 provides access to shared informers for resources in V1beta1.
V1beta1() v1beta1.Interface
}
type group struct {
factory internalinterfaces.SharedInformerFactory
namespace string
tweakListOptions internalinterfaces.TweakListOptionsFunc
}
// New returns a new Interface.
func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
}
// V1 returns a new v1.Interface.
func (g *group) V1() v1.Interface {
return v1.New(g.factory, g.namespace, g.tweakListOptions)
}
// V1beta1 returns a new v1beta1.Interface.
func (g *group) V1beta1() v1beta1.Interface {
return v1beta1.New(g.factory, g.namespace, g.tweakListOptions)
}

View File

@ -1,89 +0,0 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package v1
import (
"context"
time "time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
watch "k8s.io/apimachinery/pkg/watch"
cache "k8s.io/client-go/tools/cache"
apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
clientset "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
internalinterfaces "k8s.io/kube-aggregator/pkg/client/informers/externalversions/internalinterfaces"
v1 "k8s.io/kube-aggregator/pkg/client/listers/apiregistration/v1"
)
// APIServiceInformer provides access to a shared informer and lister for
// APIServices.
type APIServiceInformer interface {
Informer() cache.SharedIndexInformer
Lister() v1.APIServiceLister
}
type aPIServiceInformer struct {
factory internalinterfaces.SharedInformerFactory
tweakListOptions internalinterfaces.TweakListOptionsFunc
}
// NewAPIServiceInformer constructs a new informer for APIService type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewAPIServiceInformer(client clientset.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
return NewFilteredAPIServiceInformer(client, resyncPeriod, indexers, nil)
}
// NewFilteredAPIServiceInformer constructs a new informer for APIService type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredAPIServiceInformer(client clientset.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.ApiregistrationV1().APIServices().List(context.TODO(), options)
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.ApiregistrationV1().APIServices().Watch(context.TODO(), options)
},
},
&apiregistrationv1.APIService{},
resyncPeriod,
indexers,
)
}
func (f *aPIServiceInformer) defaultInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
return NewFilteredAPIServiceInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
}
func (f *aPIServiceInformer) Informer() cache.SharedIndexInformer {
return f.factory.InformerFor(&apiregistrationv1.APIService{}, f.defaultInformer)
}
func (f *aPIServiceInformer) Lister() v1.APIServiceLister {
return v1.NewAPIServiceLister(f.Informer().GetIndexer())
}
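Direct use, outside the shared factory path the comments above recommend, looks roughly like this; client and stopCh are assumed:

informer := NewAPIServiceInformer(client, 10*time.Minute, cache.Indexers{})
go informer.Run(stopCh)
cache.WaitForCacheSync(stopCh, informer.HasSynced)
lister := v1.NewAPIServiceLister(informer.GetIndexer())
_ = lister // list/get APIServices from the informer's local cache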

View File

@ -1,45 +0,0 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package v1
import (
internalinterfaces "k8s.io/kube-aggregator/pkg/client/informers/externalversions/internalinterfaces"
)
// Interface provides access to all the informers in this group version.
type Interface interface {
// APIServices returns a APIServiceInformer.
APIServices() APIServiceInformer
}
type version struct {
factory internalinterfaces.SharedInformerFactory
namespace string
tweakListOptions internalinterfaces.TweakListOptionsFunc
}
// New returns a new Interface.
func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
}
// APIServices returns an APIServiceInformer.
func (v *version) APIServices() APIServiceInformer {
return &aPIServiceInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
}

View File

@ -1,89 +0,0 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package v1beta1
import (
"context"
time "time"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
watch "k8s.io/apimachinery/pkg/watch"
cache "k8s.io/client-go/tools/cache"
apiregistrationv1beta1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1"
clientset "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
internalinterfaces "k8s.io/kube-aggregator/pkg/client/informers/externalversions/internalinterfaces"
v1beta1 "k8s.io/kube-aggregator/pkg/client/listers/apiregistration/v1beta1"
)
// APIServiceInformer provides access to a shared informer and lister for
// APIServices.
type APIServiceInformer interface {
Informer() cache.SharedIndexInformer
Lister() v1beta1.APIServiceLister
}
type aPIServiceInformer struct {
factory internalinterfaces.SharedInformerFactory
tweakListOptions internalinterfaces.TweakListOptionsFunc
}
// NewAPIServiceInformer constructs a new informer for APIService type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewAPIServiceInformer(client clientset.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
return NewFilteredAPIServiceInformer(client, resyncPeriod, indexers, nil)
}
// NewFilteredAPIServiceInformer constructs a new informer for APIService type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredAPIServiceInformer(client clientset.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.ApiregistrationV1beta1().APIServices().List(context.TODO(), options)
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.ApiregistrationV1beta1().APIServices().Watch(context.TODO(), options)
},
},
&apiregistrationv1beta1.APIService{},
resyncPeriod,
indexers,
)
}
func (f *aPIServiceInformer) defaultInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
return NewFilteredAPIServiceInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
}
func (f *aPIServiceInformer) Informer() cache.SharedIndexInformer {
return f.factory.InformerFor(&apiregistrationv1beta1.APIService{}, f.defaultInformer)
}
func (f *aPIServiceInformer) Lister() v1beta1.APIServiceLister {
return v1beta1.NewAPIServiceLister(f.Informer().GetIndexer())
}

View File

@ -1,45 +0,0 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package v1beta1
import (
internalinterfaces "k8s.io/kube-aggregator/pkg/client/informers/externalversions/internalinterfaces"
)
// Interface provides access to all the informers in this group version.
type Interface interface {
// APIServices returns an APIServiceInformer.
APIServices() APIServiceInformer
}
type version struct {
factory internalinterfaces.SharedInformerFactory
namespace string
tweakListOptions internalinterfaces.TweakListOptionsFunc
}
// New returns a new Interface.
func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
}
// APIServices returns an APIServiceInformer.
func (v *version) APIServices() APIServiceInformer {
return &aPIServiceInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
}

View File

@ -1,262 +0,0 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package externalversions
import (
reflect "reflect"
sync "sync"
time "time"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
cache "k8s.io/client-go/tools/cache"
clientset "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
apiregistration "k8s.io/kube-aggregator/pkg/client/informers/externalversions/apiregistration"
internalinterfaces "k8s.io/kube-aggregator/pkg/client/informers/externalversions/internalinterfaces"
)
// SharedInformerOption defines the functional option type for SharedInformerFactory.
type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory
type sharedInformerFactory struct {
client clientset.Interface
namespace string
tweakListOptions internalinterfaces.TweakListOptionsFunc
lock sync.Mutex
defaultResync time.Duration
customResync map[reflect.Type]time.Duration
transform cache.TransformFunc
informers map[reflect.Type]cache.SharedIndexInformer
// startedInformers is used for tracking which informers have been started.
// This allows Start() to be called multiple times safely.
startedInformers map[reflect.Type]bool
// wg tracks how many goroutines were started.
wg sync.WaitGroup
// shuttingDown is true when Shutdown has been called. It may still be running
// because it needs to wait for goroutines.
shuttingDown bool
}
// WithCustomResyncConfig sets a custom resync period for the specified informer types.
func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption {
return func(factory *sharedInformerFactory) *sharedInformerFactory {
for k, v := range resyncConfig {
factory.customResync[reflect.TypeOf(k)] = v
}
return factory
}
}
// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory.
func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption {
return func(factory *sharedInformerFactory) *sharedInformerFactory {
factory.tweakListOptions = tweakListOptions
return factory
}
}
// WithNamespace limits the SharedInformerFactory to the specified namespace.
func WithNamespace(namespace string) SharedInformerOption {
return func(factory *sharedInformerFactory) *sharedInformerFactory {
factory.namespace = namespace
return factory
}
}
// WithTransform sets a transform on all informers.
func WithTransform(transform cache.TransformFunc) SharedInformerOption {
return func(factory *sharedInformerFactory) *sharedInformerFactory {
factory.transform = transform
return factory
}
}
// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces.
func NewSharedInformerFactory(client clientset.Interface, defaultResync time.Duration) SharedInformerFactory {
return NewSharedInformerFactoryWithOptions(client, defaultResync)
}
// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory.
// Listers obtained via this SharedInformerFactory will be subject to the same filters
// as specified here.
// Deprecated: Please use NewSharedInformerFactoryWithOptions instead
func NewFilteredSharedInformerFactory(client clientset.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory {
return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions))
}
// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options.
func NewSharedInformerFactoryWithOptions(client clientset.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory {
factory := &sharedInformerFactory{
client: client,
namespace: v1.NamespaceAll,
defaultResync: defaultResync,
informers: make(map[reflect.Type]cache.SharedIndexInformer),
startedInformers: make(map[reflect.Type]bool),
customResync: make(map[reflect.Type]time.Duration),
}
// Apply all options
for _, opt := range options {
factory = opt(factory)
}
return factory
}
func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) {
f.lock.Lock()
defer f.lock.Unlock()
if f.shuttingDown {
return
}
for informerType, informer := range f.informers {
if !f.startedInformers[informerType] {
f.wg.Add(1)
// We need a new variable in each loop iteration,
// otherwise the goroutine would use the loop variable
// and that keeps changing.
informer := informer
go func() {
defer f.wg.Done()
informer.Run(stopCh)
}()
f.startedInformers[informerType] = true
}
}
}
func (f *sharedInformerFactory) Shutdown() {
f.lock.Lock()
f.shuttingDown = true
f.lock.Unlock()
// Will return immediately if there is nothing to wait for.
f.wg.Wait()
}
func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool {
informers := func() map[reflect.Type]cache.SharedIndexInformer {
f.lock.Lock()
defer f.lock.Unlock()
informers := map[reflect.Type]cache.SharedIndexInformer{}
for informerType, informer := range f.informers {
if f.startedInformers[informerType] {
informers[informerType] = informer
}
}
return informers
}()
res := map[reflect.Type]bool{}
for informType, informer := range informers {
res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced)
}
return res
}
// InformerFor returns the SharedIndexInformer for obj using an internal
// client.
func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer {
f.lock.Lock()
defer f.lock.Unlock()
informerType := reflect.TypeOf(obj)
informer, exists := f.informers[informerType]
if exists {
return informer
}
resyncPeriod, exists := f.customResync[informerType]
if !exists {
resyncPeriod = f.defaultResync
}
informer = newFunc(f.client, resyncPeriod)
informer.SetTransform(f.transform)
f.informers[informerType] = informer
return informer
}
// SharedInformerFactory provides shared informers for resources in all known
// API group versions.
//
// It is typically used like this:
//
// ctx, cancel := context.WithCancel(context.Background())
// defer cancel()
// factory := NewSharedInformerFactory(client, resyncPeriod)
// defer factory.Shutdown() // Returns immediately if nothing was started.
// genericInformer := factory.ForResource(resource)
// typedInformer := factory.SomeAPIGroup().V1().SomeType()
// factory.Start(ctx.Done()) // Start processing these informers.
// synced := factory.WaitForCacheSync(ctx.Done())
// for v, ok := range synced {
// if !ok {
// fmt.Fprintf(os.Stderr, "caches failed to sync: %v", v)
// return
// }
// }
//
// // Informers can also be created after Start, but then
// // Start must be called again:
// anotherGenericInformer := factory.ForResource(resource)
// factory.Start(ctx.Done())
type SharedInformerFactory interface {
internalinterfaces.SharedInformerFactory
// Start initializes all requested informers. They are handled in goroutines
// which run until the stop channel gets closed.
// Warning: Start does not block. When run in a go-routine, it will race with a later WaitForCacheSync.
Start(stopCh <-chan struct{})
// Shutdown marks a factory as shutting down. At that point no new
// informers can be started anymore and Start will return without
// doing anything.
//
// In addition, Shutdown blocks until all goroutines have terminated. For that
// to happen, the close channel(s) that they were started with must be closed,
// either before Shutdown gets called or while it is waiting.
//
// Shutdown may be called multiple times, even concurrently. All such calls will
// block until all goroutines have terminated.
Shutdown()
// WaitForCacheSync blocks until all started informers' caches were synced
// or the stop channel gets closed.
WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool
// ForResource gives generic access to a shared informer of the matching type.
ForResource(resource schema.GroupVersionResource) (GenericInformer, error)
// InformerFor returns the SharedIndexInformer for obj using an internal
// client.
InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer
Apiregistration() apiregistration.Interface
}
func (f *sharedInformerFactory) Apiregistration() apiregistration.Interface {
return apiregistration.New(f, f.namespace, f.tweakListOptions)
}
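A hedged usage sketch for the factory above; client is an assumed, already-built kube-aggregator clientset and the function name is illustrative:
package example

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/labels"
	clientset "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
	externalversions "k8s.io/kube-aggregator/pkg/client/informers/externalversions"
)

// listAPIServices starts the shared informer factory, waits for its caches,
// and lists APIServices through the shared lister.
func listAPIServices(client clientset.Interface) error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	factory := externalversions.NewSharedInformerFactory(client, 10*time.Minute)
	lister := factory.Apiregistration().V1().APIServices().Lister()

	factory.Start(ctx.Done())
	for typ, ok := range factory.WaitForCacheSync(ctx.Done()) {
		if !ok {
			return fmt.Errorf("cache failed to sync: %v", typ)
		}
	}

	services, err := lister.List(labels.Everything())
	if err != nil {
		return err
	}
	fmt.Printf("observed %d APIServices\n", len(services))
	return nil
}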

View File

@ -1,67 +0,0 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package externalversions
import (
"fmt"
schema "k8s.io/apimachinery/pkg/runtime/schema"
cache "k8s.io/client-go/tools/cache"
v1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
v1beta1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1"
)
// GenericInformer is a type of SharedIndexInformer that will locate and delegate to other
// sharedInformers based on type
type GenericInformer interface {
Informer() cache.SharedIndexInformer
Lister() cache.GenericLister
}
type genericInformer struct {
informer cache.SharedIndexInformer
resource schema.GroupResource
}
// Informer returns the SharedIndexInformer.
func (f *genericInformer) Informer() cache.SharedIndexInformer {
return f.informer
}
// Lister returns the GenericLister.
func (f *genericInformer) Lister() cache.GenericLister {
return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource)
}
// ForResource gives generic access to a shared informer of the matching type
// TODO extend this to unknown resources with a client pool
func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) {
switch resource {
// Group=apiregistration.k8s.io, Version=v1
case v1.SchemeGroupVersion.WithResource("apiservices"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Apiregistration().V1().APIServices().Informer()}, nil
// Group=apiregistration.k8s.io, Version=v1beta1
case v1beta1.SchemeGroupVersion.WithResource("apiservices"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Apiregistration().V1beta1().APIServices().Informer()}, nil
}
return nil, fmt.Errorf("no informer found for %v", resource)
}

View File

@ -1,40 +0,0 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package internalinterfaces
import (
time "time"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
cache "k8s.io/client-go/tools/cache"
clientset "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
)
// NewInformerFunc takes clientset.Interface and time.Duration to return a SharedIndexInformer.
type NewInformerFunc func(clientset.Interface, time.Duration) cache.SharedIndexInformer
// SharedInformerFactory a small interface to allow for adding an informer without an import cycle
type SharedInformerFactory interface {
Start(stopCh <-chan struct{})
InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer
}
// TweakListOptionsFunc is a function that transforms a v1.ListOptions.
type TweakListOptionsFunc func(*v1.ListOptions)

View File

@ -1,48 +0,0 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by lister-gen. DO NOT EDIT.
package v1
import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/listers"
"k8s.io/client-go/tools/cache"
v1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
)
// APIServiceLister helps list APIServices.
// All objects returned here must be treated as read-only.
type APIServiceLister interface {
// List lists all APIServices in the indexer.
// Objects returned here must be treated as read-only.
List(selector labels.Selector) (ret []*v1.APIService, err error)
// Get retrieves the APIService from the index for a given name.
// Objects returned here must be treated as read-only.
Get(name string) (*v1.APIService, error)
APIServiceListerExpansion
}
// aPIServiceLister implements the APIServiceLister interface.
type aPIServiceLister struct {
listers.ResourceIndexer[*v1.APIService]
}
// NewAPIServiceLister returns a new APIServiceLister.
func NewAPIServiceLister(indexer cache.Indexer) APIServiceLister {
return &aPIServiceLister{listers.New[*v1.APIService](indexer, v1.Resource("apiservice"))}
}

View File

@ -1,23 +0,0 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by lister-gen. DO NOT EDIT.
package v1
// APIServiceListerExpansion allows custom methods to be added to
// APIServiceLister.
type APIServiceListerExpansion interface{}

View File

@ -1,48 +0,0 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by lister-gen. DO NOT EDIT.
package v1beta1
import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/listers"
"k8s.io/client-go/tools/cache"
v1beta1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1"
)
// APIServiceLister helps list APIServices.
// All objects returned here must be treated as read-only.
type APIServiceLister interface {
// List lists all APIServices in the indexer.
// Objects returned here must be treated as read-only.
List(selector labels.Selector) (ret []*v1beta1.APIService, err error)
// Get retrieves the APIService from the index for a given name.
// Objects returned here must be treated as read-only.
Get(name string) (*v1beta1.APIService, error)
APIServiceListerExpansion
}
// aPIServiceLister implements the APIServiceLister interface.
type aPIServiceLister struct {
listers.ResourceIndexer[*v1beta1.APIService]
}
// NewAPIServiceLister returns a new APIServiceLister.
func NewAPIServiceLister(indexer cache.Indexer) APIServiceLister {
return &aPIServiceLister{listers.New[*v1beta1.APIService](indexer, v1beta1.Resource("apiservice"))}
}

View File

@ -1,23 +0,0 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by lister-gen. DO NOT EDIT.
package v1beta1
// APIServiceListerExpansion allows custom methods to be added to
// APIServiceLister.
type APIServiceListerExpansion interface{}

View File

@ -1,41 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"fmt"
"k8s.io/klog/v2"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/tools/cache"
)
// WaitForCacheSync is a wrapper around cache.WaitForCacheSync that generates log messages
// indicating that the controller identified by controllerName is waiting for syncs, followed by
// either a successful or failed sync.
func WaitForCacheSync(controllerName string, stopCh <-chan struct{}, cacheSyncs ...cache.InformerSynced) bool {
klog.Infof("Waiting for caches to sync for %s controller", controllerName)
if !cache.WaitForCacheSync(stopCh, cacheSyncs...) {
utilruntime.HandleError(fmt.Errorf("Unable to sync caches for %s controller", controllerName))
return false
}
klog.Infof("Caches are synced for %s controller", controllerName)
return true
}
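A short sketch of how a controller would typically call this helper at the top of its Run method; the controller type, import path, and field names here are assumptions for illustration:
package example

import (
	"k8s.io/client-go/tools/cache"
	"k8s.io/kube-aggregator/pkg/controllers"
)

type exampleController struct {
	// apiServiceSynced is the HasSynced function of the informer this controller reads from.
	apiServiceSynced cache.InformerSynced
}

// Run blocks until stopCh is closed; it refuses to start workers if the caches never sync.
func (c *exampleController) Run(stopCh <-chan struct{}) {
	if !controllers.WaitForCacheSync("example", stopCh, c.apiServiceSynced) {
		return
	}
	// ...start workers here...
	<-stopCh
}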

View File

@ -1,253 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package aggregator
import (
"crypto/sha256"
"errors"
"fmt"
"net/http"
"sync"
"time"
restful "github.com/emicklei/go-restful/v3"
"k8s.io/apiserver/pkg/server"
"k8s.io/klog/v2"
v1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
"k8s.io/kube-openapi/pkg/aggregator"
"k8s.io/kube-openapi/pkg/builder"
"k8s.io/kube-openapi/pkg/cached"
"k8s.io/kube-openapi/pkg/common"
"k8s.io/kube-openapi/pkg/common/restfuladapter"
"k8s.io/kube-openapi/pkg/handler"
"k8s.io/kube-openapi/pkg/validation/spec"
)
var ErrAPIServiceNotFound = errors.New("resource not found")
// SpecAggregator calls out to http handlers of APIServices and merges specs. It keeps state of the last
// known specs including the http etag.
type SpecAggregator interface {
AddUpdateAPIService(apiService *v1.APIService, handler http.Handler) error
// UpdateAPIServiceSpec updates the APIService. It returns ErrAPIServiceNotFound if the APIService doesn't exist.
UpdateAPIServiceSpec(apiServiceName string) error
RemoveAPIService(apiServiceName string)
}
const (
aggregatorUser = "system:aggregator"
specDownloadTimeout = time.Minute
localDelegateChainNamePattern = "k8s_internal_local_delegation_chain_%010d"
// A randomly generated UUID to differentiate local and remote eTags.
locallyGeneratedEtagPrefix = "\"6E8F849B434D4B98A569B9D7718876E9-"
)
// openAPISpecInfo is used to store OpenAPI specs.
// The apiService object is used to sort specs with their priorities.
type openAPISpecInfo struct {
apiService v1.APIService
// spec is the cached OpenAPI spec
spec cached.LastSuccess[*spec.Swagger]
// The downloader is used only for non-local apiservices to
// re-update the spec every so often.
// Calling Get() is not thread safe and should only be called by a single
// thread via the openapi controller.
downloader CacheableDownloader
}
type specAggregator struct {
// mutex protects the specsByAPIServiceName map and its contents.
mutex sync.Mutex
// Map of API Services' OpenAPI specs by their name
specsByAPIServiceName map[string]*openAPISpecInfo
// provided for dynamic OpenAPI spec
openAPIVersionedService *handler.OpenAPIService
downloader *Downloader
}
func buildAndRegisterSpecAggregatorForLocalServices(downloader *Downloader, aggregatorSpec *spec.Swagger, delegationHandlers []http.Handler, pathHandler common.PathHandler) *specAggregator {
s := &specAggregator{
downloader: downloader,
specsByAPIServiceName: map[string]*openAPISpecInfo{},
}
cachedAggregatorSpec := cached.Static(aggregatorSpec, "never-changes")
s.addLocalSpec(fmt.Sprintf(localDelegateChainNamePattern, 0), cachedAggregatorSpec)
for i, handler := range delegationHandlers {
name := fmt.Sprintf(localDelegateChainNamePattern, i+1)
spec := NewCacheableDownloader(name, downloader, handler)
s.addLocalSpec(name, spec)
}
s.openAPIVersionedService = handler.NewOpenAPIServiceLazy(s.buildMergeSpecLocked())
s.openAPIVersionedService.RegisterOpenAPIVersionedService("/openapi/v2", pathHandler)
return s
}
// BuildAndRegisterAggregator registers the OpenAPI aggregator handler. This function is not thread safe as it is only called on startup.
func BuildAndRegisterAggregator(downloader *Downloader, delegationTarget server.DelegationTarget, webServices []*restful.WebService,
config *common.Config, pathHandler common.PathHandler) (SpecAggregator, error) {
aggregatorOpenAPISpec, err := builder.BuildOpenAPISpecFromRoutes(restfuladapter.AdaptWebServices(webServices), config)
if err != nil {
return nil, err
}
aggregatorOpenAPISpec.Definitions = handler.PruneDefaults(aggregatorOpenAPISpec.Definitions)
var delegationHandlers []http.Handler
for delegate := delegationTarget; delegate != nil; delegate = delegate.NextDelegate() {
handler := delegate.UnprotectedHandler()
if handler == nil {
continue
}
// ignore errors for the empty delegate we attach at the end the chain
// atm the empty delegate returns 503 when the server hasn't been fully initialized
// and the spec downloader only silences 404s
if len(delegate.ListedPaths()) == 0 && delegate.NextDelegate() == nil {
continue
}
delegationHandlers = append(delegationHandlers, handler)
}
s := buildAndRegisterSpecAggregatorForLocalServices(downloader, aggregatorOpenAPISpec, delegationHandlers, pathHandler)
return s, nil
}
func (s *specAggregator) addLocalSpec(name string, cachedSpec cached.Value[*spec.Swagger]) {
service := v1.APIService{}
service.Name = name
info := &openAPISpecInfo{
apiService: service,
}
info.spec.Store(cachedSpec)
s.specsByAPIServiceName[name] = info
}
// buildMergeSpecLocked creates a new cached mergeSpec from the list of cached specs.
func (s *specAggregator) buildMergeSpecLocked() cached.Value[*spec.Swagger] {
apiServices := make([]*v1.APIService, 0, len(s.specsByAPIServiceName))
for k := range s.specsByAPIServiceName {
apiServices = append(apiServices, &s.specsByAPIServiceName[k].apiService)
}
sortByPriority(apiServices)
caches := make([]cached.Value[*spec.Swagger], len(apiServices))
for i, apiService := range apiServices {
caches[i] = &(s.specsByAPIServiceName[apiService.Name].spec)
}
return cached.MergeList(func(results []cached.Result[*spec.Swagger]) (*spec.Swagger, string, error) {
var merged *spec.Swagger
etags := make([]string, 0, len(results))
for _, specInfo := range results {
result, etag, err := specInfo.Get()
if err != nil {
// APIService name and err message will be included in
// the error message as part of decorateError
klog.Warning(err)
continue
}
if merged == nil {
merged = &spec.Swagger{}
*merged = *result
// Paths, Definitions and parameters are set by
// MergeSpecsIgnorePathConflictRenamingDefinitionsAndParameters
merged.Paths = nil
merged.Definitions = nil
merged.Parameters = nil
}
etags = append(etags, etag)
if err := aggregator.MergeSpecsIgnorePathConflictRenamingDefinitionsAndParameters(merged, result); err != nil {
return nil, "", fmt.Errorf("failed to build merge specs: %v", err)
}
}
// Printing the etags list is stable because it is sorted.
return merged, fmt.Sprintf("%x", sha256.Sum256([]byte(fmt.Sprintf("%#v", etags)))), nil
}, caches)
}
// updateServiceLocked updates the spec cache by downloading the latest
// version of the spec.
func (s *specAggregator) updateServiceLocked(name string) error {
specInfo, exists := s.specsByAPIServiceName[name]
if !exists {
return ErrAPIServiceNotFound
}
result, etag, err := specInfo.downloader.Get()
filteredResult := cached.Transform[*spec.Swagger](func(result *spec.Swagger, etag string, err error) (*spec.Swagger, string, error) {
if err != nil {
return nil, "", err
}
group := specInfo.apiService.Spec.Group
version := specInfo.apiService.Spec.Version
return aggregator.FilterSpecByPathsWithoutSideEffects(result, []string{"/apis/" + group + "/" + version + "/"}), etag, nil
}, cached.Result[*spec.Swagger]{Value: result, Etag: etag, Err: err})
specInfo.spec.Store(filteredResult)
return err
}
// UpdateAPIServiceSpec updates the api service. It is thread safe.
func (s *specAggregator) UpdateAPIServiceSpec(apiServiceName string) error {
s.mutex.Lock()
defer s.mutex.Unlock()
return s.updateServiceLocked(apiServiceName)
}
// AddUpdateAPIService adds the api service. It is thread safe. If the
// apiservice already exists, it will be updated.
func (s *specAggregator) AddUpdateAPIService(apiService *v1.APIService, handler http.Handler) error {
if apiService.Spec.Service == nil {
return nil
}
s.mutex.Lock()
defer s.mutex.Unlock()
existingSpec, exists := s.specsByAPIServiceName[apiService.Name]
if !exists {
specInfo := &openAPISpecInfo{
apiService: *apiService,
downloader: NewCacheableDownloader(apiService.Name, s.downloader, handler),
}
specInfo.spec.Store(cached.Result[*spec.Swagger]{Err: fmt.Errorf("spec for apiservice %s is not yet available", apiService.Name)})
s.specsByAPIServiceName[apiService.Name] = specInfo
s.openAPIVersionedService.UpdateSpecLazy(s.buildMergeSpecLocked())
} else {
existingSpec.apiService = *apiService
existingSpec.downloader.UpdateHandler(handler)
}
return nil
}
// RemoveAPIService removes an api service from OpenAPI aggregation. If it does not exist, no error is returned.
// It is thread safe.
func (s *specAggregator) RemoveAPIService(apiServiceName string) {
s.mutex.Lock()
defer s.mutex.Unlock()
if _, exists := s.specsByAPIServiceName[apiServiceName]; !exists {
return
}
delete(s.specsByAPIServiceName, apiServiceName)
// Re-create the mergeSpec for the new list of apiservices
s.openAPIVersionedService.UpdateSpecLazy(s.buildMergeSpecLocked())
}

View File

@ -1,203 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package aggregator
import (
"crypto/sha512"
"fmt"
"net/http"
"strings"
"sync/atomic"
"k8s.io/apiserver/pkg/authentication/user"
"k8s.io/apiserver/pkg/endpoints/request"
"k8s.io/kube-openapi/pkg/validation/spec"
)
type CacheableDownloader interface {
UpdateHandler(http.Handler)
Get() (*spec.Swagger, string, error)
}
// cacheableDownloader is a downloader that will always return the data
// and the etag.
type cacheableDownloader struct {
name string
downloader *Downloader
// handler is the http Handler for the apiservice that can be replaced
handler atomic.Pointer[http.Handler]
etag string
spec *spec.Swagger
}
// NewCacheableDownloader creates a downloader that also returns the etag, making it useful to use as a cached dependency.
func NewCacheableDownloader(apiServiceName string, downloader *Downloader, handler http.Handler) CacheableDownloader {
c := &cacheableDownloader{
name: apiServiceName,
downloader: downloader,
}
c.handler.Store(&handler)
return c
}
func (d *cacheableDownloader) UpdateHandler(handler http.Handler) {
d.handler.Store(&handler)
}
func (d *cacheableDownloader) Get() (*spec.Swagger, string, error) {
spec, etag, err := d.get()
if err != nil {
return spec, etag, fmt.Errorf("failed to download %v: %v", d.name, err)
}
return spec, etag, err
}
func (d *cacheableDownloader) get() (*spec.Swagger, string, error) {
h := *d.handler.Load()
swagger, etag, status, err := d.downloader.Download(h, d.etag)
if err != nil {
return nil, "", err
}
switch status {
case http.StatusNotModified:
// Nothing has changed, do nothing.
case http.StatusOK:
if swagger != nil {
d.etag = etag
d.spec = swagger
break
}
fallthrough
case http.StatusNotFound:
return nil, "", ErrAPIServiceNotFound
default:
return nil, "", fmt.Errorf("invalid status code: %v", status)
}
return d.spec, d.etag, nil
}
// Downloader is the OpenAPI downloader type. It will try to download spec from /openapi/v2 or /swagger.json endpoint.
type Downloader struct {
}
// NewDownloader creates a new OpenAPI Downloader.
func NewDownloader() Downloader {
return Downloader{}
}
func (s *Downloader) handlerWithUser(handler http.Handler, info user.Info) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
req = req.WithContext(request.WithUser(req.Context(), info))
handler.ServeHTTP(w, req)
})
}
func etagFor(data []byte) string {
return fmt.Sprintf("%s%X\"", locallyGeneratedEtagPrefix, sha512.Sum512(data))
}
// Download downloads openAPI spec from /openapi/v2 endpoint of the given handler.
// httpStatus is only valid if err == nil
func (s *Downloader) Download(handler http.Handler, etag string) (returnSpec *spec.Swagger, newEtag string, httpStatus int, err error) {
handler = s.handlerWithUser(handler, &user.DefaultInfo{Name: aggregatorUser})
handler = http.TimeoutHandler(handler, specDownloadTimeout, "request timed out")
req, err := http.NewRequest("GET", "/openapi/v2", nil)
if err != nil {
return nil, "", 0, err
}
req.Header.Add("Accept", "application/json")
// Only pass eTag if it is not generated locally
if len(etag) > 0 && !strings.HasPrefix(etag, locallyGeneratedEtagPrefix) {
req.Header.Add("If-None-Match", etag)
}
writer := newInMemoryResponseWriter()
handler.ServeHTTP(writer, req)
switch writer.respCode {
case http.StatusNotModified:
if len(etag) == 0 {
return nil, etag, http.StatusNotModified, fmt.Errorf("http.StatusNotModified is not allowed in absence of etag")
}
return nil, etag, http.StatusNotModified, nil
case http.StatusNotFound:
// Gracefully skip 404, assuming the server won't provide any spec
return nil, "", http.StatusNotFound, nil
case http.StatusOK:
openAPISpec := &spec.Swagger{}
if err := openAPISpec.UnmarshalJSON(writer.data); err != nil {
return nil, "", 0, err
}
newEtag = writer.Header().Get("Etag")
if len(newEtag) == 0 {
newEtag = etagFor(writer.data)
if len(etag) > 0 && strings.HasPrefix(etag, locallyGeneratedEtagPrefix) {
// We were called with an etag, but the server did not report one.
// That means the server does not support etags, and the etag passed in
// was generated locally by us on a previous download. Just compare etags and
// return StatusNotModified if they are the same.
if etag == newEtag {
return nil, etag, http.StatusNotModified, nil
}
}
}
return openAPISpec, newEtag, http.StatusOK, nil
default:
return nil, "", 0, fmt.Errorf("failed to retrieve openAPI spec, http error: %s", writer.String())
}
}
// inMemoryResponseWriter is an http.ResponseWriter that keeps the response in memory.
type inMemoryResponseWriter struct {
writeHeaderCalled bool
header http.Header
respCode int
data []byte
}
func newInMemoryResponseWriter() *inMemoryResponseWriter {
return &inMemoryResponseWriter{header: http.Header{}}
}
func (r *inMemoryResponseWriter) Header() http.Header {
return r.header
}
func (r *inMemoryResponseWriter) WriteHeader(code int) {
r.writeHeaderCalled = true
r.respCode = code
}
func (r *inMemoryResponseWriter) Write(in []byte) (int, error) {
if !r.writeHeaderCalled {
r.WriteHeader(http.StatusOK)
}
r.data = append(r.data, in...)
return len(in), nil
}
func (r *inMemoryResponseWriter) String() string {
s := fmt.Sprintf("ResponseCode: %d", r.respCode)
if r.data != nil {
s += fmt.Sprintf(", Body: %s", string(r.data))
}
if r.header != nil {
s += fmt.Sprintf(", Header: %s", r.header)
}
return s
}
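A hedged sketch of driving this Downloader against an in-process handler (any http.Handler that serves /openapi/v2); the surrounding function is illustrative only:
package example

import (
	"fmt"
	"net/http"

	"k8s.io/kube-aggregator/pkg/controllers/openapi/aggregator"
)

// fetchSpec downloads the OpenAPI v2 document once; callers can pass the
// returned etag on later calls to get http.StatusNotModified when unchanged.
func fetchSpec(handler http.Handler) error {
	downloader := aggregator.NewDownloader()
	swagger, etag, status, err := downloader.Download(handler, "")
	if err != nil {
		return err
	}
	if status != http.StatusOK || swagger == nil {
		return fmt.Errorf("no spec available, got status %d", status)
	}
	fmt.Printf("downloaded spec with etag %s\n", etag)
	return nil
}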

View File

@ -1,46 +0,0 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package aggregator
import (
"k8s.io/component-base/metrics"
"k8s.io/component-base/metrics/legacyregistry"
)
var (
regenerationCounter = metrics.NewCounterVec(
&metrics.CounterOpts{
Name: "aggregator_openapi_v2_regeneration_count",
Help: "Counter of OpenAPI v2 spec regeneration count broken down by causing APIService name and reason.",
StabilityLevel: metrics.ALPHA,
},
[]string{"apiservice", "reason"},
)
regenerationDurationGauge = metrics.NewGaugeVec(
&metrics.GaugeOpts{
Name: "aggregator_openapi_v2_regeneration_duration",
Help: "Gauge of OpenAPI v2 spec regeneration duration in seconds.",
StabilityLevel: metrics.ALPHA,
},
[]string{"reason"},
)
)
func init() {
legacyregistry.MustRegister(regenerationCounter)
legacyregistry.MustRegister(regenerationDurationGauge)
}

View File

@ -1,78 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package aggregator
import (
"sort"
apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
)
// byPriority can be used in sort.Sort to sort specs with their priorities.
type byPriority struct {
apiServices []*apiregistrationv1.APIService
groupPriorities map[string]int32
}
func (a byPriority) Len() int { return len(a.apiServices) }
func (a byPriority) Swap(i, j int) {
a.apiServices[i], a.apiServices[j] = a.apiServices[j], a.apiServices[i]
}
func (a byPriority) Less(i, j int) bool {
// All local specs will come first
if a.apiServices[i].Spec.Service == nil && a.apiServices[j].Spec.Service != nil {
return true
}
if a.apiServices[i].Spec.Service != nil && a.apiServices[j].Spec.Service == nil {
return false
}
// WARNING: This will result in not following priorities for local APIServices.
if a.apiServices[i].Spec.Service == nil {
// Sort local specs with their name. This is the order in the delegation chain (aggregator first).
return a.apiServices[i].Name < a.apiServices[j].Name
}
var iPriority, jPriority int32
if a.apiServices[i].Spec.Group == a.apiServices[j].Spec.Group {
iPriority = a.apiServices[i].Spec.VersionPriority
jPriority = a.apiServices[j].Spec.VersionPriority
} else {
iPriority = a.groupPriorities[a.apiServices[i].Spec.Group]
jPriority = a.groupPriorities[a.apiServices[j].Spec.Group]
}
if iPriority != jPriority {
// Sort by priority, higher first
return iPriority > jPriority
}
// Sort by service name.
return a.apiServices[i].Name < a.apiServices[j].Name
}
func sortByPriority(apiServices []*apiregistrationv1.APIService) {
b := byPriority{
apiServices: apiServices,
groupPriorities: map[string]int32{},
}
for _, apiService := range apiServices {
if apiService.Spec.Service == nil {
continue
}
if pr, found := b.groupPriorities[apiService.Spec.Group]; !found || apiService.Spec.GroupPriorityMinimum > pr {
b.groupPriorities[apiService.Spec.Group] = apiService.Spec.GroupPriorityMinimum
}
}
sort.Sort(b)
}
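For illustration, a small same-package sketch (hypothetical, since sortByPriority is unexported) showing the resulting order: local delegate specs come first by name, then aggregated APIServices by group priority, version priority, and name:
package aggregator

import (
	"fmt"

	apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
)

func exampleSortByPriority() {
	local := &apiregistrationv1.APIService{}
	local.Name = "k8s_internal_local_delegation_chain_0000000001"

	remote := &apiregistrationv1.APIService{}
	remote.Name = "v1beta1.metrics.k8s.io"
	remote.Spec.Group = "metrics.k8s.io"
	remote.Spec.Version = "v1beta1"
	remote.Spec.Service = &apiregistrationv1.ServiceReference{Namespace: "kube-system", Name: "metrics-server"}
	remote.Spec.GroupPriorityMinimum = 100
	remote.Spec.VersionPriority = 100

	services := []*apiregistrationv1.APIService{remote, local}
	sortByPriority(services)
	// Prints the local delegate first, then the remote APIService.
	fmt.Println(services[0].Name, services[1].Name)
}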

View File

@ -1,165 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package openapi
import (
"fmt"
"net/http"
"time"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog/v2"
v1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
"k8s.io/kube-aggregator/pkg/controllers/openapi/aggregator"
)
const (
successfulUpdateDelay = time.Minute
successfulUpdateDelayLocal = time.Second
failedUpdateMaxExpDelay = time.Hour
)
type syncAction int
const (
syncRequeue syncAction = iota
syncRequeueRateLimited
syncNothing
)
// AggregationController periodically checks for changes in the OpenAPI specs of APIServices and updates/removes
// them if necessary.
type AggregationController struct {
openAPIAggregationManager aggregator.SpecAggregator
queue workqueue.TypedRateLimitingInterface[string]
downloader *aggregator.Downloader
// To allow injection for testing.
syncHandler func(key string) (syncAction, error)
}
// NewAggregationController creates new OpenAPI aggregation controller.
func NewAggregationController(downloader *aggregator.Downloader, openAPIAggregationManager aggregator.SpecAggregator) *AggregationController {
c := &AggregationController{
openAPIAggregationManager: openAPIAggregationManager,
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.NewTypedItemExponentialFailureRateLimiter[string](successfulUpdateDelay, failedUpdateMaxExpDelay),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "open_api_aggregation_controller"},
),
downloader: downloader,
}
c.syncHandler = c.sync
return c
}
// Run starts OpenAPI AggregationController
func (c *AggregationController) Run(stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
defer c.queue.ShutDown()
klog.Info("Starting OpenAPI AggregationController")
defer klog.Info("Shutting down OpenAPI AggregationController")
go wait.Until(c.runWorker, time.Second, stopCh)
<-stopCh
}
func (c *AggregationController) runWorker() {
for c.processNextWorkItem() {
}
}
// processNextWorkItem deals with one key off the queue. It returns false when it's time to quit.
func (c *AggregationController) processNextWorkItem() bool {
key, quit := c.queue.Get()
defer c.queue.Done(key)
if quit {
return false
}
klog.V(4).Infof("OpenAPI AggregationController: Processing item %s", key)
action, err := c.syncHandler(key)
if err != nil {
utilruntime.HandleError(fmt.Errorf("loading OpenAPI spec for %q failed with: %v", key, err))
}
switch action {
case syncRequeue:
c.queue.AddAfter(key, successfulUpdateDelay)
case syncRequeueRateLimited:
klog.Infof("OpenAPI AggregationController: action for item %s: Rate Limited Requeue.", key)
c.queue.AddRateLimited(key)
case syncNothing:
c.queue.Forget(key)
}
return true
}
func (c *AggregationController) sync(key string) (syncAction, error) {
if err := c.openAPIAggregationManager.UpdateAPIServiceSpec(key); err != nil {
if err == aggregator.ErrAPIServiceNotFound {
return syncNothing, nil
} else {
return syncRequeueRateLimited, err
}
}
return syncRequeue, nil
}
// AddAPIService adds a new API Service to OpenAPI Aggregation.
func (c *AggregationController) AddAPIService(handler http.Handler, apiService *v1.APIService) {
if apiService.Spec.Service == nil {
return
}
if err := c.openAPIAggregationManager.AddUpdateAPIService(apiService, handler); err != nil {
utilruntime.HandleError(fmt.Errorf("adding %q to AggregationController failed with: %v", apiService.Name, err))
}
c.queue.AddAfter(apiService.Name, time.Second)
}
// UpdateAPIService updates API Service's info and handler.
func (c *AggregationController) UpdateAPIService(handler http.Handler, apiService *v1.APIService) {
if apiService.Spec.Service == nil {
return
}
if err := c.openAPIAggregationManager.UpdateAPIServiceSpec(apiService.Name); err != nil {
utilruntime.HandleError(fmt.Errorf("Error updating APIService %q with err: %v", apiService.Name, err))
}
key := apiService.Name
if c.queue.NumRequeues(key) > 0 {
// The item has failed before. Remove it from failure queue and
// update it in a second
c.queue.Forget(key)
c.queue.AddAfter(key, time.Second)
}
// Else: the item has succeeded before and will be updated again soon (after successfulUpdateDelay);
// we don't add it again, as that would cause duplicate items in the queue.
}
// RemoveAPIService removes API Service from OpenAPI Aggregation Controller.
func (c *AggregationController) RemoveAPIService(apiServiceName string) {
c.openAPIAggregationManager.RemoveAPIService(apiServiceName)
// This will only remove it if it was failing before. If it was successful, processNextWorkItem will figure it out
// and will not add it again to the queue.
c.queue.Forget(apiServiceName)
}
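A hedged wiring sketch showing how this controller is typically connected to a SpecAggregator built earlier via BuildAndRegisterAggregator; the import paths follow the package declarations above and the function name is an assumption:
package example

import (
	"k8s.io/kube-aggregator/pkg/controllers/openapi"
	"k8s.io/kube-aggregator/pkg/controllers/openapi/aggregator"
)

// startOpenAPIAggregation launches the aggregation controller; Run blocks
// until stopCh closes, so it runs in its own goroutine.
func startOpenAPIAggregation(specAggregator aggregator.SpecAggregator, stopCh <-chan struct{}) {
	downloader := aggregator.NewDownloader()
	controller := openapi.NewAggregationController(&downloader, specAggregator)
	go controller.Run(stopCh)
}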

View File

@ -1,319 +0,0 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package aggregator
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"net/http"
"strings"
"sync"
"time"
"github.com/emicklei/go-restful/v3"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apiserver/pkg/endpoints/metrics"
"k8s.io/apiserver/pkg/server"
"k8s.io/apiserver/pkg/server/mux"
"k8s.io/apiserver/pkg/server/routes"
"k8s.io/klog/v2"
v1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
"k8s.io/kube-openapi/pkg/common"
"k8s.io/kube-openapi/pkg/handler3"
"k8s.io/kube-openapi/pkg/openapiconv"
v2aggregator "k8s.io/kube-aggregator/pkg/controllers/openapi/aggregator"
)
var ErrAPIServiceNotFound = errors.New("resource not found")
// SpecProxier proxies OpenAPI V3 requests to their respective APIService
type SpecProxier interface {
AddUpdateAPIService(handler http.Handler, apiService *v1.APIService)
// UpdateAPIServiceSpec updates the APIService. It returns ErrAPIServiceNotFound if the APIService doesn't exist.
UpdateAPIServiceSpec(apiServiceName string) error
RemoveAPIServiceSpec(apiServiceName string)
GetAPIServiceNames() []string
}
const (
aggregatorUser = "system:aggregator"
specDownloadTimeout = 60 * time.Second
localDelegateChainNamePrefix = "k8s_internal_local_delegation_chain_"
localDelegateChainNamePattern = localDelegateChainNamePrefix + "%010d"
openAPIV2Converter = "openapiv2converter"
)
// IsLocalAPIService returns true for local specs from delegates.
func IsLocalAPIService(apiServiceName string) bool {
return strings.HasPrefix(apiServiceName, localDelegateChainNamePrefix)
}
// GetAPIServiceNames returns the names of APIServices recorded in apiServiceInfo.
// We use this function to pass the names of local APIServices to the controller in this package,
// so that the controller can periodically sync the OpenAPI spec from delegation API servers.
func (s *specProxier) GetAPIServiceNames() []string {
s.rwMutex.RLock()
defer s.rwMutex.RUnlock()
names := make([]string, 0, len(s.apiServiceInfo))
for key := range s.apiServiceInfo {
names = append(names, key)
}
return names
}
// BuildAndRegisterAggregator registers the OpenAPI aggregator handler. This function is not thread safe as it is only called on startup.
func BuildAndRegisterAggregator(downloader Downloader, delegationTarget server.DelegationTarget, aggregatorService *restful.Container, openAPIConfig *common.OpenAPIV3Config, pathHandler common.PathHandlerByGroupVersion) (SpecProxier, error) {
s := &specProxier{
apiServiceInfo: map[string]*openAPIV3APIServiceInfo{},
downloader: downloader,
}
if aggregatorService != nil && openAPIConfig != nil {
// Make native types exposed by aggregator available to the aggregated
// OpenAPI (normal handle is disabled by skipOpenAPIInstallation option)
aggregatorLocalServiceName := "k8s_internal_local_kube_aggregator_types"
v3Mux := mux.NewPathRecorderMux(aggregatorLocalServiceName)
_ = routes.OpenAPI{
V3Config: openAPIConfig,
}.InstallV3(aggregatorService, v3Mux)
s.AddUpdateAPIService(v3Mux, &v1.APIService{
ObjectMeta: metav1.ObjectMeta{
Name: aggregatorLocalServiceName,
},
})
s.UpdateAPIServiceSpec(aggregatorLocalServiceName)
}
i := 1
for delegate := delegationTarget; delegate != nil; delegate = delegate.NextDelegate() {
handler := delegate.UnprotectedHandler()
if handler == nil {
continue
}
apiServiceName := fmt.Sprintf(localDelegateChainNamePattern, i)
localAPIService := v1.APIService{}
localAPIService.Name = apiServiceName
s.AddUpdateAPIService(handler, &localAPIService)
s.UpdateAPIServiceSpec(apiServiceName)
i++
}
handler := handler3.NewOpenAPIService()
s.openAPIV2ConverterHandler = handler
openAPIV2ConverterMux := mux.NewPathRecorderMux(openAPIV2Converter)
s.openAPIV2ConverterHandler.RegisterOpenAPIV3VersionedService("/openapi/v3", openAPIV2ConverterMux)
openAPIV2ConverterAPIService := v1.APIService{}
openAPIV2ConverterAPIService.Name = openAPIV2Converter
s.AddUpdateAPIService(openAPIV2ConverterMux, &openAPIV2ConverterAPIService)
s.register(pathHandler)
return s, nil
}
// AddUpdateAPIService adds or updates the api service. It is thread safe.
func (s *specProxier) AddUpdateAPIService(handler http.Handler, apiservice *v1.APIService) {
s.rwMutex.Lock()
defer s.rwMutex.Unlock()
// If the APIService is being updated, use the existing struct.
if apiServiceInfo, ok := s.apiServiceInfo[apiservice.Name]; ok {
apiServiceInfo.apiService = *apiservice
apiServiceInfo.handler = handler
return
}
s.apiServiceInfo[apiservice.Name] = &openAPIV3APIServiceInfo{
apiService: *apiservice,
handler: handler,
}
}
func getGroupVersionStringFromAPIService(apiService v1.APIService) string {
if apiService.Spec.Group == "" && apiService.Spec.Version == "" {
return ""
}
return "apis/" + apiService.Spec.Group + "/" + apiService.Spec.Version
}
// UpdateAPIServiceSpec updates all the OpenAPI v3 specs that the APIService serves.
// It is thread safe.
func (s *specProxier) UpdateAPIServiceSpec(apiServiceName string) error {
s.rwMutex.Lock()
defer s.rwMutex.Unlock()
return s.updateAPIServiceSpecLocked(apiServiceName)
}
func (s *specProxier) updateAPIServiceSpecLocked(apiServiceName string) error {
apiService, exists := s.apiServiceInfo[apiServiceName]
if !exists {
return ErrAPIServiceNotFound
}
if !apiService.isLegacyAPIService {
gv, httpStatus, err := s.downloader.OpenAPIV3Root(apiService.handler)
if err != nil {
return err
}
if httpStatus == http.StatusNotFound {
apiService.isLegacyAPIService = true
} else {
s.apiServiceInfo[apiServiceName].discovery = gv
return nil
}
}
newDownloader := v2aggregator.Downloader{}
v2Spec, etag, httpStatus, err := newDownloader.Download(apiService.handler, apiService.etag)
if err != nil {
return err
}
apiService.etag = etag
if httpStatus == http.StatusOK {
v3Spec := openapiconv.ConvertV2ToV3(v2Spec)
s.openAPIV2ConverterHandler.UpdateGroupVersion(getGroupVersionStringFromAPIService(apiService.apiService), v3Spec)
s.updateAPIServiceSpecLocked(openAPIV2Converter)
}
return nil
}
type specProxier struct {
// mutex protects all members of this struct.
rwMutex sync.RWMutex
// OpenAPI V3 specs by APIService name
apiServiceInfo map[string]*openAPIV3APIServiceInfo
// For downloading the OpenAPI v3 specs from apiservices
downloader Downloader
openAPIV2ConverterHandler *handler3.OpenAPIService
}
var _ SpecProxier = &specProxier{}
type openAPIV3APIServiceInfo struct {
apiService v1.APIService
handler http.Handler
discovery *handler3.OpenAPIV3Discovery
// These fields are only used if the /openapi/v3 endpoint is not served by an APIService
// Legacy APIService indicates that an APIService does not support OpenAPI V3, and the OpenAPI V2
// will be downloaded, converted to V3 (lossy), and served by the aggregator
etag string
isLegacyAPIService bool
}
// RemoveAPIServiceSpec removes an api service from the OpenAPI map. If it does not exist, no error is returned.
// It is thread safe.
func (s *specProxier) RemoveAPIServiceSpec(apiServiceName string) {
s.rwMutex.Lock()
defer s.rwMutex.Unlock()
if apiServiceInfo, ok := s.apiServiceInfo[apiServiceName]; ok {
s.openAPIV2ConverterHandler.DeleteGroupVersion(getGroupVersionStringFromAPIService(apiServiceInfo.apiService))
_ = s.updateAPIServiceSpecLocked(openAPIV2Converter)
delete(s.apiServiceInfo, apiServiceName)
}
}
func (s *specProxier) getOpenAPIV3Root() handler3.OpenAPIV3Discovery {
s.rwMutex.RLock()
defer s.rwMutex.RUnlock()
merged := handler3.OpenAPIV3Discovery{
Paths: make(map[string]handler3.OpenAPIV3DiscoveryGroupVersion),
}
for _, apiServiceInfo := range s.apiServiceInfo {
if apiServiceInfo.discovery == nil {
continue
}
for key, item := range apiServiceInfo.discovery.Paths {
merged.Paths[key] = item
}
}
return merged
}
// handleDiscovery is the handler for OpenAPI V3 Discovery
func (s *specProxier) handleDiscovery(w http.ResponseWriter, r *http.Request) {
merged := s.getOpenAPIV3Root()
j, err := json.Marshal(&merged)
if err != nil {
w.WriteHeader(500)
klog.Errorf("failed to created merged OpenAPIv3 discovery response: %s", err.Error())
return
}
http.ServeContent(w, r, "/openapi/v3", time.Now(), bytes.NewReader(j))
}
// handleGroupVersion is the OpenAPI V3 handler for a specified group/version
func (s *specProxier) handleGroupVersion(w http.ResponseWriter, r *http.Request) {
s.rwMutex.RLock()
defer s.rwMutex.RUnlock()
// TODO: Import this logic from kube-openapi instead of duplicating
// URLs for OpenAPI V3 have the format /openapi/v3/<groupversionpath>
// SplitAfterN with 4 yields ["/", "openapi/", "v3/", <groupversionpath>]; index 3 is the group/version path
url := strings.SplitAfterN(r.URL.Path, "/", 4)
targetGV := url[3]
for _, apiServiceInfo := range s.apiServiceInfo {
if apiServiceInfo.discovery == nil {
continue
}
for key := range apiServiceInfo.discovery.Paths {
if targetGV == key {
apiServiceInfo.handler.ServeHTTP(w, r)
return
}
}
}
// No group-versions match the desired request
w.WriteHeader(404)
}
// Register registers the OpenAPI V3 Discovery and GroupVersion handlers
func (s *specProxier) register(handler common.PathHandlerByGroupVersion) {
handler.Handle("/openapi/v3", metrics.InstrumentHandlerFunc("GET",
/* group = */ "",
/* version = */ "",
/* resource = */ "",
/* subresource = */ "openapi/v3",
/* scope = */ "",
/* component = */ "",
/* deprecated */ false,
/* removedRelease */ "",
http.HandlerFunc(s.handleDiscovery)))
handler.HandlePrefix("/openapi/v3/", metrics.InstrumentHandlerFunc("GET",
/* group = */ "",
/* version = */ "",
/* resource = */ "",
/* subresource = */ "openapi/v3/",
/* scope = */ "",
/* component = */ "",
/* deprecated */ false,
/* removedRelease */ "",
http.HandlerFunc(s.handleGroupVersion)))
}
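For reference, a minimal self-contained sketch of the path parsing that handleGroupVersion performs; the request path below is an assumed example, not taken from the original file.

package main

import (
	"fmt"
	"strings"
)

func main() {
	// OpenAPI V3 URLs served by the proxier look like /openapi/v3/<groupversionpath>.
	path := "/openapi/v3/apis/apps/v1" // assumed example request path
	parts := strings.SplitAfterN(path, "/", 4)
	// parts == ["/", "openapi/", "v3/", "apis/apps/v1"]
	targetGV := parts[3]
	fmt.Println(targetGV) // "apis/apps/v1", the key compared against discovery.Paths
}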

View File

@ -1,115 +0,0 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package aggregator
import (
"encoding/json"
"fmt"
"net/http"
"k8s.io/apiserver/pkg/authentication/user"
"k8s.io/apiserver/pkg/endpoints/request"
"k8s.io/kube-openapi/pkg/handler3"
)
type NotFoundError struct {
}
func (e *NotFoundError) Error() string {
return ""
}
// Downloader is the OpenAPI downloader type. It tries to download the spec from the /openapi/v3 and /openapi/v3/<group>/<version> endpoints.
type Downloader struct {
}
// NewDownloader creates a new OpenAPI Downloader.
func NewDownloader() Downloader {
return Downloader{}
}
func (s *Downloader) handlerWithUser(handler http.Handler, info user.Info) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
req = req.WithContext(request.WithUser(req.Context(), info))
handler.ServeHTTP(w, req)
})
}
// OpenAPIV3Root downloads the OpenAPI V3 root document from an APIService
func (s *Downloader) OpenAPIV3Root(handler http.Handler) (*handler3.OpenAPIV3Discovery, int, error) {
handler = s.handlerWithUser(handler, &user.DefaultInfo{Name: aggregatorUser})
handler = http.TimeoutHandler(handler, specDownloadTimeout, "request timed out")
req, err := http.NewRequest("GET", "/openapi/v3", nil)
if err != nil {
return nil, 0, err
}
writer := newInMemoryResponseWriter()
handler.ServeHTTP(writer, req)
switch writer.respCode {
case http.StatusNotFound:
return nil, writer.respCode, nil
case http.StatusOK:
groups := handler3.OpenAPIV3Discovery{}
if err := json.Unmarshal(writer.data, &groups); err != nil {
return nil, writer.respCode, err
}
return &groups, writer.respCode, nil
}
return nil, writer.respCode, fmt.Errorf("Error, could not get list of group versions for APIService")
}
// inMemoryResponseWriter is an http.ResponseWriter that keeps the response in memory.
type inMemoryResponseWriter struct {
writeHeaderCalled bool
header http.Header
respCode int
data []byte
}
func newInMemoryResponseWriter() *inMemoryResponseWriter {
return &inMemoryResponseWriter{header: http.Header{}}
}
func (r *inMemoryResponseWriter) Header() http.Header {
return r.header
}
func (r *inMemoryResponseWriter) WriteHeader(code int) {
r.writeHeaderCalled = true
r.respCode = code
}
func (r *inMemoryResponseWriter) Write(in []byte) (int, error) {
if !r.writeHeaderCalled {
r.WriteHeader(http.StatusOK)
}
r.data = append(r.data, in...)
return len(in), nil
}
func (r *inMemoryResponseWriter) String() string {
s := fmt.Sprintf("ResponseCode: %d", r.respCode)
if r.data != nil {
s += fmt.Sprintf(", Body: %s", string(r.data))
}
if r.header != nil {
s += fmt.Sprintf(", Header: %s", r.header)
}
return s
}
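To illustrate the in-process download pattern above, here is a small standalone sketch; it uses httptest.ResponseRecorder in place of inMemoryResponseWriter, and the handler and payload are assumptions for illustration only.

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

func main() {
	// The downloader performs an in-process GET against the APIService handler
	// and captures the response in memory instead of going over the network.
	handler := http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusOK)
		fmt.Fprint(w, `{"paths":{}}`) // assumed minimal discovery payload
	})

	req, err := http.NewRequest("GET", "/openapi/v3", nil)
	if err != nil {
		panic(err)
	}
	rec := httptest.NewRecorder()
	handler.ServeHTTP(rec, req)
	fmt.Println(rec.Code, rec.Body.String()) // 200 {"paths":{}}
}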

View File

@ -1,175 +0,0 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package openapiv3
import (
"fmt"
"net/http"
"time"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog/v2"
"k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
"k8s.io/kube-aggregator/pkg/controllers/openapiv3/aggregator"
)
const (
successfulUpdateDelay = time.Minute
successfulUpdateDelayLocal = time.Second
failedUpdateMaxExpDelay = time.Hour
)
type syncAction int
const (
syncRequeue syncAction = iota
syncRequeueRateLimited
syncNothing
)
// AggregationController periodically checks the list of group versions handled by each APIService and updates the discovery page accordingly
type AggregationController struct {
openAPIAggregationManager aggregator.SpecProxier
queue workqueue.TypedRateLimitingInterface[string]
// To allow injection for testing.
syncHandler func(key string) (syncAction, error)
}
// NewAggregationController creates new OpenAPI aggregation controller.
func NewAggregationController(openAPIAggregationManager aggregator.SpecProxier) *AggregationController {
c := &AggregationController{
openAPIAggregationManager: openAPIAggregationManager,
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.NewTypedItemExponentialFailureRateLimiter[string](successfulUpdateDelay, failedUpdateMaxExpDelay),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "open_api_v3_aggregation_controller"},
),
}
c.syncHandler = c.sync
// update each service at least once, including those that do not come from APIServices, namely local services
for _, name := range openAPIAggregationManager.GetAPIServiceNames() {
c.queue.AddAfter(name, time.Second)
}
return c
}
// Run starts OpenAPI AggregationController
func (c *AggregationController) Run(stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
defer c.queue.ShutDown()
klog.Info("Starting OpenAPI V3 AggregationController")
defer klog.Info("Shutting down OpenAPI V3 AggregationController")
go wait.Until(c.runWorker, time.Second, stopCh)
<-stopCh
}
func (c *AggregationController) runWorker() {
for c.processNextWorkItem() {
}
}
// processNextWorkItem deals with one key off the queue. It returns false when it's time to quit.
func (c *AggregationController) processNextWorkItem() bool {
key, quit := c.queue.Get()
defer c.queue.Done(key)
if quit {
return false
}
if aggregator.IsLocalAPIService(key) {
// for local delegation targets that are aggregated once per second, log at
// higher level to avoid flooding the log
klog.V(6).Infof("OpenAPI AggregationController: Processing item %s", key)
} else {
klog.V(4).Infof("OpenAPI AggregationController: Processing item %s", key)
}
action, err := c.syncHandler(key)
if err == nil {
c.queue.Forget(key)
} else {
utilruntime.HandleError(fmt.Errorf("loading OpenAPI spec for %q failed with: %v", key, err))
}
switch action {
case syncRequeue:
if aggregator.IsLocalAPIService(key) {
klog.V(7).Infof("OpenAPI AggregationController: action for local item %s: Requeue after %s.", key, successfulUpdateDelayLocal)
c.queue.AddAfter(key, successfulUpdateDelayLocal)
} else {
klog.V(7).Infof("OpenAPI AggregationController: action for item %s: Requeue.", key)
c.queue.AddAfter(key, successfulUpdateDelay)
}
case syncRequeueRateLimited:
klog.Infof("OpenAPI AggregationController: action for item %s: Rate Limited Requeue.", key)
c.queue.AddRateLimited(key)
case syncNothing:
klog.Infof("OpenAPI AggregationController: action for item %s: Nothing (removed from the queue).", key)
}
return true
}
func (c *AggregationController) sync(key string) (syncAction, error) {
if err := c.openAPIAggregationManager.UpdateAPIServiceSpec(key); err != nil {
if err == aggregator.ErrAPIServiceNotFound {
return syncNothing, nil
}
return syncRequeueRateLimited, err
}
return syncRequeue, nil
}
// AddAPIService adds a new API Service to OpenAPI Aggregation.
func (c *AggregationController) AddAPIService(handler http.Handler, apiService *v1.APIService) {
if apiService.Spec.Service == nil {
return
}
c.openAPIAggregationManager.AddUpdateAPIService(handler, apiService)
c.queue.AddAfter(apiService.Name, time.Second)
}
// UpdateAPIService updates API Service's info and handler.
func (c *AggregationController) UpdateAPIService(handler http.Handler, apiService *v1.APIService) {
if apiService.Spec.Service == nil {
return
}
c.openAPIAggregationManager.AddUpdateAPIService(handler, apiService)
key := apiService.Name
if c.queue.NumRequeues(key) > 0 {
// The item has failed before. Remove it from failure queue and
// update it in a second
c.queue.Forget(key)
c.queue.AddAfter(key, time.Second)
}
}
// RemoveAPIService removes API Service from OpenAPI Aggregation Controller.
func (c *AggregationController) RemoveAPIService(apiServiceName string) {
c.openAPIAggregationManager.RemoveAPIServiceSpec(apiServiceName)
// This will only remove it if it was failing before. If it was successful, processNextWorkItem will figure it out
// and will not add it again to the queue.
c.queue.Forget(apiServiceName)
}
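A brief, self-contained sketch of the requeue policy the controller's queue is built with; the item key below is an assumed example.

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	// Per-item exponential backoff: failures start at one minute and double up
	// to a one-hour cap, mirroring successfulUpdateDelay and failedUpdateMaxExpDelay.
	rl := workqueue.NewTypedItemExponentialFailureRateLimiter[string](time.Minute, time.Hour)
	fmt.Println(rl.When("v1.apps")) // 1m0s on the first failure (assumed key)
	fmt.Println(rl.When("v1.apps")) // 2m0s on the second failure
	rl.Forget("v1.apps")            // a successful sync resets the backoff
}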

View File

@ -1,227 +0,0 @@
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package external
import (
"context"
"fmt"
"time"
"k8s.io/apimachinery/pkg/api/equality"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog/v2"
apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
apiregistrationv1apihelper "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/helper"
apiregistrationclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1"
informers "k8s.io/kube-aggregator/pkg/client/informers/externalversions/apiregistration/v1"
listers "k8s.io/kube-aggregator/pkg/client/listers/apiregistration/v1"
"k8s.io/kube-aggregator/pkg/controllers"
availabilitymetrics "k8s.io/kube-aggregator/pkg/controllers/status/metrics"
)
// AvailableConditionController handles checking the availability of registered local API services.
type AvailableConditionController struct {
apiServiceClient apiregistrationclient.APIServicesGetter
apiServiceLister listers.APIServiceLister
apiServiceSynced cache.InformerSynced
// To allow injection for testing.
syncFn func(key string) error
queue workqueue.TypedRateLimitingInterface[string]
// metrics registered into legacy registry
metrics *availabilitymetrics.Metrics
}
// New returns a new local availability AvailableConditionController.
func New(
apiServiceInformer informers.APIServiceInformer,
apiServiceClient apiregistrationclient.APIServicesGetter,
metrics *availabilitymetrics.Metrics,
) (*AvailableConditionController, error) {
c := &AvailableConditionController{
apiServiceClient: apiServiceClient,
apiServiceLister: apiServiceInformer.Lister(),
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
// We want a fairly tight requeue time. The controller listens to the API, but because it relies on the routability of the
// service network, it is possible for an external, non-watchable factor to affect availability. This keeps
// the maximum disruption time to a minimum, but it does prevent hot loops.
workqueue.NewTypedItemExponentialFailureRateLimiter[string](5*time.Millisecond, 30*time.Second),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "LocalAvailabilityController"},
),
metrics: metrics,
}
// resync on this one because it is low cardinality and rechecking the actual discovery
// allows us to detect health in a more timely fashion when network connectivity to
// nodes is snipped, but the network still attempts to route there. See
// https://github.com/openshift/origin/issues/17159#issuecomment-341798063
apiServiceHandler, _ := apiServiceInformer.Informer().AddEventHandlerWithResyncPeriod(
cache.ResourceEventHandlerFuncs{
AddFunc: c.addAPIService,
UpdateFunc: c.updateAPIService,
DeleteFunc: c.deleteAPIService,
},
30*time.Second)
c.apiServiceSynced = apiServiceHandler.HasSynced
c.syncFn = c.sync
return c, nil
}
func (c *AvailableConditionController) sync(key string) error {
originalAPIService, err := c.apiServiceLister.Get(key)
if apierrors.IsNotFound(err) {
c.metrics.ForgetAPIService(key)
return nil
}
if err != nil {
return err
}
if originalAPIService.Spec.Service != nil {
// this controller only handles local APIServices
return nil
}
// local API services are always considered available
apiService := originalAPIService.DeepCopy()
apiregistrationv1apihelper.SetAPIServiceCondition(apiService, apiregistrationv1apihelper.NewLocalAvailableAPIServiceCondition())
_, err = c.updateAPIServiceStatus(originalAPIService, apiService)
return err
}
// updateAPIServiceStatus only issues an update if a change is detected. We have a tight resync loop to quickly detect dead
// apiservices. Doing that means we don't want to quickly issue no-op updates.
func (c *AvailableConditionController) updateAPIServiceStatus(originalAPIService, newAPIService *apiregistrationv1.APIService) (*apiregistrationv1.APIService, error) {
// update this metric on every sync operation to reflect the actual state
c.metrics.SetUnavailableGauge(newAPIService)
if equality.Semantic.DeepEqual(originalAPIService.Status, newAPIService.Status) {
return newAPIService, nil
}
orig := apiregistrationv1apihelper.GetAPIServiceConditionByType(originalAPIService, apiregistrationv1.Available)
now := apiregistrationv1apihelper.GetAPIServiceConditionByType(newAPIService, apiregistrationv1.Available)
unknown := apiregistrationv1.APIServiceCondition{
Type: apiregistrationv1.Available,
Status: apiregistrationv1.ConditionUnknown,
}
if orig == nil {
orig = &unknown
}
if now == nil {
now = &unknown
}
if *orig != *now {
klog.V(2).InfoS("changing APIService availability", "name", newAPIService.Name, "oldStatus", orig.Status, "newStatus", now.Status, "message", now.Message, "reason", now.Reason)
}
newAPIService, err := c.apiServiceClient.APIServices().UpdateStatus(context.TODO(), newAPIService, metav1.UpdateOptions{})
if err != nil {
return nil, err
}
c.metrics.SetUnavailableCounter(originalAPIService, newAPIService)
return newAPIService, nil
}
// Run starts the AvailableConditionController loop which manages the availability condition of API services.
func (c *AvailableConditionController) Run(workers int, stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
defer c.queue.ShutDown()
klog.Info("Starting LocalAvailability controller")
defer klog.Info("Shutting down LocalAvailability controller")
// This waits not just for the informers to sync, but for our handlers
// to be called; since the handlers are three different ways of
// enqueueing the same thing, waiting for this permits the queue to
// maximally de-duplicate the entries.
if !controllers.WaitForCacheSync("LocalAvailability", stopCh, c.apiServiceSynced) {
return
}
for i := 0; i < workers; i++ {
go wait.Until(c.runWorker, time.Second, stopCh)
}
<-stopCh
}
func (c *AvailableConditionController) runWorker() {
for c.processNextWorkItem() {
}
}
// processNextWorkItem deals with one key off the queue. It returns false when it's time to quit.
func (c *AvailableConditionController) processNextWorkItem() bool {
key, quit := c.queue.Get()
if quit {
return false
}
defer c.queue.Done(key)
err := c.syncFn(key)
if err == nil {
c.queue.Forget(key)
return true
}
utilruntime.HandleError(fmt.Errorf("%v failed with: %w", key, err))
c.queue.AddRateLimited(key)
return true
}
func (c *AvailableConditionController) addAPIService(obj interface{}) {
castObj := obj.(*apiregistrationv1.APIService)
klog.V(4).Infof("Adding %s", castObj.Name)
c.queue.Add(castObj.Name)
}
func (c *AvailableConditionController) updateAPIService(oldObj, _ interface{}) {
oldCastObj := oldObj.(*apiregistrationv1.APIService)
klog.V(4).Infof("Updating %s", oldCastObj.Name)
c.queue.Add(oldCastObj.Name)
}
func (c *AvailableConditionController) deleteAPIService(obj interface{}) {
castObj, ok := obj.(*apiregistrationv1.APIService)
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
klog.Errorf("Couldn't get object from tombstone %#v", obj)
return
}
castObj, ok = tombstone.Obj.(*apiregistrationv1.APIService)
if !ok {
klog.Errorf("Tombstone contained object that is not expected %#v", obj)
return
}
}
klog.V(4).Infof("Deleting %q", castObj.Name)
c.queue.Add(castObj.Name)
}

View File

@ -1,177 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metrics
import (
"sync"
"k8s.io/component-base/metrics"
apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
apiregistrationv1apihelper "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/helper"
)
/*
* By default, all the following metrics are defined as falling under
* ALPHA stability level (https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/1209-metrics-stability/kubernetes-control-plane-metrics-stability.md#stability-classes)
*
* Promoting the stability level of the metric is a responsibility of the component owner, since it
* involves explicitly acknowledging support for the metric across multiple releases, in accordance with
* the metric stability policy.
*/
var (
unavailableGaugeDesc = metrics.NewDesc(
"aggregator_unavailable_apiservice",
"Gauge of APIServices which are marked as unavailable broken down by APIService name.",
[]string{"name"},
nil,
metrics.ALPHA,
"",
)
)
type Metrics struct {
unavailableCounter *metrics.CounterVec
*availabilityCollector
}
func New() *Metrics {
return &Metrics{
unavailableCounter: metrics.NewCounterVec(
&metrics.CounterOpts{
Name: "aggregator_unavailable_apiservice_total",
Help: "Counter of APIServices which are marked as unavailable broken down by APIService name and reason.",
StabilityLevel: metrics.ALPHA,
},
[]string{"name", "reason"},
),
availabilityCollector: newAvailabilityCollector(),
}
}
// Register registers apiservice availability metrics.
func (m *Metrics) Register(
registrationFunc func(metrics.Registerable) error,
customRegistrationFunc func(metrics.StableCollector) error,
) error {
err := registrationFunc(m.unavailableCounter)
if err != nil {
return err
}
err = customRegistrationFunc(m.availabilityCollector)
if err != nil {
return err
}
return nil
}
// UnavailableCounter returns a counter to track apiservices marked as unavailable.
func (m *Metrics) UnavailableCounter(apiServiceName, reason string) metrics.CounterMetric {
return m.unavailableCounter.WithLabelValues(apiServiceName, reason)
}
type availabilityCollector struct {
metrics.BaseStableCollector
mtx sync.RWMutex
availabilities map[string]bool
}
// SetUnavailableGauge sets the gauge so that it reflects the current state based on the availability of the given service
func (m *Metrics) SetUnavailableGauge(newAPIService *apiregistrationv1.APIService) {
if apiregistrationv1apihelper.IsAPIServiceConditionTrue(newAPIService, apiregistrationv1.Available) {
m.SetAPIServiceAvailable(newAPIService.Name)
return
}
m.SetAPIServiceUnavailable(newAPIService.Name)
}
// SetUnavailableCounter increments the counter only if the given service is unavailable and its Available condition has changed
func (m *Metrics) SetUnavailableCounter(originalAPIService, newAPIService *apiregistrationv1.APIService) {
wasAvailable := apiregistrationv1apihelper.IsAPIServiceConditionTrue(originalAPIService, apiregistrationv1.Available)
isAvailable := apiregistrationv1apihelper.IsAPIServiceConditionTrue(newAPIService, apiregistrationv1.Available)
statusChanged := isAvailable != wasAvailable
if statusChanged && !isAvailable {
reason := "UnknownReason"
if newCondition := apiregistrationv1apihelper.GetAPIServiceConditionByType(newAPIService, apiregistrationv1.Available); newCondition != nil {
reason = newCondition.Reason
}
m.UnavailableCounter(newAPIService.Name, reason).Inc()
}
}
// Check if apiServiceStatusCollector implements necessary interface.
var _ metrics.StableCollector = &availabilityCollector{}
func newAvailabilityCollector() *availabilityCollector {
return &availabilityCollector{
availabilities: make(map[string]bool),
}
}
// DescribeWithStability implements the metrics.StableCollector interface.
func (c *availabilityCollector) DescribeWithStability(ch chan<- *metrics.Desc) {
ch <- unavailableGaugeDesc
}
// CollectWithStability implements the metrics.StableCollector interface.
func (c *availabilityCollector) CollectWithStability(ch chan<- metrics.Metric) {
c.mtx.RLock()
defer c.mtx.RUnlock()
for apiServiceName, isAvailable := range c.availabilities {
gaugeValue := 1.0
if isAvailable {
gaugeValue = 0.0
}
ch <- metrics.NewLazyConstMetric(
unavailableGaugeDesc,
metrics.GaugeValue,
gaugeValue,
apiServiceName,
)
}
}
// SetAPIServiceAvailable sets the given apiservice availability gauge to available.
func (c *availabilityCollector) SetAPIServiceAvailable(apiServiceKey string) {
c.setAPIServiceAvailability(apiServiceKey, true)
}
// SetAPIServiceUnavailable sets the given apiservice availability gauge to unavailable.
func (c *availabilityCollector) SetAPIServiceUnavailable(apiServiceKey string) {
c.setAPIServiceAvailability(apiServiceKey, false)
}
func (c *availabilityCollector) setAPIServiceAvailability(apiServiceKey string, availability bool) {
c.mtx.Lock()
defer c.mtx.Unlock()
c.availabilities[apiServiceKey] = availability
}
// ForgetAPIService removes the availability gauge of the given apiservice.
func (c *availabilityCollector) ForgetAPIService(apiServiceKey string) {
c.mtx.Lock()
defer c.mtx.Unlock()
delete(c.availabilities, apiServiceKey)
}
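As a usage illustration, a sketch of how this metrics package is typically registered and driven; the import alias, registry calls, and APIService name are assumptions rather than part of this file.

package main

import (
	"k8s.io/component-base/metrics/legacyregistry"

	availabilitymetrics "k8s.io/kube-aggregator/pkg/controllers/status/metrics"
)

func main() {
	// Register the counter and the stable gauge collector with the legacy registry.
	m := availabilitymetrics.New()
	if err := m.Register(legacyregistry.Register, legacyregistry.CustomRegister); err != nil {
		panic(err)
	}

	// The availability controllers then flip the per-APIService gauge:
	m.SetAPIServiceUnavailable("v1beta1.metrics.k8s.io") // gauge reports 1 (unavailable)
	m.SetAPIServiceAvailable("v1beta1.metrics.k8s.io")   // gauge reports 0 (available)
}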

View File

@ -1,588 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package remote
import (
"context"
"fmt"
"net/http"
"net/url"
"reflect"
"sync"
"time"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
v1informers "k8s.io/client-go/informers/core/v1"
v1listers "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/transport"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog/v2"
apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
apiregistrationv1apihelper "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/helper"
apiregistrationclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1"
informers "k8s.io/kube-aggregator/pkg/client/informers/externalversions/apiregistration/v1"
listers "k8s.io/kube-aggregator/pkg/client/listers/apiregistration/v1"
"k8s.io/kube-aggregator/pkg/controllers"
availabilitymetrics "k8s.io/kube-aggregator/pkg/controllers/status/metrics"
)
type certKeyFunc func() ([]byte, []byte)
// ServiceResolver knows how to convert a service reference into an actual location.
type ServiceResolver interface {
ResolveEndpoint(namespace, name string, port int32) (*url.URL, error)
}
// AvailableConditionController handles checking the availability of registered API services.
type AvailableConditionController struct {
apiServiceClient apiregistrationclient.APIServicesGetter
apiServiceLister listers.APIServiceLister
apiServiceSynced cache.InformerSynced
// serviceLister is used to get the IP to create the transport for
serviceLister v1listers.ServiceLister
servicesSynced cache.InformerSynced
endpointsLister v1listers.EndpointsLister
endpointsSynced cache.InformerSynced
// proxyTransportDial specifies the dial function for creating unencrypted TCP connections.
proxyTransportDial *transport.DialHolder
proxyCurrentCertKeyContent certKeyFunc
serviceResolver ServiceResolver
// To allow injection for testing.
syncFn func(key string) error
queue workqueue.TypedRateLimitingInterface[string]
// map from service-namespace -> service-name -> apiservice names
cache map[string]map[string][]string
// this lock protects operations on the above cache
cacheLock sync.RWMutex
// metrics registered into legacy registry
metrics *availabilitymetrics.Metrics
}
// New returns a new remote APIService AvailableConditionController.
func New(
apiServiceInformer informers.APIServiceInformer,
serviceInformer v1informers.ServiceInformer,
endpointsInformer v1informers.EndpointsInformer,
apiServiceClient apiregistrationclient.APIServicesGetter,
proxyTransportDial *transport.DialHolder,
proxyCurrentCertKeyContent certKeyFunc,
serviceResolver ServiceResolver,
metrics *availabilitymetrics.Metrics,
) (*AvailableConditionController, error) {
c := &AvailableConditionController{
apiServiceClient: apiServiceClient,
apiServiceLister: apiServiceInformer.Lister(),
serviceLister: serviceInformer.Lister(),
endpointsLister: endpointsInformer.Lister(),
serviceResolver: serviceResolver,
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
// We want a fairly tight requeue time. The controller listens to the API, but because it relies on the routability of the
// service network, it is possible for an external, non-watchable factor to affect availability. This keeps
// the maximum disruption time to a minimum, but it does prevent hot loops.
workqueue.NewTypedItemExponentialFailureRateLimiter[string](5*time.Millisecond, 30*time.Second),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "RemoteAvailabilityController"},
),
proxyTransportDial: proxyTransportDial,
proxyCurrentCertKeyContent: proxyCurrentCertKeyContent,
metrics: metrics,
}
// resync on this one because it is low cardinality and rechecking the actual discovery
// allows us to detect health in a more timely fashion when network connectivity to
// nodes is snipped, but the network still attempts to route there. See
// https://github.com/openshift/origin/issues/17159#issuecomment-341798063
apiServiceHandler, _ := apiServiceInformer.Informer().AddEventHandlerWithResyncPeriod(
cache.ResourceEventHandlerFuncs{
AddFunc: c.addAPIService,
UpdateFunc: c.updateAPIService,
DeleteFunc: c.deleteAPIService,
},
30*time.Second)
c.apiServiceSynced = apiServiceHandler.HasSynced
serviceHandler, _ := serviceInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: c.addService,
UpdateFunc: c.updateService,
DeleteFunc: c.deleteService,
})
c.servicesSynced = serviceHandler.HasSynced
endpointsHandler, _ := endpointsInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: c.addEndpoints,
UpdateFunc: c.updateEndpoints,
DeleteFunc: c.deleteEndpoints,
})
c.endpointsSynced = endpointsHandler.HasSynced
c.syncFn = c.sync
return c, nil
}
func (c *AvailableConditionController) sync(key string) error {
originalAPIService, err := c.apiServiceLister.Get(key)
if apierrors.IsNotFound(err) {
c.metrics.ForgetAPIService(key)
return nil
}
if err != nil {
return err
}
if originalAPIService.Spec.Service == nil {
// handled by the local APIService controller
return nil
}
apiService := originalAPIService.DeepCopy()
// if a particular transport was specified, use it; otherwise build one
// construct an http client that ignores TLS verification (if someone owns the network and messes with your status
// that's not so bad) and sets a very short timeout. This is a best-effort GET that provides no additional information.
transportConfig := &transport.Config{
TLS: transport.TLSConfig{
Insecure: true,
},
DialHolder: c.proxyTransportDial,
}
if c.proxyCurrentCertKeyContent != nil {
proxyClientCert, proxyClientKey := c.proxyCurrentCertKeyContent()
transportConfig.TLS.CertData = proxyClientCert
transportConfig.TLS.KeyData = proxyClientKey
}
restTransport, err := transport.New(transportConfig)
if err != nil {
return err
}
discoveryClient := &http.Client{
Transport: restTransport,
// the request should happen quickly.
Timeout: 5 * time.Second,
CheckRedirect: func(req *http.Request, via []*http.Request) error {
return http.ErrUseLastResponse
},
}
availableCondition := apiregistrationv1.APIServiceCondition{
Type: apiregistrationv1.Available,
Status: apiregistrationv1.ConditionTrue,
LastTransitionTime: metav1.Now(),
}
service, err := c.serviceLister.Services(apiService.Spec.Service.Namespace).Get(apiService.Spec.Service.Name)
if apierrors.IsNotFound(err) {
availableCondition.Status = apiregistrationv1.ConditionFalse
availableCondition.Reason = "ServiceNotFound"
availableCondition.Message = fmt.Sprintf("service/%s in %q is not present", apiService.Spec.Service.Name, apiService.Spec.Service.Namespace)
apiregistrationv1apihelper.SetAPIServiceCondition(apiService, availableCondition)
_, err := c.updateAPIServiceStatus(originalAPIService, apiService)
return err
} else if err != nil {
availableCondition.Status = apiregistrationv1.ConditionUnknown
availableCondition.Reason = "ServiceAccessError"
availableCondition.Message = fmt.Sprintf("service/%s in %q cannot be checked due to: %v", apiService.Spec.Service.Name, apiService.Spec.Service.Namespace, err)
apiregistrationv1apihelper.SetAPIServiceCondition(apiService, availableCondition)
_, err := c.updateAPIServiceStatus(originalAPIService, apiService)
return err
}
if service.Spec.Type == v1.ServiceTypeClusterIP {
// if we have a cluster IP service, it must be listening on configured port and we can check that
servicePort := apiService.Spec.Service.Port
portName := ""
foundPort := false
for _, port := range service.Spec.Ports {
if port.Port == *servicePort {
foundPort = true
portName = port.Name
break
}
}
if !foundPort {
availableCondition.Status = apiregistrationv1.ConditionFalse
availableCondition.Reason = "ServicePortError"
availableCondition.Message = fmt.Sprintf("service/%s in %q is not listening on port %d", apiService.Spec.Service.Name, apiService.Spec.Service.Namespace, *apiService.Spec.Service.Port)
apiregistrationv1apihelper.SetAPIServiceCondition(apiService, availableCondition)
_, err := c.updateAPIServiceStatus(originalAPIService, apiService)
return err
}
endpoints, err := c.endpointsLister.Endpoints(apiService.Spec.Service.Namespace).Get(apiService.Spec.Service.Name)
if apierrors.IsNotFound(err) {
availableCondition.Status = apiregistrationv1.ConditionFalse
availableCondition.Reason = "EndpointsNotFound"
availableCondition.Message = fmt.Sprintf("cannot find endpoints for service/%s in %q", apiService.Spec.Service.Name, apiService.Spec.Service.Namespace)
apiregistrationv1apihelper.SetAPIServiceCondition(apiService, availableCondition)
_, err := c.updateAPIServiceStatus(originalAPIService, apiService)
return err
} else if err != nil {
availableCondition.Status = apiregistrationv1.ConditionUnknown
availableCondition.Reason = "EndpointsAccessError"
availableCondition.Message = fmt.Sprintf("service/%s in %q cannot be checked due to: %v", apiService.Spec.Service.Name, apiService.Spec.Service.Namespace, err)
apiregistrationv1apihelper.SetAPIServiceCondition(apiService, availableCondition)
_, err := c.updateAPIServiceStatus(originalAPIService, apiService)
return err
}
hasActiveEndpoints := false
outer:
for _, subset := range endpoints.Subsets {
if len(subset.Addresses) == 0 {
continue
}
for _, endpointPort := range subset.Ports {
if endpointPort.Name == portName {
hasActiveEndpoints = true
break outer
}
}
}
if !hasActiveEndpoints {
availableCondition.Status = apiregistrationv1.ConditionFalse
availableCondition.Reason = "MissingEndpoints"
availableCondition.Message = fmt.Sprintf("endpoints for service/%s in %q have no addresses with port name %q", apiService.Spec.Service.Name, apiService.Spec.Service.Namespace, portName)
apiregistrationv1apihelper.SetAPIServiceCondition(apiService, availableCondition)
_, err := c.updateAPIServiceStatus(originalAPIService, apiService)
return err
}
}
// actually try to hit the discovery endpoint when it isn't local and when we're routing as a service.
if apiService.Spec.Service != nil && c.serviceResolver != nil {
attempts := 5
results := make(chan error, attempts)
for i := 0; i < attempts; i++ {
go func() {
discoveryURL, err := c.serviceResolver.ResolveEndpoint(apiService.Spec.Service.Namespace, apiService.Spec.Service.Name, *apiService.Spec.Service.Port)
if err != nil {
results <- err
return
}
// render legacyAPIService health check path when it is delegated to a service
if apiService.Name == "v1." {
discoveryURL.Path = "/api/" + apiService.Spec.Version
} else {
discoveryURL.Path = "/apis/" + apiService.Spec.Group + "/" + apiService.Spec.Version
}
errCh := make(chan error, 1)
go func() {
// be sure to check a URL that the aggregated API server is required to serve
newReq, err := http.NewRequest("GET", discoveryURL.String(), nil)
if err != nil {
errCh <- err
return
}
// setting the system-masters identity ensures that we will always have access rights
transport.SetAuthProxyHeaders(newReq, "system:kube-aggregator", []string{"system:masters"}, nil)
resp, err := discoveryClient.Do(newReq)
if resp != nil {
resp.Body.Close()
// we should always be in the 200s or 300s
if resp.StatusCode < http.StatusOK || resp.StatusCode >= http.StatusMultipleChoices {
errCh <- fmt.Errorf("bad status from %v: %d", discoveryURL, resp.StatusCode)
return
}
}
errCh <- err
}()
select {
case err = <-errCh:
if err != nil {
results <- fmt.Errorf("failing or missing response from %v: %w", discoveryURL, err)
return
}
// we had trouble with slow dial and DNS responses causing us to wait too long.
// we added this as insurance
case <-time.After(6 * time.Second):
results <- fmt.Errorf("timed out waiting for %v", discoveryURL)
return
}
results <- nil
}()
}
var lastError error
for i := 0; i < attempts; i++ {
lastError = <-results
// if we had at least one success, we are successful overall and we can return now
if lastError == nil {
break
}
}
if lastError != nil {
availableCondition.Status = apiregistrationv1.ConditionFalse
availableCondition.Reason = "FailedDiscoveryCheck"
availableCondition.Message = lastError.Error()
apiregistrationv1apihelper.SetAPIServiceCondition(apiService, availableCondition)
_, updateErr := c.updateAPIServiceStatus(originalAPIService, apiService)
if updateErr != nil {
return updateErr
}
// force a requeue to make it very obvious that this will be retried at some point in the future
// along with other requeues done via service change, endpoint change, and resync
return lastError
}
}
availableCondition.Reason = "Passed"
availableCondition.Message = "all checks passed"
apiregistrationv1apihelper.SetAPIServiceCondition(apiService, availableCondition)
_, err = c.updateAPIServiceStatus(originalAPIService, apiService)
return err
}
// updateAPIServiceStatus only issues an update if a change is detected. We have a tight resync loop to quickly detect dead
// apiservices. Doing that means we don't want to quickly issue no-op updates.
func (c *AvailableConditionController) updateAPIServiceStatus(originalAPIService, newAPIService *apiregistrationv1.APIService) (*apiregistrationv1.APIService, error) {
// update this metric on every sync operation to reflect the actual state
c.metrics.SetUnavailableGauge(newAPIService)
if equality.Semantic.DeepEqual(originalAPIService.Status, newAPIService.Status) {
return newAPIService, nil
}
orig := apiregistrationv1apihelper.GetAPIServiceConditionByType(originalAPIService, apiregistrationv1.Available)
now := apiregistrationv1apihelper.GetAPIServiceConditionByType(newAPIService, apiregistrationv1.Available)
unknown := apiregistrationv1.APIServiceCondition{
Type: apiregistrationv1.Available,
Status: apiregistrationv1.ConditionUnknown,
}
if orig == nil {
orig = &unknown
}
if now == nil {
now = &unknown
}
if *orig != *now {
klog.V(2).InfoS("changing APIService availability", "name", newAPIService.Name, "oldStatus", orig.Status, "newStatus", now.Status, "message", now.Message, "reason", now.Reason)
}
newAPIService, err := c.apiServiceClient.APIServices().UpdateStatus(context.TODO(), newAPIService, metav1.UpdateOptions{})
if err != nil {
return nil, err
}
c.metrics.SetUnavailableCounter(originalAPIService, newAPIService)
return newAPIService, nil
}
// Run starts the AvailableConditionController loop which manages the availability condition of API services.
func (c *AvailableConditionController) Run(workers int, stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
defer c.queue.ShutDown()
klog.Info("Starting RemoteAvailability controller")
defer klog.Info("Shutting down RemoteAvailability controller")
// This waits not just for the informers to sync, but for our handlers
// to be called; since the handlers are three different ways of
// enqueueing the same thing, waiting for this permits the queue to
// maximally de-duplicate the entries.
if !controllers.WaitForCacheSync("RemoteAvailability", stopCh, c.apiServiceSynced, c.servicesSynced, c.endpointsSynced) {
return
}
for i := 0; i < workers; i++ {
go wait.Until(c.runWorker, time.Second, stopCh)
}
<-stopCh
}
func (c *AvailableConditionController) runWorker() {
for c.processNextWorkItem() {
}
}
// processNextWorkItem deals with one key off the queue. It returns false when it's time to quit.
func (c *AvailableConditionController) processNextWorkItem() bool {
key, quit := c.queue.Get()
if quit {
return false
}
defer c.queue.Done(key)
err := c.syncFn(key)
if err == nil {
c.queue.Forget(key)
return true
}
utilruntime.HandleError(fmt.Errorf("%v failed with: %v", key, err))
c.queue.AddRateLimited(key)
return true
}
func (c *AvailableConditionController) addAPIService(obj interface{}) {
castObj := obj.(*apiregistrationv1.APIService)
klog.V(4).Infof("Adding %s", castObj.Name)
if castObj.Spec.Service != nil {
c.rebuildAPIServiceCache()
}
c.queue.Add(castObj.Name)
}
func (c *AvailableConditionController) updateAPIService(oldObj, newObj interface{}) {
castObj := newObj.(*apiregistrationv1.APIService)
oldCastObj := oldObj.(*apiregistrationv1.APIService)
klog.V(4).Infof("Updating %s", oldCastObj.Name)
if !reflect.DeepEqual(castObj.Spec.Service, oldCastObj.Spec.Service) {
c.rebuildAPIServiceCache()
}
c.queue.Add(oldCastObj.Name)
}
func (c *AvailableConditionController) deleteAPIService(obj interface{}) {
castObj, ok := obj.(*apiregistrationv1.APIService)
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
klog.Errorf("Couldn't get object from tombstone %#v", obj)
return
}
castObj, ok = tombstone.Obj.(*apiregistrationv1.APIService)
if !ok {
klog.Errorf("Tombstone contained object that is not expected %#v", obj)
return
}
}
klog.V(4).Infof("Deleting %q", castObj.Name)
if castObj.Spec.Service != nil {
c.rebuildAPIServiceCache()
}
c.queue.Add(castObj.Name)
}
func (c *AvailableConditionController) getAPIServicesFor(obj runtime.Object) []string {
metadata, err := meta.Accessor(obj)
if err != nil {
utilruntime.HandleError(err)
return nil
}
c.cacheLock.RLock()
defer c.cacheLock.RUnlock()
return c.cache[metadata.GetNamespace()][metadata.GetName()]
}
// if the service/endpoint handler wins the race against the cache rebuilding, it may queue a no-longer-relevant apiservice
// (which will get processed an extra time - this doesn't matter),
// and miss a newly relevant apiservice (which will get queued by the apiservice handler)
func (c *AvailableConditionController) rebuildAPIServiceCache() {
apiServiceList, _ := c.apiServiceLister.List(labels.Everything())
newCache := map[string]map[string][]string{}
for _, apiService := range apiServiceList {
if apiService.Spec.Service == nil {
continue
}
if newCache[apiService.Spec.Service.Namespace] == nil {
newCache[apiService.Spec.Service.Namespace] = map[string][]string{}
}
newCache[apiService.Spec.Service.Namespace][apiService.Spec.Service.Name] = append(newCache[apiService.Spec.Service.Namespace][apiService.Spec.Service.Name], apiService.Name)
}
c.cacheLock.Lock()
defer c.cacheLock.Unlock()
c.cache = newCache
}
// TODO, think of a way to avoid checking on every service manipulation
func (c *AvailableConditionController) addService(obj interface{}) {
for _, apiService := range c.getAPIServicesFor(obj.(*v1.Service)) {
c.queue.Add(apiService)
}
}
func (c *AvailableConditionController) updateService(obj, _ interface{}) {
for _, apiService := range c.getAPIServicesFor(obj.(*v1.Service)) {
c.queue.Add(apiService)
}
}
func (c *AvailableConditionController) deleteService(obj interface{}) {
castObj, ok := obj.(*v1.Service)
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
klog.Errorf("Couldn't get object from tombstone %#v", obj)
return
}
castObj, ok = tombstone.Obj.(*v1.Service)
if !ok {
klog.Errorf("Tombstone contained object that is not expected %#v", obj)
return
}
}
for _, apiService := range c.getAPIServicesFor(castObj) {
c.queue.Add(apiService)
}
}
func (c *AvailableConditionController) addEndpoints(obj interface{}) {
for _, apiService := range c.getAPIServicesFor(obj.(*v1.Endpoints)) {
c.queue.Add(apiService)
}
}
func (c *AvailableConditionController) updateEndpoints(obj, _ interface{}) {
for _, apiService := range c.getAPIServicesFor(obj.(*v1.Endpoints)) {
c.queue.Add(apiService)
}
}
func (c *AvailableConditionController) deleteEndpoints(obj interface{}) {
castObj, ok := obj.(*v1.Endpoints)
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
klog.Errorf("Couldn't get object from tombstone %#v", obj)
return
}
castObj, ok = tombstone.Obj.(*v1.Endpoints)
if !ok {
klog.Errorf("Tombstone contained object that is not expected %#v", obj)
return
}
}
for _, apiService := range c.getAPIServicesFor(castObj) {
c.queue.Add(apiService)
}
}
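For context, a condensed sketch of the discovery probe that sync() performs once the Service and Endpoints checks pass; the host and group/version are assumed example values.

package main

import (
	"fmt"
	"net/http"
	"net/url"

	"k8s.io/client-go/transport"
)

func main() {
	// Resolve the APIService's service to a URL (normally via ServiceResolver),
	// point it at the discovery path, and identify as system:kube-aggregator.
	discoveryURL := &url.URL{Scheme: "https", Host: "metrics-server.kube-system.svc:443"} // assumed host
	discoveryURL.Path = "/apis/metrics.k8s.io/v1beta1"                                    // assumed group/version

	req, err := http.NewRequest("GET", discoveryURL.String(), nil)
	if err != nil {
		panic(err)
	}
	transport.SetAuthProxyHeaders(req, "system:kube-aggregator", []string{"system:masters"}, nil)
	fmt.Println(req.Method, req.URL.String(), req.Header)
}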

View File

@ -1,4 +0,0 @@
# See the OWNERS docs at https://go.k8s.io/owners
labels:
- sig/etcd

View File

@ -1,172 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package etcd
import (
"context"
"fmt"
"k8s.io/apimachinery/pkg/api/meta"
metatable "k8s.io/apimachinery/pkg/api/meta/table"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apiserver/pkg/registry/generic"
genericregistry "k8s.io/apiserver/pkg/registry/generic/registry"
"k8s.io/apiserver/pkg/registry/rest"
"k8s.io/kube-aggregator/pkg/apis/apiregistration"
"k8s.io/kube-aggregator/pkg/registry/apiservice"
"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
)
// REST implements a RESTStorage for API services against etcd
type REST struct {
*genericregistry.Store
}
// NewREST returns a RESTStorage object that will work against API services.
func NewREST(scheme *runtime.Scheme, optsGetter generic.RESTOptionsGetter) *REST {
strategy := apiservice.NewStrategy(scheme)
store := &genericregistry.Store{
NewFunc: func() runtime.Object { return &apiregistration.APIService{} },
NewListFunc: func() runtime.Object { return &apiregistration.APIServiceList{} },
PredicateFunc: apiservice.MatchAPIService,
DefaultQualifiedResource: apiregistration.Resource("apiservices"),
SingularQualifiedResource: apiregistration.Resource("apiservice"),
CreateStrategy: strategy,
UpdateStrategy: strategy,
DeleteStrategy: strategy,
ResetFieldsStrategy: strategy,
// TODO: define table converter that exposes more than name/creation timestamp
TableConvertor: rest.NewDefaultTableConvertor(apiregistration.Resource("apiservices")),
}
options := &generic.StoreOptions{RESTOptions: optsGetter, AttrFunc: apiservice.GetAttrs}
if err := store.CompleteWithOptions(options); err != nil {
panic(err) // TODO: Propagate error up
}
return &REST{store}
}
// Implement CategoriesProvider
var _ rest.CategoriesProvider = &REST{}
// Categories implements the CategoriesProvider interface. Returns a list of categories a resource is part of.
func (c *REST) Categories() []string {
return []string{"api-extensions"}
}
var swaggerMetadataDescriptions = metav1.ObjectMeta{}.SwaggerDoc()
// ConvertToTable implements the TableConvertor interface for REST.
func (c *REST) ConvertToTable(ctx context.Context, obj runtime.Object, tableOptions runtime.Object) (*metav1.Table, error) {
table := &metav1.Table{
ColumnDefinitions: []metav1.TableColumnDefinition{
{Name: "Name", Type: "string", Format: "name", Description: swaggerMetadataDescriptions["name"]},
{Name: "Service", Type: "string", Description: "The reference to the service that hosts this API endpoint."},
{Name: "Available", Type: "string", Description: "Whether this service is available."},
{Name: "Age", Type: "string", Description: swaggerMetadataDescriptions["creationTimestamp"]},
},
}
if m, err := meta.ListAccessor(obj); err == nil {
table.ResourceVersion = m.GetResourceVersion()
table.Continue = m.GetContinue()
table.RemainingItemCount = m.GetRemainingItemCount()
} else {
if m, err := meta.CommonAccessor(obj); err == nil {
table.ResourceVersion = m.GetResourceVersion()
}
}
var err error
table.Rows, err = metatable.MetaToTableRow(obj, func(obj runtime.Object, m metav1.Object, name, age string) ([]interface{}, error) {
svc := obj.(*apiregistration.APIService)
service := "Local"
if svc.Spec.Service != nil {
service = fmt.Sprintf("%s/%s", svc.Spec.Service.Namespace, svc.Spec.Service.Name)
}
status := string(apiregistration.ConditionUnknown)
if condition := getCondition(svc.Status.Conditions, "Available"); condition != nil {
switch {
case condition.Status == apiregistration.ConditionTrue:
status = string(condition.Status)
case len(condition.Reason) > 0:
status = fmt.Sprintf("%s (%s)", condition.Status, condition.Reason)
default:
status = string(condition.Status)
}
}
return []interface{}{name, service, status, age}, nil
})
return table, err
}
func getCondition(conditions []apiregistration.APIServiceCondition, conditionType apiregistration.APIServiceConditionType) *apiregistration.APIServiceCondition {
for i, condition := range conditions {
if condition.Type == conditionType {
return &conditions[i]
}
}
return nil
}
// NewStatusREST makes a RESTStorage for status that has more limited options.
// It is based on the original REST so that we can share the same underlying store
func NewStatusREST(scheme *runtime.Scheme, rest *REST) *StatusREST {
strategy := apiservice.NewStatusStrategy(scheme)
statusStore := *rest.Store
statusStore.CreateStrategy = nil
statusStore.DeleteStrategy = nil
statusStore.UpdateStrategy = strategy
statusStore.ResetFieldsStrategy = strategy
return &StatusREST{store: &statusStore}
}
// StatusREST implements the REST endpoint for changing the status of an APIService.
type StatusREST struct {
store *genericregistry.Store
}
var _ = rest.Patcher(&StatusREST{})
// New creates a new APIService object.
func (r *StatusREST) New() runtime.Object {
return &apiregistration.APIService{}
}
// Destroy cleans up resources on shutdown.
func (r *StatusREST) Destroy() {
// Given that underlying store is shared with REST,
// we don't destroy it here explicitly.
}
// Get retrieves the object from the storage. It is required to support Patch.
func (r *StatusREST) Get(ctx context.Context, name string, options *metav1.GetOptions) (runtime.Object, error) {
return r.store.Get(ctx, name, options)
}
// Update alters the status subset of an object.
func (r *StatusREST) Update(ctx context.Context, name string, objInfo rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc, forceAllowCreate bool, options *metav1.UpdateOptions) (runtime.Object, bool, error) {
// We are explicitly setting forceAllowCreate to false in the call to the underlying storage because
// subresources should never allow create on update.
return r.store.Update(ctx, name, objInfo, createValidation, updateValidation, false, options)
}
// GetResetFields implements rest.ResetFieldsStrategy
func (r *StatusREST) GetResetFields() map[fieldpath.APIVersion]*fieldpath.Set {
return r.store.GetResetFields()
}

View File

@ -1,49 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package rest
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apiserver/pkg/registry/generic"
"k8s.io/apiserver/pkg/registry/rest"
genericapiserver "k8s.io/apiserver/pkg/server"
serverstorage "k8s.io/apiserver/pkg/server/storage"
"k8s.io/kube-aggregator/pkg/apis/apiregistration"
v1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
aggregatorscheme "k8s.io/kube-aggregator/pkg/apiserver/scheme"
apiservicestorage "k8s.io/kube-aggregator/pkg/registry/apiservice/etcd"
)
// NewRESTStorage returns an APIGroupInfo object that will work against apiservice.
func NewRESTStorage(apiResourceConfigSource serverstorage.APIResourceConfigSource, restOptionsGetter generic.RESTOptionsGetter, shouldServeBeta bool) genericapiserver.APIGroupInfo {
apiGroupInfo := genericapiserver.NewDefaultAPIGroupInfo(apiregistration.GroupName, aggregatorscheme.Scheme, metav1.ParameterCodec, aggregatorscheme.Codecs)
storage := map[string]rest.Storage{}
if resource := "apiservices"; apiResourceConfigSource.ResourceEnabled(v1.SchemeGroupVersion.WithResource(resource)) {
apiServiceREST := apiservicestorage.NewREST(aggregatorscheme.Scheme, restOptionsGetter)
storage[resource] = apiServiceREST
storage[resource+"/status"] = apiservicestorage.NewStatusREST(aggregatorscheme.Scheme, apiServiceREST)
}
if len(storage) > 0 {
apiGroupInfo.VersionedResourcesStorageMap["v1"] = storage
}
return apiGroupInfo
}

View File

@ -1,196 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apiservice
import (
"context"
"fmt"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/apiserver/pkg/registry/generic"
"k8s.io/apiserver/pkg/registry/rest"
"k8s.io/apiserver/pkg/storage"
"k8s.io/apiserver/pkg/storage/names"
"k8s.io/kube-aggregator/pkg/apis/apiregistration"
"k8s.io/kube-aggregator/pkg/apis/apiregistration/validation"
"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
)
type apiServerStrategy struct {
runtime.ObjectTyper
names.NameGenerator
}
// apiServerStrategy must implement rest.RESTCreateUpdateStrategy
var _ rest.RESTCreateUpdateStrategy = apiServerStrategy{}
var Strategy = apiServerStrategy{}
// NewStrategy creates a new apiServerStrategy.
func NewStrategy(typer runtime.ObjectTyper) rest.CreateUpdateResetFieldsStrategy {
return apiServerStrategy{typer, names.SimpleNameGenerator}
}
func (apiServerStrategy) NamespaceScoped() bool {
return false
}
func (apiServerStrategy) GetResetFields() map[fieldpath.APIVersion]*fieldpath.Set {
fields := map[fieldpath.APIVersion]*fieldpath.Set{
"apiregistration.k8s.io/v1": fieldpath.NewSet(
fieldpath.MakePathOrDie("status"),
),
"apiregistration.k8s.io/v1beta1": fieldpath.NewSet(
fieldpath.MakePathOrDie("status"),
),
}
return fields
}
func (apiServerStrategy) PrepareForCreate(ctx context.Context, obj runtime.Object) {
apiservice := obj.(*apiregistration.APIService)
apiservice.Status = apiregistration.APIServiceStatus{}
// mark local API services as immediately available on create
if apiservice.Spec.Service == nil {
apiregistration.SetAPIServiceCondition(apiservice, apiregistration.NewLocalAvailableAPIServiceCondition())
}
}
func (apiServerStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.Object) {
newAPIService := obj.(*apiregistration.APIService)
oldAPIService := old.(*apiregistration.APIService)
newAPIService.Status = oldAPIService.Status
}
func (apiServerStrategy) Validate(ctx context.Context, obj runtime.Object) field.ErrorList {
return validation.ValidateAPIService(obj.(*apiregistration.APIService))
}
// WarningsOnCreate returns warnings for the creation of the given object.
func (apiServerStrategy) WarningsOnCreate(ctx context.Context, obj runtime.Object) []string {
return nil
}
func (apiServerStrategy) AllowCreateOnUpdate() bool {
return false
}
func (apiServerStrategy) AllowUnconditionalUpdate() bool {
return false
}
func (apiServerStrategy) Canonicalize(obj runtime.Object) {
}
func (apiServerStrategy) ValidateUpdate(ctx context.Context, obj, old runtime.Object) field.ErrorList {
return validation.ValidateAPIServiceUpdate(obj.(*apiregistration.APIService), old.(*apiregistration.APIService))
}
// WarningsOnUpdate returns warnings for the given update.
func (apiServerStrategy) WarningsOnUpdate(ctx context.Context, obj, old runtime.Object) []string {
return nil
}
type apiServerStatusStrategy struct {
runtime.ObjectTyper
names.NameGenerator
}
// NewStatusStrategy creates a new apiServerStatusStrategy.
func NewStatusStrategy(typer runtime.ObjectTyper) rest.UpdateResetFieldsStrategy {
return apiServerStatusStrategy{typer, names.SimpleNameGenerator}
}
func (apiServerStatusStrategy) NamespaceScoped() bool {
return false
}
func (apiServerStatusStrategy) GetResetFields() map[fieldpath.APIVersion]*fieldpath.Set {
fields := map[fieldpath.APIVersion]*fieldpath.Set{
"apiregistration.k8s.io/v1": fieldpath.NewSet(
fieldpath.MakePathOrDie("spec"),
fieldpath.MakePathOrDie("metadata"),
),
"apiregistration.k8s.io/v1beta1": fieldpath.NewSet(
fieldpath.MakePathOrDie("spec"),
fieldpath.MakePathOrDie("metadata"),
),
}
return fields
}
func (apiServerStatusStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.Object) {
newAPIService := obj.(*apiregistration.APIService)
oldAPIService := old.(*apiregistration.APIService)
newAPIService.Spec = oldAPIService.Spec
newAPIService.Labels = oldAPIService.Labels
newAPIService.Annotations = oldAPIService.Annotations
newAPIService.Finalizers = oldAPIService.Finalizers
newAPIService.OwnerReferences = oldAPIService.OwnerReferences
}
func (apiServerStatusStrategy) AllowCreateOnUpdate() bool {
return false
}
func (apiServerStatusStrategy) AllowUnconditionalUpdate() bool {
return false
}
// Canonicalize normalizes the object after validation.
func (apiServerStatusStrategy) Canonicalize(obj runtime.Object) {
}
// ValidateUpdate validates an update of apiServerStatusStrategy.
func (apiServerStatusStrategy) ValidateUpdate(ctx context.Context, obj, old runtime.Object) field.ErrorList {
return validation.ValidateAPIServiceStatusUpdate(obj.(*apiregistration.APIService), old.(*apiregistration.APIService))
}
// WarningsOnUpdate returns warnings for the given update.
func (apiServerStatusStrategy) WarningsOnUpdate(ctx context.Context, obj, old runtime.Object) []string {
return nil
}
// GetAttrs returns the labels and fields of an APIService for filtering purposes.
func GetAttrs(obj runtime.Object) (labels.Set, fields.Set, error) {
apiserver, ok := obj.(*apiregistration.APIService)
if !ok {
return nil, nil, fmt.Errorf("given object is not an APIService")
}
return labels.Set(apiserver.ObjectMeta.Labels), ToSelectableFields(apiserver), nil
}
// MatchAPIService is the filter used by the generic etcd backend to watch events
// from etcd to clients of the apiserver only interested in specific labels/fields.
func MatchAPIService(label labels.Selector, field fields.Selector) storage.SelectionPredicate {
return storage.SelectionPredicate{
Label: label,
Field: field,
GetAttrs: GetAttrs,
}
}
// ToSelectableFields returns a field set that represents the object.
func ToSelectableFields(obj *apiregistration.APIService) fields.Set {
return generic.ObjectMetaFieldsSet(&obj.ObjectMeta, true)
}

View File

@ -1,471 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package aggregator
import (
"fmt"
"reflect"
"sort"
"strings"
"k8s.io/kube-openapi/pkg/schemamutation"
"k8s.io/kube-openapi/pkg/util"
"k8s.io/kube-openapi/pkg/validation/spec"
)
const gvkKey = "x-kubernetes-group-version-kind"
// usedDefinitionForSpec returns a map with all used definitions in the provided spec as keys and true as values.
func usedDefinitionForSpec(root *spec.Swagger) map[string]bool {
usedDefinitions := map[string]bool{}
walkOnAllReferences(func(ref *spec.Ref) {
if refStr := ref.String(); refStr != "" && strings.HasPrefix(refStr, definitionPrefix) {
usedDefinitions[refStr[len(definitionPrefix):]] = true
}
}, root)
return usedDefinitions
}
// FilterSpecByPaths removes unnecessary paths and the definitions used only by those paths,
// i.e. if a path is removed by this function, all definitions used by it and not used
// anywhere else will also be removed.
func FilterSpecByPaths(sp *spec.Swagger, keepPathPrefixes []string) {
*sp = *FilterSpecByPathsWithoutSideEffects(sp, keepPathPrefixes)
}
// FilterSpecByPathsWithoutSideEffects removes unnecessary paths and the definitions used only by those paths,
// i.e. if a path is removed by this function, all definitions used by it and not used
// anywhere else will also be removed.
// It does not modify the input, but the output shares data structures with the input.
func FilterSpecByPathsWithoutSideEffects(sp *spec.Swagger, keepPathPrefixes []string) *spec.Swagger {
if sp.Paths == nil {
return sp
}
// Walk all references to find all used definitions. This function
// only wants to deal with unused definitions that result from filtering paths.
// Thus a definition will be removed only if it was used before but
// became unused because of a path prune.
initialUsedDefinitions := usedDefinitionForSpec(sp)
// First remove unwanted paths
prefixes := util.NewTrie(keepPathPrefixes)
ret := *sp
ret.Paths = &spec.Paths{
VendorExtensible: sp.Paths.VendorExtensible,
Paths: map[string]spec.PathItem{},
}
for path, pathItem := range sp.Paths.Paths {
if !prefixes.HasPrefix(path) {
continue
}
ret.Paths.Paths[path] = pathItem
}
// Walk all references to find all definition references.
usedDefinitions := usedDefinitionForSpec(&ret)
// Remove unused definitions
ret.Definitions = spec.Definitions{}
for k, v := range sp.Definitions {
if usedDefinitions[k] || !initialUsedDefinitions[k] {
ret.Definitions[k] = v
}
}
return &ret
}
// renameDefinitions renames definition references, without mutating the input.
// The output might share data structures with the input.
func renameDefinitions(s *spec.Swagger, renames map[string]string) *spec.Swagger {
refRenames := make(map[string]string, len(renames))
foundOne := false
for k, v := range renames {
refRenames[definitionPrefix+k] = definitionPrefix + v
if _, ok := s.Definitions[k]; ok {
foundOne = true
}
}
if !foundOne {
return s
}
ret := &spec.Swagger{}
*ret = *s
ret = schemamutation.ReplaceReferences(func(ref *spec.Ref) *spec.Ref {
refName := ref.String()
if newRef, found := refRenames[refName]; found {
ret := spec.MustCreateRef(newRef)
return &ret
}
return ref
}, ret)
renamedDefinitions := make(spec.Definitions, len(ret.Definitions))
for k, v := range ret.Definitions {
if newRef, found := renames[k]; found {
k = newRef
}
renamedDefinitions[k] = v
}
ret.Definitions = renamedDefinitions
return ret
}
// renameParameters renames parameter references, without mutating the input.
// The output might share data structures with the input.
func renameParameters(s *spec.Swagger, renames map[string]string) *spec.Swagger {
refRenames := make(map[string]string, len(renames))
foundOne := false
for k, v := range renames {
refRenames[parameterPrefix+k] = parameterPrefix + v
if _, ok := s.Parameters[k]; ok {
foundOne = true
}
}
if !foundOne {
return s
}
ret := &spec.Swagger{}
*ret = *s
ret = schemamutation.ReplaceReferences(func(ref *spec.Ref) *spec.Ref {
refName := ref.String()
if newRef, found := refRenames[refName]; found {
ret := spec.MustCreateRef(newRef)
return &ret
}
return ref
}, ret)
renamed := make(map[string]spec.Parameter, len(ret.Parameters))
for k, v := range ret.Parameters {
if newRef, found := renames[k]; found {
k = newRef
}
renamed[k] = v
}
ret.Parameters = renamed
return ret
}
// MergeSpecsIgnorePathConflictRenamingDefinitionsAndParameters is the same as
// MergeSpecs except it will ignore any path conflicts by keeping the paths of
// destination. It will rename definition and parameter conflicts.
func MergeSpecsIgnorePathConflictRenamingDefinitionsAndParameters(dest, source *spec.Swagger) error {
return mergeSpecs(dest, source, true, true, true)
}
// MergeSpecsIgnorePathConflictDeprecated is the same as MergeSpecs except it will ignore any path
// conflicts by keeping the paths of destination. It will rename definition
// conflicts, but fail on parameter name conflicts.
func MergeSpecsIgnorePathConflictDeprecated(dest, source *spec.Swagger) error {
return mergeSpecs(dest, source, true, false, true)
}
// MergeSpecsFailOnDefinitionConflict is different from MergeSpecs as it fails if there is
// a definition or parameter conflict.
func MergeSpecsFailOnDefinitionConflict(dest, source *spec.Swagger) error {
return mergeSpecs(dest, source, false, false, false)
}
// MergeSpecs copies paths, definitions and parameters from source to dest, renaming
// definitions and parameters if needed. It will fail on path conflicts.
//
// The destination is mutated, the source is not.
func MergeSpecs(dest, source *spec.Swagger) error {
return mergeSpecs(dest, source, true, true, false)
}
// mergeSpecs merges source into dest while resolving conflicts.
// The source is not mutated.
func mergeSpecs(dest, source *spec.Swagger, renameModelConflicts, renameParameterConflicts, ignorePathConflicts bool) (err error) {
// Paths may be empty, due to [ACL constraints](http://goo.gl/8us55a#securityFiltering).
if source.Paths == nil {
// When a source spec does not have any path, that means none of the definitions
// are used thus we should not do anything
return nil
}
if dest.Paths == nil {
dest.Paths = &spec.Paths{}
}
if ignorePathConflicts {
keepPaths := []string{}
hasConflictingPath := false
for k := range source.Paths.Paths {
if _, found := dest.Paths.Paths[k]; !found {
keepPaths = append(keepPaths, k)
} else {
hasConflictingPath = true
}
}
if len(keepPaths) == 0 {
// There is nothing to merge. All paths are conflicting.
return nil
}
if hasConflictingPath {
source = FilterSpecByPathsWithoutSideEffects(source, keepPaths)
}
}
// Check for model conflicts and rename to make definitions conflict-free (modulo different GVKs)
usedNames := map[string]bool{}
for k := range dest.Definitions {
usedNames[k] = true
}
renames := map[string]string{}
DEFINITIONLOOP:
for k, v := range source.Definitions {
existing, found := dest.Definitions[k]
if !found || deepEqualDefinitionsModuloGVKs(&existing, &v) {
// skip for now, we copy them after the rename loop
continue
}
if !renameModelConflicts {
return fmt.Errorf("model name conflict in merging OpenAPI spec: %s", k)
}
// Reuse previously renamed model if one exists
var newName string
i := 1
for found {
i++
newName = fmt.Sprintf("%s_v%d", k, i)
existing, found = dest.Definitions[newName]
if found && deepEqualDefinitionsModuloGVKs(&existing, &v) {
renames[k] = newName
continue DEFINITIONLOOP
}
}
_, foundInSource := source.Definitions[newName]
for usedNames[newName] || foundInSource {
i++
newName = fmt.Sprintf("%s_v%d", k, i)
_, foundInSource = source.Definitions[newName]
}
renames[k] = newName
usedNames[newName] = true
}
source = renameDefinitions(source, renames)
// Check for parameter conflicts and rename to make parameters conflict-free
usedNames = map[string]bool{}
for k := range dest.Parameters {
usedNames[k] = true
}
renames = map[string]string{}
PARAMETERLOOP:
for k, p := range source.Parameters {
existing, found := dest.Parameters[k]
if !found || reflect.DeepEqual(&existing, &p) {
// skip for now, we copy them after the rename loop
continue
}
if !renameParameterConflicts {
return fmt.Errorf("parameter name conflict in merging OpenAPI spec: %s", k)
}
// Reuse previously renamed parameter if one exists
var newName string
i := 1
for found {
i++
newName = fmt.Sprintf("%s_v%d", k, i)
existing, found = dest.Parameters[newName]
if found && reflect.DeepEqual(&existing, &p) {
renames[k] = newName
continue PARAMETERLOOP
}
}
_, foundInSource := source.Parameters[newName]
for usedNames[newName] || foundInSource {
i++
newName = fmt.Sprintf("%s_v%d", k, i)
_, foundInSource = source.Parameters[newName]
}
renames[k] = newName
usedNames[newName] = true
}
source = renameParameters(source, renames)
// Now without conflict (modulo different GVKs), copy definitions to dest
for k, v := range source.Definitions {
if existing, found := dest.Definitions[k]; !found {
if dest.Definitions == nil {
dest.Definitions = make(spec.Definitions, len(source.Definitions))
}
dest.Definitions[k] = v
} else if merged, changed, err := mergedGVKs(&existing, &v); err != nil {
return err
} else if changed {
existing.Extensions[gvkKey] = merged
}
}
// Now without conflict, copy parameters to dest
for k, v := range source.Parameters {
if _, found := dest.Parameters[k]; !found {
if dest.Parameters == nil {
dest.Parameters = make(map[string]spec.Parameter, len(source.Parameters))
}
dest.Parameters[k] = v
}
}
// Check for path conflicts
for k, v := range source.Paths.Paths {
if _, found := dest.Paths.Paths[k]; found {
return fmt.Errorf("unable to merge: duplicated path %s", k)
}
// PathItem may be empty, due to [ACL constraints](http://goo.gl/8us55a#securityFiltering).
if dest.Paths.Paths == nil {
dest.Paths.Paths = map[string]spec.PathItem{}
}
dest.Paths.Paths[k] = v
}
return nil
}
// deepEqualDefinitionsModuloGVKs compares s1 and s2, but ignores the x-kubernetes-group-version-kind extension.
func deepEqualDefinitionsModuloGVKs(s1, s2 *spec.Schema) bool {
if s1 == nil {
return s2 == nil
} else if s2 == nil {
return false
}
if !reflect.DeepEqual(s1.Extensions, s2.Extensions) {
for k, v := range s1.Extensions {
if k == gvkKey {
continue
}
if !reflect.DeepEqual(v, s2.Extensions[k]) {
return false
}
}
len1 := len(s1.Extensions)
len2 := len(s2.Extensions)
if _, found := s1.Extensions[gvkKey]; found {
len1--
}
if _, found := s2.Extensions[gvkKey]; found {
len2--
}
if len1 != len2 {
return false
}
if s1.Extensions != nil {
shallowCopy := *s1
s1 = &shallowCopy
s1.Extensions = nil
}
if s2.Extensions != nil {
shallowCopy := *s2
s2 = &shallowCopy
s2.Extensions = nil
}
}
return reflect.DeepEqual(s1, s2)
}
// mergedGVKs merges the x-kubernetes-group-version-kind slices and returns the result, and whether
// s1's x-kubernetes-group-version-kind slice was changed at all.
func mergedGVKs(s1, s2 *spec.Schema) (interface{}, bool, error) {
gvk1, found1 := s1.Extensions[gvkKey]
gvk2, found2 := s2.Extensions[gvkKey]
if !found1 {
return gvk2, found2, nil
}
if !found2 {
return gvk1, false, nil
}
slice1, ok := gvk1.([]interface{})
if !ok {
return nil, false, fmt.Errorf("expected slice of GroupVersionKinds, got: %+v", slice1)
}
slice2, ok := gvk2.([]interface{})
if !ok {
return nil, false, fmt.Errorf("expected slice of GroupVersionKinds, got: %+v", slice2)
}
ret := make([]interface{}, len(slice1), len(slice1)+len(slice2))
keys := make([]string, 0, len(slice1)+len(slice2))
copy(ret, slice1)
seen := make(map[string]bool, len(slice1))
for _, x := range slice1 {
gvk, ok := x.(map[string]interface{})
if !ok {
return nil, false, fmt.Errorf(`expected {"group": <group>, "kind": <kind>, "version": <version>}, got: %#v`, x)
}
k := fmt.Sprintf("%s/%s.%s", gvk["group"], gvk["version"], gvk["kind"])
keys = append(keys, k)
seen[k] = true
}
changed := false
for _, x := range slice2 {
gvk, ok := x.(map[string]interface{})
if !ok {
return nil, false, fmt.Errorf(`expected {"group": <group>, "kind": <kind>, "version": <version>}, got: %#v`, x)
}
k := fmt.Sprintf("%s/%s.%s", gvk["group"], gvk["version"], gvk["kind"])
if seen[k] {
continue
}
ret = append(ret, x)
keys = append(keys, k)
changed = true
}
if changed {
sort.Sort(byKeys{ret, keys})
}
return ret, changed, nil
}
type byKeys struct {
values []interface{}
keys []string
}
func (b byKeys) Len() int {
return len(b.values)
}
func (b byKeys) Less(i, j int) bool {
return b.keys[i] < b.keys[j]
}
func (b byKeys) Swap(i, j int) {
b.values[i], b.values[j] = b.values[j], b.values[i]
b.keys[i], b.keys[j] = b.keys[j], b.keys[i]
}
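The merge helpers above also exist upstream in k8s.io/kube-openapi/pkg/aggregator with the same exported names. A minimal, illustrative sketch (assumed usage, not part of this commit) of the rename-on-conflict behaviour that mergeSpecs describes:

// sketch: a conflicting "Foo" definition from source is renamed to "Foo_v2".
package main

import (
	"fmt"

	"k8s.io/kube-openapi/pkg/aggregator"
	"k8s.io/kube-openapi/pkg/validation/spec"
)

func typed(t string) spec.Schema {
	return spec.Schema{SchemaProps: spec.SchemaProps{Type: []string{t}}}
}

func main() {
	dest := &spec.Swagger{SwaggerProps: spec.SwaggerProps{
		Paths:       &spec.Paths{Paths: map[string]spec.PathItem{"/apis/a": {}}},
		Definitions: spec.Definitions{"Foo": typed("string")},
	}}
	source := &spec.Swagger{SwaggerProps: spec.SwaggerProps{
		Paths:       &spec.Paths{Paths: map[string]spec.PathItem{"/apis/b": {}}},
		Definitions: spec.Definitions{"Foo": typed("integer")},
	}}

	if err := aggregator.MergeSpecs(dest, source); err != nil {
		panic(err)
	}
	fmt.Println(len(dest.Paths.Paths), len(dest.Definitions)) // 2 2 ("Foo" and "Foo_v2")
}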

View File

@ -1,163 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package aggregator
import (
"strings"
"k8s.io/kube-openapi/pkg/validation/spec"
)
const (
definitionPrefix = "#/definitions/"
parameterPrefix = "#/parameters/"
)
// readonlyReferenceWalker runs a callback on all references of an OpenAPI spec.
type readonlyReferenceWalker struct {
// walkRefCallback will be called on each reference. The input will never be nil.
walkRefCallback func(ref *spec.Ref)
// The spec to walk through.
root *spec.Swagger
}
// walkOnAllReferences recursively walks on all references, while following references into definitions.
// It calls walkRef on each reference it finds.
func walkOnAllReferences(walkRef func(ref *spec.Ref), root *spec.Swagger) {
alreadyVisited := map[string]bool{}
walker := &readonlyReferenceWalker{
root: root,
}
walker.walkRefCallback = func(ref *spec.Ref) {
walkRef(ref)
refStr := ref.String()
if refStr == "" || !strings.HasPrefix(refStr, definitionPrefix) {
return
}
defName := refStr[len(definitionPrefix):]
if _, found := root.Definitions[defName]; found && !alreadyVisited[refStr] {
alreadyVisited[refStr] = true
def := root.Definitions[defName]
walker.walkSchema(&def)
}
}
walker.Start()
}
func (s *readonlyReferenceWalker) walkSchema(schema *spec.Schema) {
if schema == nil {
return
}
s.walkRefCallback(&schema.Ref)
var v *spec.Schema
if len(schema.Definitions)+len(schema.Properties)+len(schema.PatternProperties) > 0 {
v = &spec.Schema{}
}
for k := range schema.Definitions {
*v = schema.Definitions[k]
s.walkSchema(v)
}
for k := range schema.Properties {
*v = schema.Properties[k]
s.walkSchema(v)
}
for k := range schema.PatternProperties {
*v = schema.PatternProperties[k]
s.walkSchema(v)
}
for i := range schema.AllOf {
s.walkSchema(&schema.AllOf[i])
}
for i := range schema.AnyOf {
s.walkSchema(&schema.AnyOf[i])
}
for i := range schema.OneOf {
s.walkSchema(&schema.OneOf[i])
}
if schema.Not != nil {
s.walkSchema(schema.Not)
}
if schema.AdditionalProperties != nil && schema.AdditionalProperties.Schema != nil {
s.walkSchema(schema.AdditionalProperties.Schema)
}
if schema.AdditionalItems != nil && schema.AdditionalItems.Schema != nil {
s.walkSchema(schema.AdditionalItems.Schema)
}
if schema.Items != nil {
if schema.Items.Schema != nil {
s.walkSchema(schema.Items.Schema)
}
for i := range schema.Items.Schemas {
s.walkSchema(&schema.Items.Schemas[i])
}
}
}
func (s *readonlyReferenceWalker) walkParams(params []spec.Parameter) {
if params == nil {
return
}
for _, param := range params {
s.walkRefCallback(&param.Ref)
s.walkSchema(param.Schema)
if param.Items != nil {
s.walkRefCallback(&param.Items.Ref)
}
}
}
func (s *readonlyReferenceWalker) walkResponse(resp *spec.Response) {
if resp == nil {
return
}
s.walkRefCallback(&resp.Ref)
s.walkSchema(resp.Schema)
}
func (s *readonlyReferenceWalker) walkOperation(op *spec.Operation) {
if op == nil {
return
}
s.walkParams(op.Parameters)
if op.Responses == nil {
return
}
s.walkResponse(op.Responses.Default)
for _, r := range op.Responses.StatusCodeResponses {
s.walkResponse(&r)
}
}
func (s *readonlyReferenceWalker) Start() {
if s.root.Paths == nil {
return
}
for _, pathItem := range s.root.Paths.Paths {
s.walkParams(pathItem.Parameters)
s.walkOperation(pathItem.Delete)
s.walkOperation(pathItem.Get)
s.walkOperation(pathItem.Head)
s.walkOperation(pathItem.Options)
s.walkOperation(pathItem.Patch)
s.walkOperation(pathItem.Post)
s.walkOperation(pathItem.Put)
}
}

View File

@ -1,322 +0,0 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package openapiconv
import (
"strings"
klog "k8s.io/klog/v2"
builderutil "k8s.io/kube-openapi/pkg/builder3/util"
"k8s.io/kube-openapi/pkg/spec3"
"k8s.io/kube-openapi/pkg/validation/spec"
)
var OpenAPIV2DefPrefix = "#/definitions/"
var OpenAPIV3DefPrefix = "#/components/schemas/"
// ConvertV2ToV3 converts an OpenAPI V2 object into V3.
// Certain references may be shared between the V2 and V3 objects in the conversion.
func ConvertV2ToV3(v2Spec *spec.Swagger) *spec3.OpenAPI {
v3Spec := &spec3.OpenAPI{
Version: "3.0.0",
Info: v2Spec.Info,
ExternalDocs: ConvertExternalDocumentation(v2Spec.ExternalDocs),
Paths: ConvertPaths(v2Spec.Paths),
Components: ConvertComponents(v2Spec.SecurityDefinitions, v2Spec.Definitions, v2Spec.Responses, v2Spec.Produces),
}
return v3Spec
}
func ConvertExternalDocumentation(v2ED *spec.ExternalDocumentation) *spec3.ExternalDocumentation {
if v2ED == nil {
return nil
}
return &spec3.ExternalDocumentation{
ExternalDocumentationProps: spec3.ExternalDocumentationProps{
Description: v2ED.Description,
URL: v2ED.URL,
},
}
}
func ConvertComponents(v2SecurityDefinitions spec.SecurityDefinitions, v2Definitions spec.Definitions, v2Responses map[string]spec.Response, produces []string) *spec3.Components {
components := &spec3.Components{}
if v2Definitions != nil {
components.Schemas = make(map[string]*spec.Schema)
}
for s, schema := range v2Definitions {
components.Schemas[s] = ConvertSchema(&schema)
}
if v2SecurityDefinitions != nil {
components.SecuritySchemes = make(spec3.SecuritySchemes)
}
for s, securityScheme := range v2SecurityDefinitions {
components.SecuritySchemes[s] = ConvertSecurityScheme(securityScheme)
}
if v2Responses != nil {
components.Responses = make(map[string]*spec3.Response)
}
for r, response := range v2Responses {
components.Responses[r] = ConvertResponse(&response, produces)
}
return components
}
func ConvertSchema(v2Schema *spec.Schema) *spec.Schema {
if v2Schema == nil {
return nil
}
v3Schema := spec.Schema{
VendorExtensible: v2Schema.VendorExtensible,
SchemaProps: v2Schema.SchemaProps,
SwaggerSchemaProps: v2Schema.SwaggerSchemaProps,
ExtraProps: v2Schema.ExtraProps,
}
if refString := v2Schema.Ref.String(); refString != "" {
if idx := strings.Index(refString, OpenAPIV2DefPrefix); idx != -1 {
v3Schema.Ref = spec.MustCreateRef(OpenAPIV3DefPrefix + refString[idx+len(OpenAPIV2DefPrefix):])
} else {
klog.Errorf("Error: Swagger V2 Ref %s does not contain #/definitions\n", refString)
}
}
if v2Schema.Properties != nil {
v3Schema.Properties = make(map[string]spec.Schema)
for key, property := range v2Schema.Properties {
v3Schema.Properties[key] = *ConvertSchema(&property)
}
}
if v2Schema.Items != nil {
v3Schema.Items = &spec.SchemaOrArray{
Schema: ConvertSchema(v2Schema.Items.Schema),
Schemas: ConvertSchemaList(v2Schema.Items.Schemas),
}
}
if v2Schema.AdditionalProperties != nil {
v3Schema.AdditionalProperties = &spec.SchemaOrBool{
Schema: ConvertSchema(v2Schema.AdditionalProperties.Schema),
Allows: v2Schema.AdditionalProperties.Allows,
}
}
if v2Schema.AdditionalItems != nil {
v3Schema.AdditionalItems = &spec.SchemaOrBool{
Schema: ConvertSchema(v2Schema.AdditionalItems.Schema),
Allows: v2Schema.AdditionalItems.Allows,
}
}
return builderutil.WrapRefs(&v3Schema)
}
func ConvertSchemaList(v2SchemaList []spec.Schema) []spec.Schema {
if v2SchemaList == nil {
return nil
}
v3SchemaList := []spec.Schema{}
for _, s := range v2SchemaList {
v3SchemaList = append(v3SchemaList, *ConvertSchema(&s))
}
return v3SchemaList
}
func ConvertSecurityScheme(v2securityScheme *spec.SecurityScheme) *spec3.SecurityScheme {
if v2securityScheme == nil {
return nil
}
securityScheme := &spec3.SecurityScheme{
VendorExtensible: v2securityScheme.VendorExtensible,
SecuritySchemeProps: spec3.SecuritySchemeProps{
Description: v2securityScheme.Description,
Type: v2securityScheme.Type,
Name: v2securityScheme.Name,
In: v2securityScheme.In,
},
}
if v2securityScheme.Flow != "" {
securityScheme.Flows = make(map[string]*spec3.OAuthFlow)
securityScheme.Flows[v2securityScheme.Flow] = &spec3.OAuthFlow{
OAuthFlowProps: spec3.OAuthFlowProps{
AuthorizationUrl: v2securityScheme.AuthorizationURL,
TokenUrl: v2securityScheme.TokenURL,
Scopes: v2securityScheme.Scopes,
},
}
}
return securityScheme
}
func ConvertPaths(v2Paths *spec.Paths) *spec3.Paths {
if v2Paths == nil {
return nil
}
paths := &spec3.Paths{
VendorExtensible: v2Paths.VendorExtensible,
}
if v2Paths.Paths != nil {
paths.Paths = make(map[string]*spec3.Path)
}
for k, v := range v2Paths.Paths {
paths.Paths[k] = ConvertPathItem(v)
}
return paths
}
func ConvertPathItem(v2pathItem spec.PathItem) *spec3.Path {
path := &spec3.Path{
Refable: v2pathItem.Refable,
PathProps: spec3.PathProps{
Get: ConvertOperation(v2pathItem.Get),
Put: ConvertOperation(v2pathItem.Put),
Post: ConvertOperation(v2pathItem.Post),
Delete: ConvertOperation(v2pathItem.Delete),
Options: ConvertOperation(v2pathItem.Options),
Head: ConvertOperation(v2pathItem.Head),
Patch: ConvertOperation(v2pathItem.Patch),
},
VendorExtensible: v2pathItem.VendorExtensible,
}
for _, param := range v2pathItem.Parameters {
path.Parameters = append(path.Parameters, ConvertParameter(param))
}
return path
}
func ConvertOperation(v2Operation *spec.Operation) *spec3.Operation {
if v2Operation == nil {
return nil
}
operation := &spec3.Operation{
VendorExtensible: v2Operation.VendorExtensible,
OperationProps: spec3.OperationProps{
Description: v2Operation.Description,
ExternalDocs: ConvertExternalDocumentation(v2Operation.OperationProps.ExternalDocs),
Tags: v2Operation.Tags,
Summary: v2Operation.Summary,
Deprecated: v2Operation.Deprecated,
OperationId: v2Operation.ID,
},
}
for _, param := range v2Operation.Parameters {
if param.ParamProps.Name == "body" && param.ParamProps.Schema != nil {
operation.OperationProps.RequestBody = &spec3.RequestBody{
RequestBodyProps: spec3.RequestBodyProps{},
}
if v2Operation.Consumes != nil {
operation.RequestBody.Content = make(map[string]*spec3.MediaType)
}
for _, consumer := range v2Operation.Consumes {
operation.RequestBody.Content[consumer] = &spec3.MediaType{
MediaTypeProps: spec3.MediaTypeProps{
Schema: ConvertSchema(param.ParamProps.Schema),
},
}
}
} else {
operation.Parameters = append(operation.Parameters, ConvertParameter(param))
}
}
operation.Responses = &spec3.Responses{ResponsesProps: spec3.ResponsesProps{
Default: ConvertResponse(v2Operation.Responses.Default, v2Operation.Produces),
},
VendorExtensible: v2Operation.Responses.VendorExtensible,
}
if v2Operation.Responses.StatusCodeResponses != nil {
operation.Responses.StatusCodeResponses = make(map[int]*spec3.Response)
}
for k, v := range v2Operation.Responses.StatusCodeResponses {
operation.Responses.StatusCodeResponses[k] = ConvertResponse(&v, v2Operation.Produces)
}
return operation
}
func ConvertResponse(v2Response *spec.Response, produces []string) *spec3.Response {
if v2Response == nil {
return nil
}
response := &spec3.Response{
Refable: ConvertRefableResponse(v2Response.Refable),
VendorExtensible: v2Response.VendorExtensible,
ResponseProps: spec3.ResponseProps{
Description: v2Response.Description,
},
}
if v2Response.Schema != nil {
if produces != nil {
response.Content = make(map[string]*spec3.MediaType)
}
for _, producer := range produces {
response.ResponseProps.Content[producer] = &spec3.MediaType{
MediaTypeProps: spec3.MediaTypeProps{
Schema: ConvertSchema(v2Response.Schema),
},
}
}
}
return response
}
func ConvertParameter(v2Param spec.Parameter) *spec3.Parameter {
param := &spec3.Parameter{
Refable: ConvertRefableParameter(v2Param.Refable),
VendorExtensible: v2Param.VendorExtensible,
ParameterProps: spec3.ParameterProps{
Name: v2Param.Name,
Description: v2Param.Description,
In: v2Param.In,
Required: v2Param.Required,
Schema: ConvertSchema(v2Param.Schema),
AllowEmptyValue: v2Param.AllowEmptyValue,
},
}
// Convert SimpleSchema into Schema
if param.Schema == nil {
param.Schema = &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{v2Param.Type},
Format: v2Param.Format,
UniqueItems: v2Param.UniqueItems,
},
}
}
return param
}
func ConvertRefableParameter(refable spec.Refable) spec.Refable {
if refable.Ref.String() != "" {
return spec.Refable{Ref: spec.MustCreateRef(strings.Replace(refable.Ref.String(), "#/parameters/", "#/components/parameters/", 1))}
}
return refable
}
func ConvertRefableResponse(refable spec.Refable) spec.Refable {
if refable.Ref.String() != "" {
return spec.Refable{Ref: spec.MustCreateRef(strings.Replace(refable.Ref.String(), "#/responses/", "#/components/responses/", 1))}
}
return refable
}
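The same converter is available upstream as k8s.io/kube-openapi/pkg/openapiconv. A short, illustrative sketch (assumed usage, not part of this commit) showing a #/definitions/ reference being rewritten to #/components/schemas/:

// sketch: ConvertV2ToV3 rewrites v2 definition refs into v3 component refs.
package main

import (
	"encoding/json"
	"fmt"
	"strings"

	"k8s.io/kube-openapi/pkg/openapiconv"
	"k8s.io/kube-openapi/pkg/validation/spec"
)

func main() {
	v2 := &spec.Swagger{SwaggerProps: spec.SwaggerProps{
		Info: &spec.Info{InfoProps: spec.InfoProps{Title: "demo", Version: "v1"}},
		Definitions: spec.Definitions{
			"Widget": {SchemaProps: spec.SchemaProps{
				Type: []string{"object"},
				Properties: map[string]spec.Schema{
					"owner": {SchemaProps: spec.SchemaProps{
						Ref: spec.MustCreateRef("#/definitions/Owner"),
					}},
				},
			}},
			"Owner": {SchemaProps: spec.SchemaProps{Type: []string{"string"}}},
		},
	}}

	v3 := openapiconv.ConvertV2ToV3(v2)
	out, _ := json.Marshal(v3.Components.Schemas["Widget"])
	fmt.Println(strings.Contains(string(out), "#/components/schemas/Owner")) // true
}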

vendor/modules.txt vendored (29 lines changed)
View File

@ -949,7 +949,6 @@ k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextension
k8s.io/apimachinery/pkg/api/equality
k8s.io/apimachinery/pkg/api/errors
k8s.io/apimachinery/pkg/api/meta
k8s.io/apimachinery/pkg/api/meta/table
k8s.io/apimachinery/pkg/api/meta/testrestmapper
k8s.io/apimachinery/pkg/api/resource
k8s.io/apimachinery/pkg/api/validation
@ -1103,7 +1102,6 @@ k8s.io/apiserver/pkg/endpoints/responsewriter
k8s.io/apiserver/pkg/endpoints/warning
k8s.io/apiserver/pkg/features
k8s.io/apiserver/pkg/quota/v1
k8s.io/apiserver/pkg/reconcilers
k8s.io/apiserver/pkg/registry/generic
k8s.io/apiserver/pkg/registry/generic/registry
k8s.io/apiserver/pkg/registry/rest
@ -1156,8 +1154,6 @@ k8s.io/apiserver/pkg/util/flowcontrol/metrics
k8s.io/apiserver/pkg/util/flowcontrol/request
k8s.io/apiserver/pkg/util/flushwriter
k8s.io/apiserver/pkg/util/peerproxy/metrics
k8s.io/apiserver/pkg/util/proxy
k8s.io/apiserver/pkg/util/proxy/metrics
k8s.io/apiserver/pkg/util/shufflesharding
k8s.io/apiserver/pkg/util/version
k8s.io/apiserver/pkg/util/webhook
@ -1507,7 +1503,6 @@ k8s.io/client-go/tools/leaderelection
k8s.io/client-go/tools/leaderelection/resourcelock
k8s.io/client-go/tools/metrics
k8s.io/client-go/tools/pager
k8s.io/client-go/tools/portforward
k8s.io/client-go/tools/record
k8s.io/client-go/tools/record/util
k8s.io/client-go/tools/reference
@ -1633,13 +1628,9 @@ k8s.io/kms/pkg/util
# k8s.io/kube-aggregator v0.31.3
## explicit; go 1.22.0
k8s.io/kube-aggregator/pkg/apis/apiregistration
k8s.io/kube-aggregator/pkg/apis/apiregistration/install
k8s.io/kube-aggregator/pkg/apis/apiregistration/v1
k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/helper
k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1
k8s.io/kube-aggregator/pkg/apis/apiregistration/validation
k8s.io/kube-aggregator/pkg/apiserver
k8s.io/kube-aggregator/pkg/apiserver/scheme
k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset
k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/fake
k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme
@ -1647,29 +1638,10 @@ k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistr
k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/fake
k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1
k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1/fake
k8s.io/kube-aggregator/pkg/client/informers/externalversions
k8s.io/kube-aggregator/pkg/client/informers/externalversions/apiregistration
k8s.io/kube-aggregator/pkg/client/informers/externalversions/apiregistration/v1
k8s.io/kube-aggregator/pkg/client/informers/externalversions/apiregistration/v1beta1
k8s.io/kube-aggregator/pkg/client/informers/externalversions/internalinterfaces
k8s.io/kube-aggregator/pkg/client/listers/apiregistration/v1
k8s.io/kube-aggregator/pkg/client/listers/apiregistration/v1beta1
k8s.io/kube-aggregator/pkg/controllers
k8s.io/kube-aggregator/pkg/controllers/openapi
k8s.io/kube-aggregator/pkg/controllers/openapi/aggregator
k8s.io/kube-aggregator/pkg/controllers/openapiv3
k8s.io/kube-aggregator/pkg/controllers/openapiv3/aggregator
k8s.io/kube-aggregator/pkg/controllers/status/local
k8s.io/kube-aggregator/pkg/controllers/status/metrics
k8s.io/kube-aggregator/pkg/controllers/status/remote
k8s.io/kube-aggregator/pkg/registry/apiservice
k8s.io/kube-aggregator/pkg/registry/apiservice/etcd
k8s.io/kube-aggregator/pkg/registry/apiservice/rest
# k8s.io/kube-openapi v0.0.0-20240430033511-f0e62f92d13f
## explicit; go 1.20
k8s.io/kube-openapi/cmd/openapi-gen
k8s.io/kube-openapi/cmd/openapi-gen/args
k8s.io/kube-openapi/pkg/aggregator
k8s.io/kube-openapi/pkg/builder
k8s.io/kube-openapi/pkg/builder3
k8s.io/kube-openapi/pkg/builder3/util
@ -1682,7 +1654,6 @@ k8s.io/kube-openapi/pkg/handler
k8s.io/kube-openapi/pkg/handler3
k8s.io/kube-openapi/pkg/internal
k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json
k8s.io/kube-openapi/pkg/openapiconv
k8s.io/kube-openapi/pkg/schemaconv
k8s.io/kube-openapi/pkg/schemamutation
k8s.io/kube-openapi/pkg/spec3