Updating vendor against git@github.com:kubernetes/kubernetes.git:master (711790af23a231ade04c6c39d6bc5dcf1a996f73)

parent 74c9229bf4
commit 247111eaf1
cluster-autoscaler/go.mod

@@ -102,7 +102,6 @@ replace (
 	github.com/coreos/go-semver => github.com/coreos/go-semver v0.3.0
 	github.com/coreos/go-systemd => github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e
 	github.com/coreos/pkg => github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea
-	github.com/coreos/rkt => github.com/coreos/rkt v1.30.0
 	github.com/cpuguy83/go-md2man => github.com/cpuguy83/go-md2man v1.0.10
 	github.com/creack/pty => github.com/creack/pty v1.1.7
 	github.com/cyphar/filepath-securejoin => github.com/cyphar/filepath-securejoin v0.2.2
@@ -185,7 +184,7 @@ replace (
 	github.com/golangplus/fmt => github.com/golangplus/fmt v0.0.0-20150411045040-2a5d6d7d2995
 	github.com/golangplus/testing => github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e
 	github.com/google/btree => github.com/google/btree v1.0.0
-	github.com/google/cadvisor => github.com/google/cadvisor v0.34.0
+	github.com/google/cadvisor => github.com/google/cadvisor v0.35.0
 	github.com/google/go-cmp => github.com/google/go-cmp v0.3.0
 	github.com/google/go-github => github.com/google/go-github v17.0.0+incompatible
 	github.com/google/go-querystring => github.com/google/go-querystring v1.0.0
@@ -392,35 +391,35 @@ replace (
 	gotest.tools/gotestsum => gotest.tools/gotestsum v0.3.5
 	grpc.go4.org => grpc.go4.org v0.0.0-20170609214715-11d0a25b4919
 	honnef.co/go/tools => honnef.co/go/tools v0.0.1-2019.2.2
-	k8s.io/api => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/api
-	k8s.io/apiextensions-apiserver => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/apiextensions-apiserver
-	k8s.io/apimachinery => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/apimachinery
-	k8s.io/apiserver => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/apiserver
-	k8s.io/cli-runtime => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/cli-runtime
-	k8s.io/client-go => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/client-go
-	k8s.io/cloud-provider => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/cloud-provider
-	k8s.io/cluster-bootstrap => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/cluster-bootstrap
-	k8s.io/code-generator => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/code-generator
-	k8s.io/component-base => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/component-base
-	k8s.io/cri-api => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/cri-api
-	k8s.io/csi-translation-lib => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/csi-translation-lib
+	k8s.io/api => /tmp/ca-update-vendor.55jg/kubernetes/staging/src/k8s.io/api
+	k8s.io/apiextensions-apiserver => /tmp/ca-update-vendor.55jg/kubernetes/staging/src/k8s.io/apiextensions-apiserver
+	k8s.io/apimachinery => /tmp/ca-update-vendor.55jg/kubernetes/staging/src/k8s.io/apimachinery
+	k8s.io/apiserver => /tmp/ca-update-vendor.55jg/kubernetes/staging/src/k8s.io/apiserver
+	k8s.io/cli-runtime => /tmp/ca-update-vendor.55jg/kubernetes/staging/src/k8s.io/cli-runtime
+	k8s.io/client-go => /tmp/ca-update-vendor.55jg/kubernetes/staging/src/k8s.io/client-go
+	k8s.io/cloud-provider => /tmp/ca-update-vendor.55jg/kubernetes/staging/src/k8s.io/cloud-provider
+	k8s.io/cluster-bootstrap => /tmp/ca-update-vendor.55jg/kubernetes/staging/src/k8s.io/cluster-bootstrap
+	k8s.io/code-generator => /tmp/ca-update-vendor.55jg/kubernetes/staging/src/k8s.io/code-generator
+	k8s.io/component-base => /tmp/ca-update-vendor.55jg/kubernetes/staging/src/k8s.io/component-base
+	k8s.io/cri-api => /tmp/ca-update-vendor.55jg/kubernetes/staging/src/k8s.io/cri-api
+	k8s.io/csi-translation-lib => /tmp/ca-update-vendor.55jg/kubernetes/staging/src/k8s.io/csi-translation-lib
 	k8s.io/gengo => k8s.io/gengo v0.0.0-20190822140433-26a664648505
 	k8s.io/heapster => k8s.io/heapster v1.2.0-beta.1
 	k8s.io/klog => k8s.io/klog v1.0.0
-	k8s.io/kube-aggregator => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/kube-aggregator
-	k8s.io/kube-controller-manager => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/kube-controller-manager
+	k8s.io/kube-aggregator => /tmp/ca-update-vendor.55jg/kubernetes/staging/src/k8s.io/kube-aggregator
+	k8s.io/kube-controller-manager => /tmp/ca-update-vendor.55jg/kubernetes/staging/src/k8s.io/kube-controller-manager
 	k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a
-	k8s.io/kube-proxy => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/kube-proxy
-	k8s.io/kube-scheduler => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/kube-scheduler
-	k8s.io/kubectl => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/kubectl
-	k8s.io/kubelet => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/kubelet
-	k8s.io/legacy-cloud-providers => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/legacy-cloud-providers
-	k8s.io/metrics => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/metrics
-	k8s.io/node-api => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/node-api
+	k8s.io/kube-proxy => /tmp/ca-update-vendor.55jg/kubernetes/staging/src/k8s.io/kube-proxy
+	k8s.io/kube-scheduler => /tmp/ca-update-vendor.55jg/kubernetes/staging/src/k8s.io/kube-scheduler
+	k8s.io/kubectl => /tmp/ca-update-vendor.55jg/kubernetes/staging/src/k8s.io/kubectl
+	k8s.io/kubelet => /tmp/ca-update-vendor.55jg/kubernetes/staging/src/k8s.io/kubelet
+	k8s.io/legacy-cloud-providers => /tmp/ca-update-vendor.55jg/kubernetes/staging/src/k8s.io/legacy-cloud-providers
+	k8s.io/metrics => /tmp/ca-update-vendor.55jg/kubernetes/staging/src/k8s.io/metrics
+	k8s.io/node-api => /tmp/ca-update-vendor.55jg/kubernetes/staging/src/k8s.io/node-api
 	k8s.io/repo-infra => k8s.io/repo-infra v0.0.1-alpha.1
-	k8s.io/sample-apiserver => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/sample-apiserver
-	k8s.io/sample-cli-plugin => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/sample-cli-plugin
-	k8s.io/sample-controller => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/sample-controller
+	k8s.io/sample-apiserver => /tmp/ca-update-vendor.55jg/kubernetes/staging/src/k8s.io/sample-apiserver
+	k8s.io/sample-cli-plugin => /tmp/ca-update-vendor.55jg/kubernetes/staging/src/k8s.io/sample-cli-plugin
+	k8s.io/sample-controller => /tmp/ca-update-vendor.55jg/kubernetes/staging/src/k8s.io/sample-controller
 	k8s.io/system-validators => k8s.io/system-validators v1.0.4
 	k8s.io/utils => k8s.io/utils v0.0.0-20191114184206-e782cd3c129f
 	modernc.org/cc => modernc.org/cc v1.0.0
@@ -440,4 +439,4 @@ replace (

 replace github.com/rancher/go-rancher => github.com/rancher/go-rancher v0.1.0

-replace k8s.io/kubernetes => /tmp/ca-update-vendor.oDH2/kubernetes
+replace k8s.io/kubernetes => /tmp/ca-update-vendor.55jg/kubernetes
cluster-autoscaler/go.sum

@@ -102,8 +102,6 @@ github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9
 github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
 github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea h1:n2Ltr3SrfQlf/9nOna1DoGKxLx3qTSI8Ttl6Xrqp6mw=
 github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/coreos/rkt v1.30.0 h1:dFPDEVnztemjpNGzxWdJivfwNbUz2VAlYccttRb6H6A=
-github.com/coreos/rkt v1.30.0/go.mod h1:O634mlH6U7qk87poQifK6M2rsFNt+FyUTWNMnP1hF1U=
 github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
 github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
 github.com/cyphar/filepath-securejoin v0.2.2 h1:jCwT2GTP+PY5nBz3c/YL5PAIbusElVrPujOBSCj8xRg=
@@ -215,8 +213,8 @@ github.com/golangplus/fmt v0.0.0-20150411045040-2a5d6d7d2995/go.mod h1:lJgMEyOkY
 github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk=
 github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
 github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/cadvisor v0.34.0 h1:No7G6U/TasplR9uNqyc5Jj0Bet5VSYsK5xLygOf4pUw=
-github.com/google/cadvisor v0.34.0/go.mod h1:1nql6U13uTHaLYB8rLS5x9IJc2qT6Xd/Tr1sTX6NE48=
+github.com/google/cadvisor v0.35.0 h1:qivoEm+iGqTrd0CKSmQidxfOxUxkNZovvYs/8G6B6ao=
+github.com/google/cadvisor v0.35.0/go.mod h1:1nql6U13uTHaLYB8rLS5x9IJc2qT6Xd/Tr1sTX6NE48=
 github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
 github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
cluster-autoscaler/vendor/github.com/google/cadvisor/container/common/helpers.go (57 changes, generated, vendored)
@@ -19,6 +19,7 @@ import (
 	"io/ioutil"
 	"os"
 	"path"
+	"path/filepath"
 	"strconv"
 	"strings"
 	"time"
@@ -27,6 +28,7 @@ import (
 	info "github.com/google/cadvisor/info/v1"
 	"github.com/google/cadvisor/utils"
 	"github.com/karrick/godirwalk"
+	"github.com/opencontainers/runc/libcontainer/cgroups"
 	"github.com/pkg/errors"

 	"k8s.io/klog"
@@ -47,6 +49,25 @@ func DebugInfo(watches map[string][]string) map[string][]string {
 	return out
 }

+// findFileInAncestorDir returns the path to the parent directory that contains the specified file.
+// "" is returned if the lookup reaches the limit.
+func findFileInAncestorDir(current, file, limit string) (string, error) {
+	for {
+		fpath := path.Join(current, file)
+		_, err := os.Stat(fpath)
+		if err == nil {
+			return current, nil
+		}
+		if !os.IsNotExist(err) {
+			return "", err
+		}
+		if current == limit {
+			return "", nil
+		}
+		current = filepath.Dir(current)
+	}
+}
+
 func GetSpec(cgroupPaths map[string]string, machineInfoFactory info.MachineInfoFactory, hasNetwork, hasFilesystem bool) (info.ContainerSpec, error) {
 	var spec info.ContainerSpec
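Note (editor's illustration): the helper above exists because on cgroup v2 a controller's interface files may live in an ancestor of the container's cgroup directory rather than in the leaf. A self-contained sketch of its behavior, using temporary directories in place of /sys/fs/cgroup (the layout below is hypothetical):

package main

import (
	"fmt"
	"os"
	"path"
	"path/filepath"
)

// Reproduced from the hunk above, for illustration only.
func findFileInAncestorDir(current, file, limit string) (string, error) {
	for {
		fpath := path.Join(current, file)
		_, err := os.Stat(fpath)
		if err == nil {
			return current, nil
		}
		if !os.IsNotExist(err) {
			return "", err
		}
		if current == limit {
			return "", nil
		}
		current = filepath.Dir(current)
	}
}

func main() {
	root, err := os.MkdirTemp("", "cgv2")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(root)

	// Hypothetical layout: the memory controller is enabled at the
	// "kubepods" level only, so the leaf has no memory.max of its own.
	leaf := filepath.Join(root, "kubepods", "pod1", "container1")
	if err := os.MkdirAll(leaf, 0o755); err != nil {
		panic(err)
	}
	if err := os.WriteFile(filepath.Join(root, "kubepods", "memory.max"), []byte("max\n"), 0o644); err != nil {
		panic(err)
	}

	dir, err := findFileInAncestorDir(leaf, "memory.max", root)
	fmt.Println(dir, err) // prints .../kubepods <nil>
}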
@@ -100,7 +121,12 @@ func GetSpec(cgroupPaths map[string]string, machineInfoFactory info.MachineInfoF
 	if ok {
 		if utils.FileExists(cpusetRoot) {
 			spec.HasCpu = true
-			mask := readString(cpusetRoot, "cpuset.cpus")
+			mask := ""
+			if cgroups.IsCgroup2UnifiedMode() {
+				mask = readString(cpusetRoot, "cpuset.cpus.effective")
+			} else {
+				mask = readString(cpusetRoot, "cpuset.cpus")
+			}
 			spec.Cpu.Mask = utils.FixCpuMask(mask, mi.NumCores)
 		}
 	}
@@ -108,11 +134,24 @@ func GetSpec(cgroupPaths map[string]string, machineInfoFactory info.MachineInfoF
 	// Memory
 	memoryRoot, ok := cgroupPaths["memory"]
 	if ok {
-		if utils.FileExists(memoryRoot) {
-			spec.HasMemory = true
-			spec.Memory.Limit = readUInt64(memoryRoot, "memory.limit_in_bytes")
-			spec.Memory.SwapLimit = readUInt64(memoryRoot, "memory.memsw.limit_in_bytes")
-			spec.Memory.Reservation = readUInt64(memoryRoot, "memory.soft_limit_in_bytes")
+		if !cgroups.IsCgroup2UnifiedMode() {
+			if utils.FileExists(memoryRoot) {
+				spec.HasMemory = true
+				spec.Memory.Limit = readUInt64(memoryRoot, "memory.limit_in_bytes")
+				spec.Memory.SwapLimit = readUInt64(memoryRoot, "memory.memsw.limit_in_bytes")
+				spec.Memory.Reservation = readUInt64(memoryRoot, "memory.soft_limit_in_bytes")
+			}
+		} else {
+			memoryRoot, err := findFileInAncestorDir(memoryRoot, "memory.max", "/sys/fs/cgroup")
+			if err != nil {
+				return spec, err
+			}
+			if memoryRoot != "" {
+				spec.HasMemory = true
+				spec.Memory.Reservation = readUInt64(memoryRoot, "memory.high")
+				spec.Memory.Limit = readUInt64(memoryRoot, "memory.max")
+				spec.Memory.SwapLimit = readUInt64(memoryRoot, "memory.swap.max")
+			}
 		}
 	}
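Note (editor's illustration): the branch above amounts to a file-name translation between the two cgroup hierarchies, with one semantic caveat: v1 memory.memsw.limit_in_bytes covers memory plus swap while v2 memory.swap.max covers swap alone, and v2 memory.high throttles rather than strictly reserving. A compact restatement of the mapping:

package main

import "fmt"

func main() {
	// Editor's summary of the mapping used in the hunk above
	// (cgroup v1 file -> cgroup v2 file).
	pairs := map[string]string{
		"memory.limit_in_bytes":       "memory.max",
		"memory.soft_limit_in_bytes":  "memory.high",
		"memory.memsw.limit_in_bytes": "memory.swap.max",
	}
	for v1, v2 := range pairs {
		fmt.Printf("%-28s -> %s\n", v1, v2)
	}
}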
@@ -128,7 +167,11 @@ func GetSpec(cgroupPaths map[string]string, machineInfoFactory info.MachineInfoF
 	spec.HasNetwork = hasNetwork
 	spec.HasFilesystem = hasFilesystem

-	if blkioRoot, ok := cgroupPaths["blkio"]; ok && utils.FileExists(blkioRoot) {
+	ioControllerName := "blkio"
+	if cgroups.IsCgroup2UnifiedMode() {
+		ioControllerName = "io"
+	}
+	if blkioRoot, ok := cgroupPaths[ioControllerName]; ok && utils.FileExists(blkioRoot) {
 		spec.HasDiskIo = true
 	}
@@ -32,7 +32,6 @@ type ContainerType int
 const (
 	ContainerTypeRaw ContainerType = iota
 	ContainerTypeDocker
-	ContainerTypeRkt
 	ContainerTypeSystemd
 	ContainerTypeCrio
 	ContainerTypeContainerd
@@ -47,6 +47,7 @@ func GetCgroupSubsystems(includedMetrics container.MetricSet) (CgroupSubsystems,
 	//currently we only support disable blkio subsystem
 	if !includedMetrics.Has(container.DiskIOMetrics) {
 		disableCgroups["blkio"] = struct{}{}
+		disableCgroups["io"] = struct{}{}
 	}
 	return getCgroupSubsystemsHelper(allCgroups, disableCgroups)
 }
@@ -109,6 +110,7 @@ var supportedSubsystems map[string]struct{} = map[string]struct{}{
 	"pids":    {},
 	"cpuset":  {},
 	"blkio":   {},
+	"io":      {},
 	"devices": {},
 }
@@ -41,7 +41,6 @@ import (
 const (
 	LabelSystemRoot   = "root"
 	LabelDockerImages = "docker-images"
-	LabelRktImages    = "rkt-images"
 	LabelCrioImages   = "crio-images"
 )

@@ -118,7 +117,6 @@ func NewFsInfo(context Context) (FsInfo, error) {
 		fsInfo.mounts[mount.Mountpoint] = mount
 	}

-	fsInfo.addRktImagesLabel(context, mounts)
 	// need to call this before the log line below printing out the partitions, as this function may
 	// add a "partition" for devicemapper to fsInfo.partitions
 	fsInfo.addDockerImagesLabel(context, mounts)

@@ -167,19 +165,22 @@ func processMounts(mounts []*mount.Info, excludedMountpointPrefixes []string) ma

 	supportedFsType := map[string]bool{
 		// all ext systems are checked through prefix.
-		"btrfs": true,
-		"tmpfs": true,
-		"xfs":   true,
-		"zfs":   true,
+		"btrfs":   true,
+		"overlay": true,
+		"tmpfs":   true,
+		"xfs":     true,
+		"zfs":     true,
 	}

 	for _, mount := range mounts {
 		if !strings.HasPrefix(mount.Fstype, "ext") && !supportedFsType[mount.Fstype] {
 			continue
 		}
-		// Avoid bind mounts.
+		// Avoid bind mounts, exclude tmpfs.
 		if _, ok := partitions[mount.Source]; ok {
-			continue
+			if mount.Fstype != "tmpfs" {
+				continue
+			}
 		}

 		hasPrefix := false
@@ -193,6 +194,10 @@ func processMounts(mounts []*mount.Info, excludedMountpointPrefixes []string) ma
 			continue
 		}

+		// using mountpoint to replace device once fstype it tmpfs
+		if mount.Fstype == "tmpfs" {
+			mount.Source = mount.Mountpoint
+		}
 		// btrfs fix: following workaround fixes wrong btrfs Major and Minor Ids reported in /proc/self/mountinfo.
 		// instead of using values from /proc/self/mountinfo we use stat to get Ids from btrfs mount point
 		if mount.Fstype == "btrfs" && mount.Major == 0 && strings.HasPrefix(mount.Source, "/dev/") {
@@ -205,6 +210,11 @@ func processMounts(mounts []*mount.Info, excludedMountpointPrefixes []string) ma
 			}
 		}

+		// overlay fix: Making mount source unique for all overlay mounts, using the mount's major and minor ids.
+		if mount.Fstype == "overlay" {
+			mount.Source = fmt.Sprintf("%s_%d-%d", mount.Source, mount.Major, mount.Minor)
+		}
+
 		partitions[mount.Source] = partition{
 			fsType:     mount.Fstype,
 			mountpoint: mount.Mountpoint,
@@ -290,20 +300,6 @@ func (self *RealFsInfo) addCrioImagesLabel(context Context, mounts []*mount.Info
 	}
 }

-func (self *RealFsInfo) addRktImagesLabel(context Context, mounts []*mount.Info) {
-	if context.RktPath != "" {
-		rktPath := context.RktPath
-		rktImagesPaths := map[string]struct{}{
-			"/": {},
-		}
-		for rktPath != "/" && rktPath != "." {
-			rktImagesPaths[rktPath] = struct{}{}
-			rktPath = filepath.Dir(rktPath)
-		}
-		self.updateContainerImagesPath(LabelRktImages, mounts, rktImagesPaths)
-	}
-}
-
 // Generate a list of possible mount points for docker image management from the docker root directory.
 // Right now, we look for each type of supported graph driver directories, but we can do better by parsing
 // some of the context from `docker info`.
@@ -20,9 +20,8 @@ import (

 type Context struct {
 	// docker root directory.
-	Docker  DockerContext
-	RktPath string
-	Crio    CrioContext
+	Docker DockerContext
+	Crio   CrioContext
 }

 type DockerContext struct {
@@ -38,9 +38,10 @@ type FsInfo struct {
 type Node struct {
 	Id int `json:"node_id"`
 	// Per-node memory
-	Memory uint64  `json:"memory"`
-	Cores  []Core  `json:"cores"`
-	Caches []Cache `json:"caches"`
+	Memory    uint64          `json:"memory"`
+	HugePages []HugePagesInfo `json:"hugepages"`
+	Cores     []Core          `json:"cores"`
+	Caches    []Cache         `json:"caches"`
 }

 type Core struct {
@@ -17,10 +17,8 @@ package machine
 import (
 	"bytes"
 	"flag"
 	"fmt"
 	"io/ioutil"
 	"path/filepath"
-	"strconv"
-	"strings"

 	"github.com/docker/docker/pkg/parsers/operatingsystem"
@@ -54,45 +52,6 @@ func getInfoFromFiles(filePaths string) string {
 	return ""
 }

-// GetHugePagesInfo returns information about pre-allocated huge pages
-func GetHugePagesInfo() ([]info.HugePagesInfo, error) {
-	var hugePagesInfo []info.HugePagesInfo
-	files, err := ioutil.ReadDir(hugepagesDirectory)
-	if err != nil {
-		// treat as non-fatal since kernels and machine can be
-		// configured to disable hugepage support
-		return hugePagesInfo, nil
-	}
-	for _, st := range files {
-		nameArray := strings.Split(st.Name(), "-")
-		pageSizeArray := strings.Split(nameArray[1], "kB")
-		pageSize, err := strconv.ParseUint(string(pageSizeArray[0]), 10, 64)
-		if err != nil {
-			return hugePagesInfo, err
-		}
-
-		numFile := hugepagesDirectory + st.Name() + "/nr_hugepages"
-		val, err := ioutil.ReadFile(numFile)
-		if err != nil {
-			return hugePagesInfo, err
-		}
-		var numPages uint64
-		// we use sscanf as the file as a new-line that trips up ParseUint
-		// it returns the number of tokens successfully parsed, so if
-		// n != 1, it means we were unable to parse a number from the file
-		n, err := fmt.Sscanf(string(val), "%d", &numPages)
-		if err != nil || n != 1 {
-			return hugePagesInfo, fmt.Errorf("could not parse file %v contents %q", numFile, string(val))
-		}
-
-		hugePagesInfo = append(hugePagesInfo, info.HugePagesInfo{
-			NumPages: numPages,
-			PageSize: pageSize,
-		})
-	}
-	return hugePagesInfo, nil
-}
-
 func Info(sysFs sysfs.SysFs, fsInfo fs.FsInfo, inHostNamespace bool) (*info.MachineInfo, error) {
 	rootFs := "/"
 	if !inHostNamespace {
@@ -100,6 +59,9 @@ func Info(sysFs sysfs.SysFs, fsInfo fs.FsInfo, inHostNamespace bool) (*info.Mach
 	}

+	cpuinfo, err := ioutil.ReadFile(filepath.Join(rootFs, "/proc/cpuinfo"))
+	if err != nil {
+		return nil, err
+	}
 	clockSpeed, err := GetClockSpeed(cpuinfo)
 	if err != nil {
 		return nil, err
@@ -110,7 +72,7 @@ func Info(sysFs sysfs.SysFs, fsInfo fs.FsInfo, inHostNamespace bool) (*info.Mach
 		return nil, err
 	}

-	hugePagesInfo, err := GetHugePagesInfo()
+	hugePagesInfo, err := GetHugePagesInfo(hugepagesDirectory)
 	if err != nil {
 		return nil, err
 	}
@@ -23,6 +23,7 @@ import (
 	"regexp"
 	"strconv"
 	"strings"

 	// s390/s390x changes
 	"runtime"
@@ -49,6 +50,7 @@ var (

 const maxFreqFile = "/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq"
 const cpuBusPath = "/sys/bus/cpu/devices/"
+const nodePath = "/sys/devices/system/node"

 // GetClockSpeed returns the CPU clock speed, given a []byte formatted as the /proc/cpuinfo file.
 func GetClockSpeed(procInfo []byte) (uint64, error) {
@@ -191,6 +193,47 @@ func getNodeIdFromCpuBus(cpuBusPath string, threadId int) (int, error) {
 	return nodeId, nil
 }

+// GetHugePagesInfo returns information about pre-allocated huge pages
+// hugepagesDirectory should be top directory of hugepages
+// Such as: /sys/kernel/mm/hugepages/
+func GetHugePagesInfo(hugepagesDirectory string) ([]info.HugePagesInfo, error) {
+	var hugePagesInfo []info.HugePagesInfo
+	files, err := ioutil.ReadDir(hugepagesDirectory)
+	if err != nil {
+		// treat as non-fatal since kernels and machine can be
+		// configured to disable hugepage support
+		return hugePagesInfo, nil
+	}
+	for _, st := range files {
+		nameArray := strings.Split(st.Name(), "-")
+		pageSizeArray := strings.Split(nameArray[1], "kB")
+		pageSize, err := strconv.ParseUint(string(pageSizeArray[0]), 10, 64)
+		if err != nil {
+			return hugePagesInfo, err
+		}
+
+		numFile := hugepagesDirectory + st.Name() + "/nr_hugepages"
+		val, err := ioutil.ReadFile(numFile)
+		if err != nil {
+			return hugePagesInfo, err
+		}
+		var numPages uint64
+		// we use sscanf as the file as a new-line that trips up ParseUint
+		// it returns the number of tokens successfully parsed, so if
+		// n != 1, it means we were unable to parse a number from the file
+		n, err := fmt.Sscanf(string(val), "%d", &numPages)
+		if err != nil || n != 1 {
+			return hugePagesInfo, fmt.Errorf("could not parse file %v contents %q", numFile, string(val))
+		}
+
+		hugePagesInfo = append(hugePagesInfo, info.HugePagesInfo{
+			NumPages: numPages,
+			PageSize: pageSize,
+		})
+	}
+	return hugePagesInfo, nil
+}
+
 func GetTopology(sysFs sysfs.SysFs, cpuinfo string) ([]info.Node, int, error) {
 	nodes := []info.Node{}
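Note (editor's illustration): the entries GetHugePagesInfo walks are directories named like hugepages-2048kB, so the page size comes from the directory name and the count from its nr_hugepages file. A sketch of just the name parsing (the name is a made-up example):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// e.g. /sys/kernel/mm/hugepages/hugepages-2048kB
	name := "hugepages-2048kB"
	sizeField := strings.Split(strings.Split(name, "-")[1], "kB")[0]
	pageSize, err := strconv.ParseUint(sizeField, 10, 64)
	fmt.Println(pageSize, err) // 2048 <nil>
}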
@ -352,6 +395,15 @@ func addNode(nodes *[]info.Node, id int) (int, error) {
|
|||
}
|
||||
node.Memory = uint64(m)
|
||||
}
|
||||
// Look for per-node hugepages info using node id
|
||||
// Such as: /sys/devices/system/node/node%d/hugepages
|
||||
hugepagesDirectory := fmt.Sprintf("%s/node%d/hugepages/", nodePath, id)
|
||||
hugePagesInfo, err := GetHugePagesInfo(hugepagesDirectory)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
node.HugePages = hugePagesInfo
|
||||
|
||||
*nodes = append(*nodes, node)
|
||||
idx = len(*nodes) - 1
|
||||
}
|
||||
|
|
|
@@ -185,6 +185,9 @@ func (c *containerData) getCgroupPath(cgroups string) (string, error) {
 	if cgroups == "-" {
 		return "/", nil
 	}
+	if strings.HasPrefix(cgroups, "0::") {
+		return cgroups[3:], nil
+	}
 	matches := cgroupPathRegExp.FindSubmatch([]byte(cgroups))
 	if len(matches) != 2 {
 		klog.V(3).Infof("failed to get memory cgroup path from %q", cgroups)
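Note (editor's illustration): the "0::" prefix handled above is how the unified cgroup v2 hierarchy appears in /proc/<pid>/cgroup: a single record with hierarchy ID 0 and an empty controller list, instead of one line per v1 controller. A sketch with a hard-coded example line:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// cgroup v2: one "0::<path>" line instead of one line per controller.
	line := "0::/kubepods/burstable/pod1234/crio-abcd"
	if strings.HasPrefix(line, "0::") {
		fmt.Println(line[3:]) // /kubepods/burstable/pod1234/crio-abcd
	}
}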
@@ -918,13 +918,15 @@ func (m *manager) createContainerLocked(containerName string, watchSource watche
 	if err != nil {
 		return err
 	}
-	devicesCgroupPath, err := handler.GetCgroupPath("devices")
-	if err != nil {
-		klog.Warningf("Error getting devices cgroup path: %v", err)
-	} else {
-		cont.nvidiaCollector, err = m.nvidiaManager.GetCollector(devicesCgroupPath)
-		if err != nil {
-			klog.V(4).Infof("GPU metrics may be unavailable/incomplete for container %q: %v", cont.info.Name, err)
-		}
-	}
+	if !cgroups.IsCgroup2UnifiedMode() {
+		devicesCgroupPath, err := handler.GetCgroupPath("devices")
+		if err != nil {
+			klog.Warningf("Error getting devices cgroup path: %v", err)
+		} else {
+			cont.nvidiaCollector, err = m.nvidiaManager.GetCollector(devicesCgroupPath)
+			if err != nil {
+				klog.V(4).Infof("GPU metrics may be unavailable/incomplete for container %q: %v", cont.info.Name, err)
+			}
+		}
+	}
@@ -1119,9 +1121,6 @@ func (self *manager) watchForNewContainers(quit chan error) error {
 		switch {
 		case event.EventType == watcher.ContainerAdd:
 			switch event.WatchSource {
-			// the Rkt and Raw watchers can race, and if Raw wins, we want Rkt to override and create a new handler for Rkt containers
-			case watcher.Rkt:
-				err = self.overrideContainer(event.Name, event.WatchSource)
 			default:
 				err = self.createContainer(event.Name, event.WatchSource)
 			}
cluster-autoscaler/vendor/github.com/google/cadvisor/utils/cpuload/netlink/reader.go (2 changes, generated, vendored)

@@ -66,10 +66,10 @@ func (self *NetlinkReader) GetCpuLoad(name string, path string) (info.LoadStats,
 	}

 	cfd, err := os.Open(path)
-	defer cfd.Close()
 	if err != nil {
 		return info.LoadStats{}, fmt.Errorf("failed to open cgroup path %s: %q", path, err)
 	}
+	defer cfd.Close()

 	stats, err := getLoadStats(self.familyId, cfd, self.conn)
 	if err != nil {
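Note (editor's illustration): the reordering above is the standard Go pattern: defer Close only after the error check, because os.Open returns a nil *File on failure and there is nothing valid to close yet. A minimal sketch of the corrected shape:

package main

import (
	"fmt"
	"os"
)

func readable(path string) (bool, error) {
	f, err := os.Open(path)
	if err != nil {
		// Do not defer f.Close() before this check: on error f is nil.
		return false, err
	}
	defer f.Close() // safe: f is known to be valid here
	return true, nil
}

func main() {
	ok, err := readable("/etc/hostname")
	fmt.Println(ok, err)
}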
@@ -28,7 +28,6 @@ type ContainerWatchSource int

 const (
 	Raw ContainerWatchSource = iota
-	Rkt
 )

 // ContainerEvent represents a
@@ -52,23 +52,26 @@ func (RealClock) Since(ts time.Time) time.Duration {
 	return time.Since(ts)
 }

-// Same as time.After(d).
+// After is the same as time.After(d).
 func (RealClock) After(d time.Duration) <-chan time.Time {
 	return time.After(d)
 }

+// NewTimer returns a new Timer.
 func (RealClock) NewTimer(d time.Duration) Timer {
 	return &realTimer{
 		timer: time.NewTimer(d),
 	}
 }

+// NewTicker returns a new Ticker.
 func (RealClock) NewTicker(d time.Duration) Ticker {
 	return &realTicker{
 		ticker: time.NewTicker(d),
 	}
 }

+// Sleep pauses the RealClock for duration d.
 func (RealClock) Sleep(d time.Duration) {
 	time.Sleep(d)
 }
@@ -94,12 +97,14 @@ type fakeClockWaiter struct {
 	destChan chan time.Time
 }

+// NewFakePassiveClock returns a new FakePassiveClock.
 func NewFakePassiveClock(t time.Time) *FakePassiveClock {
 	return &FakePassiveClock{
 		time: t,
 	}
 }

+// NewFakeClock returns a new FakeClock
 func NewFakeClock(t time.Time) *FakeClock {
 	return &FakeClock{
 		FakePassiveClock: *NewFakePassiveClock(t),
@@ -120,14 +125,14 @@ func (f *FakePassiveClock) Since(ts time.Time) time.Duration {
 	return f.time.Sub(ts)
 }

-// Sets the time.
+// SetTime sets the time on the FakePassiveClock.
 func (f *FakePassiveClock) SetTime(t time.Time) {
 	f.lock.Lock()
 	defer f.lock.Unlock()
 	f.time = t
 }

-// Fake version of time.After(d).
+// After is the Fake version of time.After(d).
 func (f *FakeClock) After(d time.Duration) <-chan time.Time {
 	f.lock.Lock()
 	defer f.lock.Unlock()
@@ -140,7 +145,7 @@ func (f *FakeClock) After(d time.Duration) <-chan time.Time {
 	return ch
 }

-// Fake version of time.NewTimer(d).
+// NewTimer is the Fake version of time.NewTimer(d).
 func (f *FakeClock) NewTimer(d time.Duration) Timer {
 	f.lock.Lock()
 	defer f.lock.Unlock()
@@ -157,6 +162,7 @@ func (f *FakeClock) NewTimer(d time.Duration) Timer {
 	return timer
 }

+// NewTicker returns a new Ticker.
 func (f *FakeClock) NewTicker(d time.Duration) Ticker {
 	f.lock.Lock()
 	defer f.lock.Unlock()
@@ -174,14 +180,14 @@ func (f *FakeClock) NewTicker(d time.Duration) Ticker {
 	}
 }

-// Move clock by Duration, notify anyone that's called After, Tick, or NewTimer
+// Step moves clock by Duration, notifies anyone that's called After, Tick, or NewTimer
 func (f *FakeClock) Step(d time.Duration) {
 	f.lock.Lock()
 	defer f.lock.Unlock()
 	f.setTimeLocked(f.time.Add(d))
 }

-// Sets the time.
+// SetTime sets the time on a FakeClock.
 func (f *FakeClock) SetTime(t time.Time) {
 	f.lock.Lock()
 	defer f.lock.Unlock()
@@ -219,7 +225,7 @@ func (f *FakeClock) setTimeLocked(t time.Time) {
 	f.waiters = newWaiters
 }

-// Returns true if After has been called on f but not yet satisfied (so you can
+// HasWaiters returns true if After has been called on f but not yet satisfied (so you can
 // write race-free tests).
 func (f *FakeClock) HasWaiters() bool {
 	f.lock.RLock()
@@ -227,6 +233,7 @@ func (f *FakeClock) HasWaiters() bool {
 	return len(f.waiters) > 0
 }

+// Sleep pauses the FakeClock for duration d.
 func (f *FakeClock) Sleep(d time.Duration) {
 	f.Step(d)
 }
@@ -248,24 +255,25 @@ func (i *IntervalClock) Since(ts time.Time) time.Duration {
 	return i.Time.Sub(ts)
 }

-// Unimplemented, will panic.
+// After is currently unimplemented, will panic.
 // TODO: make interval clock use FakeClock so this can be implemented.
 func (*IntervalClock) After(d time.Duration) <-chan time.Time {
 	panic("IntervalClock doesn't implement After")
 }

-// Unimplemented, will panic.
+// NewTimer is currently unimplemented, will panic.
 // TODO: make interval clock use FakeClock so this can be implemented.
 func (*IntervalClock) NewTimer(d time.Duration) Timer {
 	panic("IntervalClock doesn't implement NewTimer")
 }

-// Unimplemented, will panic.
+// NewTicker is currently unimplemented, will panic.
 // TODO: make interval clock use FakeClock so this can be implemented.
 func (*IntervalClock) NewTicker(d time.Duration) Ticker {
 	panic("IntervalClock doesn't implement NewTicker")
 }

+// Sleep is currently unimplemented; will panic.
 func (*IntervalClock) Sleep(d time.Duration) {
 	panic("IntervalClock doesn't implement Sleep")
 }
@@ -355,6 +363,7 @@ func (f *fakeTimer) Reset(d time.Duration) bool {
 	return false
 }

+// Ticker defines the Ticker interface
 type Ticker interface {
 	C() <-chan time.Time
 	Stop()
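Note (editor's illustration): these are golint-style doc-comment fixes, but the FakeClock being documented is worth a sketch; HasWaiters exists precisely so tests can confirm a waiter is registered on After before stepping time forward. A minimal single-goroutine sketch, assuming the package's import path k8s.io/apimachinery/pkg/util/clock:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/clock"
)

func main() {
	fc := clock.NewFakeClock(time.Now())
	ch := fc.After(5 * time.Second) // registers a waiter, no real timer

	fmt.Println(fc.HasWaiters())  // true: the After above is still pending
	fc.Step(10 * time.Second)     // advance fake time past the deadline
	fmt.Println("fired at", <-ch) // receives deterministically, no sleep
}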
@@ -173,13 +173,6 @@ func (s *WatchServer) ServeHTTP(w http.ResponseWriter, req *http.Request) {
 		return
 	}

-	cn, ok := w.(http.CloseNotifier)
-	if !ok {
-		err := fmt.Errorf("unable to start watch - can't get http.CloseNotifier: %#v", w)
-		utilruntime.HandleError(err)
-		s.Scope.err(errors.NewInternalError(err), w, req)
-		return
-	}
 	flusher, ok := w.(http.Flusher)
 	if !ok {
 		err := fmt.Errorf("unable to start watch - can't get http.Flusher: %#v", w)
@@ -214,9 +207,11 @@ func (s *WatchServer) ServeHTTP(w http.ResponseWriter, req *http.Request) {
 	outEvent := &metav1.WatchEvent{}
 	buf := &bytes.Buffer{}
 	ch := s.Watching.ResultChan()
+	done := req.Context().Done()

 	for {
 		select {
-		case <-cn.CloseNotify():
+		case <-done:
 			return
 		case <-timeoutCh:
 			return
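Note (editor's illustration): http.CloseNotifier is deprecated since Go 1.11; the request context is cancelled when the client disconnects, which is exactly what the hunk above switches to. A generic streaming-handler sketch of the same pattern:

package main

import (
	"fmt"
	"log"
	"net/http"
	"time"
)

func stream(w http.ResponseWriter, req *http.Request) {
	done := req.Context().Done() // closed on client disconnect (or server shutdown)
	tick := time.NewTicker(time.Second)
	defer tick.Stop()
	for {
		select {
		case <-done:
			return // stop writing once the client is gone
		case t := <-tick.C:
			fmt.Fprintln(w, t.Unix())
			if f, ok := w.(http.Flusher); ok {
				f.Flush()
			}
		}
	}
}

func main() {
	http.HandleFunc("/stream", stream)
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", nil))
}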
@@ -43,7 +43,9 @@ go_library(
     importmap = "k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters",
     importpath = "k8s.io/apiserver/pkg/server/filters",
     deps = [
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
@@ -53,6 +55,7 @@ go_library(
         "//staging/src/k8s.io/apiserver/pkg/endpoints/metrics:go_default_library",
         "//staging/src/k8s.io/apiserver/pkg/endpoints/request:go_default_library",
         "//staging/src/k8s.io/apiserver/pkg/server/httplog:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library",
         "//vendor/k8s.io/klog:go_default_library",
     ],
 )
@@ -18,11 +18,16 @@ package filters

 import (
 	"errors"
+	"fmt"
 	"net/http"

+	"k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/runtime"
 	utilwaitgroup "k8s.io/apimachinery/pkg/util/waitgroup"
 	"k8s.io/apiserver/pkg/endpoints/handlers/responsewriters"
 	apirequest "k8s.io/apiserver/pkg/endpoints/request"
+	"k8s.io/client-go/kubernetes/scheme"
 )

 // WithWaitGroup adds all non long-running requests to wait group, which is used for graceful shutdown.
@@ -38,7 +43,14 @@ func WithWaitGroup(handler http.Handler, longRunning apirequest.LongRunningReque

 		if !longRunning(req, requestInfo) {
 			if err := wg.Add(1); err != nil {
-				http.Error(w, "apiserver is shutting down.", http.StatusInternalServerError)
+				// When apiserver is shutting down, signal clients to retry
+				// There is a good chance the client hit a different server, so a tight retry is good for client responsiveness.
+				w.Header().Add("Retry-After", "1")
+				w.Header().Set("Content-Type", runtime.ContentTypeJSON)
+				w.Header().Set("X-Content-Type-Options", "nosniff")
+				statusErr := apierrors.NewServiceUnavailable("apiserver is shutting down").Status()
+				w.WriteHeader(int(statusErr.Code))
+				fmt.Fprintln(w, runtime.EncodeOrDie(scheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), &statusErr))
 				return
 			}
 			defer wg.Done()
@@ -2,7 +2,10 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

 go_library(
     name = "go_default_library",
-    srcs = ["exec.go"],
+    srcs = [
+        "exec.go",
+        "metrics.go",
+    ],
     importmap = "k8s.io/kubernetes/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec",
     importpath = "k8s.io/client-go/plugin/pkg/client/auth/exec",
     visibility = ["//visibility:public"],
@@ -16,6 +19,7 @@ go_library(
         "//staging/src/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1:go_default_library",
         "//staging/src/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1:go_default_library",
         "//staging/src/k8s.io/client-go/tools/clientcmd/api:go_default_library",
+        "//staging/src/k8s.io/client-go/tools/metrics:go_default_library",
         "//staging/src/k8s.io/client-go/transport:go_default_library",
         "//staging/src/k8s.io/client-go/util/connrotation:go_default_library",
         "//vendor/github.com/davecgh/go-spew/spew:go_default_library",
@@ -26,7 +30,10 @@ go_library(

 go_test(
     name = "go_default_test",
-    srcs = ["exec_test.go"],
+    srcs = [
+        "exec_test.go",
+        "metrics_test.go",
+    ],
     data = glob(["testdata/**"]),
     embed = [":go_default_library"],
     deps = [
@@ -34,6 +41,7 @@ go_test(
         "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
         "//staging/src/k8s.io/client-go/pkg/apis/clientauthentication:go_default_library",
         "//staging/src/k8s.io/client-go/tools/clientcmd/api:go_default_library",
+        "//staging/src/k8s.io/client-go/tools/metrics:go_default_library",
         "//staging/src/k8s.io/client-go/transport:go_default_library",
     ],
 )
cluster-autoscaler/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go (28 changes, generated, vendored)
@@ -20,6 +20,7 @@ import (
 	"bytes"
 	"context"
 	"crypto/tls"
+	"crypto/x509"
 	"errors"
 	"fmt"
 	"io"
@@ -42,6 +43,7 @@ import (
 	"k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1"
 	"k8s.io/client-go/pkg/apis/clientauthentication/v1beta1"
 	"k8s.io/client-go/tools/clientcmd/api"
+	"k8s.io/client-go/tools/metrics"
 	"k8s.io/client-go/transport"
 	"k8s.io/client-go/util/connrotation"
 	"k8s.io/klog"
@@ -260,6 +262,8 @@ func (a *Authenticator) cert() (*tls.Certificate, error) {
 func (a *Authenticator) getCreds() (*credentials, error) {
 	a.mu.Lock()
 	defer a.mu.Unlock()
+	defer expirationMetrics.report(time.Now)
+
 	if a.cachedCreds != nil && !a.credsExpired() {
 		return a.cachedCreds, nil
 	}
@@ -267,6 +271,7 @@ func (a *Authenticator) getCreds() (*credentials, error) {
 	if err := a.refreshCredsLocked(nil); err != nil {
 		return nil, err
 	}
+
 	return a.cachedCreds, nil
 }
@@ -355,6 +360,17 @@ func (a *Authenticator) refreshCredsLocked(r *clientauthentication.Response) err
 	if err != nil {
 		return fmt.Errorf("failed parsing client key/certificate: %v", err)
 	}

+	// Leaf is initialized to be nil:
+	//  https://golang.org/pkg/crypto/tls/#X509KeyPair
+	// Leaf certificate is the first certificate:
+	//  https://golang.org/pkg/crypto/tls/#Certificate
+	// Populating leaf is useful for quickly accessing the underlying x509
+	// certificate values.
+	cert.Leaf, err = x509.ParseCertificate(cert.Certificate[0])
+	if err != nil {
+		return fmt.Errorf("failed parsing client leaf certificate: %v", err)
+	}
 	newCreds.cert = &cert
 }
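Note (editor's illustration): tls.X509KeyPair deliberately leaves cert.Leaf nil; parsing the first chain element once, as above, makes fields like NotBefore/NotAfter cheap to read later, which the rotation-age and TTL metrics below rely on. A sketch of the pattern in isolation (loadKeyPair is a made-up name):

package main

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
)

// loadKeyPair parses a key pair and eagerly populates Leaf so callers can
// read expiry fields without re-parsing DER on every use.
func loadKeyPair(certPEM, keyPEM []byte) (tls.Certificate, error) {
	cert, err := tls.X509KeyPair(certPEM, keyPEM)
	if err != nil {
		return tls.Certificate{}, err
	}
	cert.Leaf, err = x509.ParseCertificate(cert.Certificate[0])
	if err != nil {
		return tls.Certificate{}, err
	}
	return cert, nil
}

func main() {
	// With real PEM input, cert.Leaf.NotAfter would now be available directly.
	fmt.Println("see loadKeyPair above")
}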
@@ -362,10 +378,20 @@ func (a *Authenticator) refreshCredsLocked(r *clientauthentication.Response) err
 	a.cachedCreds = newCreds
 	// Only close all connections when TLS cert rotates. Token rotation doesn't
 	// need the extra noise.
-	if len(a.onRotateList) > 0 && oldCreds != nil && !reflect.DeepEqual(oldCreds.cert, a.cachedCreds.cert) {
+	if oldCreds != nil && !reflect.DeepEqual(oldCreds.cert, a.cachedCreds.cert) {
+		// Can be nil if the exec auth plugin only returned token auth.
+		if oldCreds.cert != nil && oldCreds.cert.Leaf != nil {
+			metrics.ClientCertRotationAge.Observe(time.Now().Sub(oldCreds.cert.Leaf.NotBefore))
+		}
 		for _, onRotate := range a.onRotateList {
 			onRotate()
 		}
 	}
+
+	expiry := time.Time{}
+	if a.cachedCreds.cert != nil && a.cachedCreds.cert.Leaf != nil {
+		expiry = a.cachedCreds.cert.Leaf.NotAfter
+	}
+	expirationMetrics.set(a, expiry)
 	return nil
 }
cluster-autoscaler/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/metrics.go (64 lines, generated, vendored, new file)

@@ -0,0 +1,64 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package exec
+
+import (
+	"sync"
+	"time"
+
+	"k8s.io/client-go/tools/metrics"
+)
+
+type certificateExpirationTracker struct {
+	mu       sync.RWMutex
+	m        map[*Authenticator]time.Time
+	earliest time.Time
+}
+
+var expirationMetrics = &certificateExpirationTracker{m: map[*Authenticator]time.Time{}}
+
+// set stores the given expiration time and updates the updates earliest.
+func (c *certificateExpirationTracker) set(a *Authenticator, t time.Time) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	c.m[a] = t
+
+	// update earliest
+	earliest := time.Time{}
+	for _, t := range c.m {
+		if t.IsZero() {
+			continue
+		}
+		if earliest.IsZero() || earliest.After(t) {
+			earliest = t
+		}
+	}
+	c.earliest = earliest
+}
+
+// report reports the ttl to the earliest reported expiration time.
+// If no Authenticators have reported a certificate expiration, this reports nil.
+func (c *certificateExpirationTracker) report(now func() time.Time) {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	if c.earliest.IsZero() {
+		metrics.ClientCertTTL.Set(nil)
+	} else {
+		ttl := c.earliest.Sub(now())
+		metrics.ClientCertTTL.Set(&ttl)
+	}
+}
@@ -806,19 +806,24 @@ func (r *Request) request(fn func(*http.Request, *http.Response)) error {
 			r.backoff.UpdateBackoff(r.URL(), err, resp.StatusCode)
 		}
 		if err != nil {
-			// "Connection reset by peer" is usually a transient error.
+			// "Connection reset by peer", "Connection refused" or "apiserver is shutting down" are usually a transient errors.
 			// Thus in case of "GET" operations, we simply retry it.
 			// We are not automatically retrying "write" operations, as
 			// they are not idempotent.
-			if !net.IsConnectionReset(err) || r.verb != "GET" {
+			if r.verb != "GET" {
 				return err
 			}
-			// For the purpose of retry, we set the artificial "retry-after" response.
-			// TODO: Should we clean the original response if it exists?
-			resp = &http.Response{
-				StatusCode: http.StatusInternalServerError,
-				Header:     http.Header{"Retry-After": []string{"1"}},
-				Body:       ioutil.NopCloser(bytes.NewReader([]byte{})),
+			// For connection errors and apiserver shutdown errors retry.
+			if net.IsConnectionReset(err) || net.IsConnectionRefused(err) {
+				// For the purpose of retry, we set the artificial "retry-after" response.
+				// TODO: Should we clean the original response if it exists?
+				resp = &http.Response{
+					StatusCode: http.StatusInternalServerError,
+					Header:     http.Header{"Retry-After": []string{"1"}},
+					Body:       ioutil.NopCloser(bytes.NewReader([]byte{})),
+				}
+			} else {
+				return err
 			}
 		}
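Note (editor's illustration): the rewritten branch retries only idempotent GETs, and now for refused connections as well as resets, covering the window in which a shutting-down apiserver refuses new connections. A restatement of the predicate using the same k8s.io/apimachinery/pkg/util/net helpers (retriable is a made-up name):

package main

import (
	"fmt"
	"syscall"

	utilnet "k8s.io/apimachinery/pkg/util/net"
)

// retriable reports whether a failed request is safe and worthwhile to retry:
// only GETs (idempotent), and only for connection-level failures.
func retriable(verb string, err error) bool {
	if verb != "GET" || err == nil {
		return false
	}
	return utilnet.IsConnectionReset(err) || utilnet.IsConnectionRefused(err)
}

func main() {
	fmt.Println(retriable("GET", syscall.ECONNREFUSED))  // true
	fmt.Println(retriable("POST", syscall.ECONNREFUSED)) // false
}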
@@ -18,6 +18,7 @@ go_test(
     embed = [":go_default_library"],
     deps = [
         "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
         "//staging/src/k8s.io/client-go/rest:go_default_library",
         "//staging/src/k8s.io/client-go/tools/clientcmd/api:go_default_library",
@@ -26,6 +26,16 @@ import (

 var registerMetrics sync.Once

+// DurationMetric is a measurement of some amount of time.
+type DurationMetric interface {
+	Observe(duration time.Duration)
+}
+
+// TTLMetric sets the time to live of something.
+type TTLMetric interface {
+	Set(ttl *time.Duration)
+}
+
 // LatencyMetric observes client latency partitioned by verb and url.
 type LatencyMetric interface {
 	Observe(verb string, u url.URL, latency time.Duration)
@@ -37,21 +47,51 @@ type ResultMetric interface {
 }

 var (
+	// ClientCertTTL is the time to live of a client certificate
+	ClientCertTTL TTLMetric = noopTTL{}
+	// ClientCertRotationAge is the age of a certificate that has just been rotated.
+	ClientCertRotationAge DurationMetric = noopDuration{}
 	// RequestLatency is the latency metric that rest clients will update.
 	RequestLatency LatencyMetric = noopLatency{}
 	// RequestResult is the result metric that rest clients will update.
 	RequestResult ResultMetric = noopResult{}
 )

+// RegisterOpts contains all the metrics to register. Metrics may be nil.
+type RegisterOpts struct {
+	ClientCertTTL         TTLMetric
+	ClientCertRotationAge DurationMetric
+	RequestLatency        LatencyMetric
+	RequestResult         ResultMetric
+}
+
 // Register registers metrics for the rest client to use. This can
 // only be called once.
-func Register(lm LatencyMetric, rm ResultMetric) {
+func Register(opts RegisterOpts) {
 	registerMetrics.Do(func() {
-		RequestLatency = lm
-		RequestResult = rm
+		if opts.ClientCertTTL != nil {
+			ClientCertTTL = opts.ClientCertTTL
+		}
+		if opts.ClientCertRotationAge != nil {
+			ClientCertRotationAge = opts.ClientCertRotationAge
+		}
+		if opts.RequestLatency != nil {
+			RequestLatency = opts.RequestLatency
+		}
+		if opts.RequestResult != nil {
+			RequestResult = opts.RequestResult
+		}
 	})
 }

+type noopDuration struct{}
+
+func (noopDuration) Observe(time.Duration) {}
+
+type noopTTL struct{}
+
+func (noopTTL) Set(*time.Duration) {}
+
 type noopLatency struct{}

 func (noopLatency) Observe(string, url.URL, time.Duration) {}
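Note (editor's illustration): with RegisterOpts, callers can supply any subset of metric implementations and the rest keep their no-op defaults. A sketch of registering a custom latency metric against this revised API (stderrLatency is a made-up type):

package main

import (
	"fmt"
	"net/url"
	"time"

	"k8s.io/client-go/tools/metrics"
)

// stderrLatency is a toy LatencyMetric; any type with this Observe
// signature satisfies the interface introduced above.
type stderrLatency struct{}

func (stderrLatency) Observe(verb string, u url.URL, latency time.Duration) {
	fmt.Printf("%s %s took %v\n", verb, u.Path, latency)
}

func main() {
	// Fields left nil keep their no-op defaults.
	metrics.Register(metrics.RegisterOpts{RequestLatency: stderrLatency{}})
}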
@@ -3,6 +3,7 @@
 approvers:
   - sig-instrumentation-approvers
   - logicalhan
+  - RainbowMango
 reviewers:
   - sig-instrumentation-reviewers
 labels:
@@ -41,7 +41,7 @@ func NewCounter(opts *CounterOpts) *Counter {
 		lazyMetric: lazyMetric{},
 	}
 	kc.setPrometheusCounter(noop)
-	kc.lazyInit(kc)
+	kc.lazyInit(kc, BuildFQName(opts.Namespace, opts.Subsystem, opts.Name))
 	return kc
 }

@@ -92,7 +92,7 @@ func NewCounterVec(opts *CounterOpts, labels []string) *CounterVec {
 		originalLabels: labels,
 		lazyMetric:     lazyMetric{},
 	}
-	cv.lazyInit(cv)
+	cv.lazyInit(cv, BuildFQName(opts.Namespace, opts.Subsystem, opts.Name))
 	return cv
 }
@@ -43,7 +43,7 @@ func NewGauge(opts *GaugeOpts) *Gauge {
 		lazyMetric: lazyMetric{},
 	}
 	kc.setPrometheusGauge(noop)
-	kc.lazyInit(kc)
+	kc.lazyInit(kc, BuildFQName(opts.Namespace, opts.Subsystem, opts.Name))
 	return kc
 }

@@ -94,7 +94,7 @@ func NewGaugeVec(opts *GaugeOpts, labels []string) *GaugeVec {
 		originalLabels: labels,
 		lazyMetric:     lazyMetric{},
 	}
-	cv.lazyInit(cv)
+	cv.lazyInit(cv, BuildFQName(opts.Namespace, opts.Subsystem, opts.Name))
 	return cv
 }
@@ -53,7 +53,7 @@ func NewHistogram(opts *HistogramOpts) *Histogram {
 		lazyMetric: lazyMetric{},
 	}
 	h.setPrometheusHistogram(noopMetric{})
-	h.lazyInit(h)
+	h.lazyInit(h, BuildFQName(opts.Namespace, opts.Subsystem, opts.Name))
 	return h
 }

@@ -104,7 +104,7 @@ func NewHistogramVec(opts *HistogramOpts, labels []string) *HistogramVec {
 		originalLabels: labels,
 		lazyMetric:     lazyMetric{},
 	}
-	v.lazyInit(v)
+	v.lazyInit(v, BuildFQName(opts.Namespace, opts.Subsystem, opts.Name))
 	return v
 }
@@ -63,6 +63,7 @@ implements kubeCollector to get deferred registration behavior. You must call la
 with the kubeCollector itself as an argument.
 */
 type lazyMetric struct {
+	fqName       string
 	isDeprecated bool
 	isHidden     bool
 	isCreated    bool
@@ -81,7 +82,8 @@ func (r *lazyMetric) IsCreated() bool {
 // lazyInit provides the lazyMetric with a reference to the kubeCollector it is supposed
 // to allow lazy initialization for. It should be invoked in the factory function which creates new
 // kubeCollector type objects.
-func (r *lazyMetric) lazyInit(self kubeCollector) {
+func (r *lazyMetric) lazyInit(self kubeCollector, fqName string) {
+	r.fqName = fqName
 	r.self = self
 }

@@ -98,7 +100,7 @@ func (r *lazyMetric) determineDeprecationStatus(version semver.Version) {
 		r.isDeprecated = true
 	}
 	if ShouldShowHidden() {
-		klog.Warningf("Hidden metrics have been manually overridden, showing this very deprecated metric.")
+		klog.Warningf("Hidden metrics (%s) have been manually overridden, showing this very deprecated metric.", r.fqName)
 		return
 	}
 	if shouldHide(&version, selfVersion) {
@@ -143,6 +145,24 @@ func (r *lazyMetric) Create(version *semver.Version) bool {
 	return r.IsCreated()
 }

+// ClearState will clear all the states marked by Create.
+// It intends to be used for re-register a hidden metric.
+func (r *lazyMetric) ClearState() {
+	r.createLock.Lock()
+	defer r.createLock.Unlock()
+
+	r.isDeprecated = false
+	r.isHidden = false
+	r.isCreated = false
+	r.markDeprecationOnce = *(new(sync.Once))
+	r.createOnce = *(new(sync.Once))
+}
+
+// FQName returns the fully-qualified metric name of the collector.
+func (r *lazyMetric) FQName() string {
+	return r.fqName
+}
+
 /*
 This code is directly lifted from the prometheus codebase. It's a convenience struct which
 allows you satisfy the Collector interface automatically if you already satisfy the Metric interface.
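Note (editor's illustration): each collector now remembers its fully-qualified name so hidden collectors can be indexed and later re-registered. BuildFQName, re-exported from Prometheus, joins the non-empty parts with underscores; the metric names below are made-up examples:

package main

import (
	"fmt"

	"k8s.io/component-base/metrics"
)

func main() {
	// Non-empty parts are joined with "_"; empty parts are skipped.
	fmt.Println(metrics.BuildFQName("apiserver", "storage", "objects")) // apiserver_storage_objects
	fmt.Println(metrics.BuildFQName("", "", "up"))                      // up
}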
cluster-autoscaler/vendor/k8s.io/component-base/metrics/prometheus/restclient/metrics.go (72 changes, generated, vendored)
@@ -17,6 +17,7 @@ limitations under the License.
 package restclient

 import (
+	"math"
 	"net/url"
 	"time"

@@ -55,13 +56,62 @@ var (
 		},
 		[]string{"code", "method", "host"},
 	)
+
+	execPluginCertTTL = k8smetrics.NewGauge(
+		&k8smetrics.GaugeOpts{
+			Name: "rest_client_exec_plugin_ttl_seconds",
+			Help: "Gauge of the shortest TTL (time-to-live) of the client " +
+				"certificate(s) managed by the auth exec plugin. The value " +
+				"is in seconds until certificate expiry. If auth exec " +
+				"plugins are unused or manage no TLS certificates, the " +
+				"value will be +INF.",
+		},
+	)
+
+	execPluginCertRotation = k8smetrics.NewHistogram(
+		&k8smetrics.HistogramOpts{
+			Name: "rest_client_exec_plugin_certificate_rotation_age",
+			Help: "Histogram of the number of seconds the last auth exec " +
+				"plugin client certificate lived before being rotated. " +
+				"If auth exec plugin client certificates are unused, " +
+				"histogram will contain no data.",
+			// There are three sets of ranges these buckets intend to capture:
+			//   - 10-60 minutes: captures a rotation cadence which is
+			//     happening too quickly.
+			//   - 4 hours - 1 month: captures an ideal rotation cadence.
+			//   - 3 months - 4 years: captures a rotation cadence which is
+			//     is probably too slow or much too slow.
+			Buckets: []float64{
+				600,       // 10 minutes
+				1800,      // 30 minutes
+				3600,      // 1 hour
+				14400,     // 4 hours
+				86400,     // 1 day
+				604800,    // 1 week
+				2592000,   // 1 month
+				7776000,   // 3 months
+				15552000,  // 6 months
+				31104000,  // 1 year
+				124416000, // 4 years
+			},
+		},
+	)
 )

 func init() {
+	execPluginCertTTL.Set(math.Inf(1)) // Initialize TTL to +INF
+
 	legacyregistry.MustRegister(requestLatency)
 	legacyregistry.MustRegister(deprecatedRequestLatency)
 	legacyregistry.MustRegister(requestResult)
-	metrics.Register(&latencyAdapter{m: requestLatency, dm: deprecatedRequestLatency}, &resultAdapter{requestResult})
+	legacyregistry.MustRegister(execPluginCertTTL)
+	legacyregistry.MustRegister(execPluginCertRotation)
+	metrics.Register(metrics.RegisterOpts{
+		ClientCertTTL:         &ttlAdapter{m: execPluginCertTTL},
+		ClientCertRotationAge: &rotationAdapter{m: execPluginCertRotation},
+		RequestLatency:        &latencyAdapter{m: requestLatency, dm: deprecatedRequestLatency},
+		RequestResult:         &resultAdapter{requestResult},
+	})
 }

 type latencyAdapter struct {
@@ -81,3 +131,23 @@ type resultAdapter struct {
 func (r *resultAdapter) Increment(code, method, host string) {
 	r.m.WithLabelValues(code, method, host).Inc()
 }
+
+type ttlAdapter struct {
+	m *k8smetrics.Gauge
+}
+
+func (e *ttlAdapter) Set(ttl *time.Duration) {
+	if ttl == nil {
+		e.m.Set(math.Inf(1))
+	} else {
+		e.m.Set(float64(ttl.Seconds()))
+	}
+}
+
+type rotationAdapter struct {
+	m *k8smetrics.Histogram
+}
+
+func (r *rotationAdapter) Observe(d time.Duration) {
+	r.m.Observe(d.Seconds())
+}
|||
|
|
@ -32,6 +32,8 @@ import (
|
|||
var (
|
||||
showHiddenOnce sync.Once
|
||||
showHidden atomic.Value
|
||||
registries []*kubeRegistry // stores all registries created by NewKubeRegistry()
|
||||
registriesLock sync.RWMutex
|
||||
)
|
||||
|
||||
// shouldHide be used to check if a specific metric with deprecated version should be hidden
|
||||
|
|
@ -77,6 +79,11 @@ func ValidateShowHiddenMetricsVersion(v string) []error {
|
|||
func SetShowHidden() {
|
||||
showHiddenOnce.Do(func() {
|
||||
showHidden.Store(true)
|
||||
|
||||
// re-register collectors that has been hidden in phase of last registry.
|
||||
for _, r := range registries {
|
||||
r.enableHiddenCollectors()
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
|
|
@ -91,7 +98,15 @@ func ShouldShowHidden() bool {
|
|||
// will register with KubeRegistry.
|
||||
type Registerable interface {
|
||||
prometheus.Collector
|
||||
|
||||
// Create will mark deprecated state for the collector
|
||||
Create(version *semver.Version) bool
|
||||
|
||||
// ClearState will clear all the states marked by Create.
|
||||
ClearState()
|
||||
|
||||
// FQName returns the fully-qualified metric name of the collector.
|
||||
FQName() string
|
||||
}
|
||||
|
||||
// KubeRegistry is an interface which implements a subset of prometheus.Registerer and
|
||||
|
|
@@ -114,7 +129,9 @@ type KubeRegistry interface {
// automatic behavior can be configured for metric versioning.
type kubeRegistry struct {
PromRegistry
version semver.Version
hiddenCollectors map[string]Registerable // stores all collectors that have been hidden
hiddenCollectorsLock sync.RWMutex
}

// Register registers a new Collector to be included in metrics
@@ -126,6 +143,9 @@ func (kr *kubeRegistry) Register(c Registerable) error {
if c.Create(&kr.version) {
return kr.PromRegistry.Register(c)
}

kr.trackHiddenCollector(c)

return nil
}
@@ -137,6 +157,8 @@ func (kr *kubeRegistry) MustRegister(cs ...Registerable) {
for _, c := range cs {
if c.Create(&kr.version) {
metrics = append(metrics, c)
} else {
kr.trackHiddenCollector(c)
}
}
kr.PromRegistry.MustRegister(metrics...)
@@ -204,11 +226,37 @@ func (kr *kubeRegistry) Gather() ([]*dto.MetricFamily, error) {
return kr.PromRegistry.Gather()
}

// trackHiddenCollector stores all hidden collectors.
func (kr *kubeRegistry) trackHiddenCollector(c Registerable) {
kr.hiddenCollectorsLock.Lock()
defer kr.hiddenCollectorsLock.Unlock()

kr.hiddenCollectors[c.FQName()] = c
}

// enableHiddenCollectors will re-register all of the hidden collectors.
func (kr *kubeRegistry) enableHiddenCollectors() {
kr.hiddenCollectorsLock.Lock()
defer kr.hiddenCollectorsLock.Unlock()

for _, c := range kr.hiddenCollectors {
c.ClearState()
kr.MustRegister(c)
}
kr.hiddenCollectors = nil
}

func newKubeRegistry(v apimachineryversion.Info) *kubeRegistry {
r := &kubeRegistry{
PromRegistry: prometheus.NewRegistry(),
version: parseVersion(v),
hiddenCollectors: make(map[string]Registerable),
}

registriesLock.Lock()
defer registriesLock.Unlock()
registries = append(registries, r)

return r
}
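Taken together, the registry changes above give deprecated metrics a two-phase lifecycle; a condensed sketch of the flow (editor's summary using the identifiers from this diff):

// Phase 1, registration: a collector whose deprecation version is older than
// the binary's version fails Create(), so Register/MustRegister park it:
//     kr.Register(c)   ->  c.Create(&kr.version) == false
//                      ->  kr.trackHiddenCollector(c)   // kept, not exported
//
// Phase 2, opt-in: SetShowHidden() (guarded by sync.Once) replays the parked
// collectors on every registry created so far:
//     SetShowHidden()  ->  r.enableHiddenCollectors()
//                      ->  c.ClearState(); kr.MustRegister(c)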
@@ -216,5 +264,6 @@ func newKubeRegistry(v apimachineryversion.Info) *kubeRegistry {
// pre-registered.
func NewKubeRegistry() KubeRegistry {
r := newKubeRegistry(version.Get())

return r
}
@@ -44,7 +44,7 @@ func NewSummary(opts *SummaryOpts) *Summary {
lazyMetric: lazyMetric{},
}
s.setPrometheusSummary(noopMetric{})
s.lazyInit(s)
s.lazyInit(s, BuildFQName(opts.Namespace, opts.Subsystem, opts.Name))
return s
}
@@ -98,7 +98,7 @@ func NewSummaryVec(opts *SummaryOpts, labels []string) *SummaryVec {
originalLabels: labels,
lazyMetric: lazyMetric{},
}
v.lazyInit(v)
v.lazyInit(v, BuildFQName(opts.Namespace, opts.Subsystem, opts.Name))
return v
}
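For context (editor's note): BuildFQName follows the Prometheus convention of joining the non-empty name parts with underscores, so the lazy-init path above now knows its fully-qualified metric name up front. Illustrative values, assuming a summary with Namespace "kubelet" and Subsystem "docker":

// BuildFQName("kubelet", "docker", "operations") == "kubelet_docker_operations"
// empty parts are skipped: BuildFQName("", "", "operations") == "operations"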
@@ -144,7 +144,7 @@ func (a *acrProvider) loadConfig(rdr io.Reader) error {
klog.Errorf("Failed to load azure credential file: %v", err)
}

a.environment, err = auth.ParseAzureEnvironment(a.config.Cloud)
a.environment, err = auth.ParseAzureEnvironment(a.config.Cloud, a.config.ResourceManagerEndpoint, a.config.IdentitySystem)
if err != nil {
return err
}
cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cgroup_manager_linux.go (generated, vendored, 3 changes)
@@ -257,6 +257,9 @@ func (m *cgroupManagerImpl) Exists(name CgroupName) bool {
if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.SupportPodPidsLimit) || utilfeature.DefaultFeatureGate.Enabled(kubefeatures.SupportNodePidsLimit) {
whitelistControllers.Insert("pids")
}
if _, ok := m.subsystems.MountPoints["hugetlb"]; ok {
whitelistControllers.Insert("hugetlb")
}
var missingPaths []string
// If even one cgroup path doesn't exist, then the cgroup doesn't exist.
for controller, path := range cgroupPaths {
@@ -69,7 +69,7 @@ func toPullableImageID(id string, image *dockertypes.ImageInspect) string {
// Default to the image ID, but if RepoDigests is not empty, use
// the first digest instead.
imageID := DockerImageIDPrefix + id
if len(image.RepoDigests) > 0 {
if image != nil && len(image.RepoDigests) > 0 {
imageID = DockerPullableImageIDPrefix + image.RepoDigests[0]
}
return imageID
@@ -347,7 +347,10 @@ func (ds *dockerService) ContainerStatus(_ context.Context, req *runtimeapi.Cont
// Convert the image id to a pullable id.
ir, err := ds.client.InspectImageByID(r.Image)
if err != nil {
return nil, fmt.Errorf("unable to inspect docker image %q while inspecting docker container %q: %v", r.Image, containerID, err)
if !libdocker.IsImageNotFoundError(err) {
return nil, fmt.Errorf("unable to inspect docker image %q while inspecting docker container %q: %v", r.Image, containerID, err)
}
klog.Warningf("ignore error image %q not found while inspecting docker container %q: %v", r.Image, containerID, err)
}
imageID := toPullableImageID(r.Image, ir)
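The two dockershim hunks above work as a pair: an image that disappears between container creation and a status query no longer nil-panics or fails ContainerStatus. A condensed sketch of the combined behavior (editor's illustration; inspectImage and isNotFound are hypothetical stand-ins for the real client calls):

func pullableImageID(id string) (string, error) {
	img, err := inspectImage(id) // hypothetical helper
	if err != nil {
		if !isNotFound(err) { // hypothetical predicate
			return "", err // real failures still abort the status query
		}
		img = nil // image vanished: tolerated, fall back to the plain ID
	}
	if img != nil && len(img.RepoDigests) > 0 {
		return DockerPullableImageIDPrefix + img.RepoDigests[0], nil
	}
	return DockerImageIDPrefix + id, nil
}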
@@ -16,8 +16,8 @@ limitations under the License.

package events

// Container event reason list
const (
CreatedContainer = "Created"
StartedContainer = "Started"
FailedToCreateContainer = "Failed"
@@ -26,22 +26,28 @@ const (
PreemptContainer = "Preempting"
BackOffStartContainer = "BackOff"
ExceededGracePeriod = "ExceededGracePeriod"
)

// Pod event reason list
const (
FailedToKillPod = "FailedKillPod"
FailedToCreatePodContainer = "FailedCreatePodContainer"
FailedToMakePodDataDirectories = "Failed"
NetworkNotReady = "NetworkNotReady"
)

// Image event reason list
const (
PullingImage = "Pulling"
PulledImage = "Pulled"
FailedToPullImage = "Failed"
FailedToInspectImage = "InspectFailed"
ErrImageNeverPullPolicy = "ErrImageNeverPull"
BackOffPullImage = "BackOff"
)

// kubelet event reason list
const (
NodeReady = "NodeReady"
NodeNotReady = "NodeNotReady"
NodeSchedulable = "NodeSchedulable"

@@ -66,22 +72,32 @@ const (
SandboxChanged = "SandboxChanged"
FailedCreatePodSandBox = "FailedCreatePodSandBox"
FailedStatusPodSandBox = "FailedPodSandBoxStatus"
)

// Image manager event reason list
const (
InvalidDiskCapacity = "InvalidDiskCapacity"
FreeDiskSpaceFailed = "FreeDiskSpaceFailed"
)

// Probe event reason list
const (
ContainerUnhealthy = "Unhealthy"
ContainerProbeWarning = "ProbeWarning"
)

// Pod worker event reason list
const (
FailedSync = "FailedSync"
)

// Config event reason list
const (
FailedValidation = "FailedValidation"
)

// Lifecycle hooks
const (
FailedPostStartHook = "FailedPostStartHook"
FailedPreStopHook = "FailedPreStopHook"
)
@@ -111,7 +111,7 @@ func NewHollowKubelet(
VolumePlugins: volumePlugins(),
TLSOptions: nil,
OOMAdjuster: oom.NewFakeOOMAdjuster(),
Mounter: mount.New("" /* default mount path */),
Mounter: &mount.FakeMounter{},
Subpather: &subpath.FakeSubpath{},
HostUtil: hostutil.NewFakeHostUtil(nil),
}
@@ -600,14 +600,16 @@ func (proxier *Proxier) OnNodeAdd(node *v1.Node) {
klog.Errorf("Received a watch event for a node %s that doesn't match the current node %v", node.Name, proxier.hostname)
return
}
oldLabels := proxier.nodeLabels
newLabels := node.Labels
proxier.mu.Lock()
proxier.nodeLabels = newLabels
proxier.mu.Unlock()
if !reflect.DeepEqual(oldLabels, newLabels) {
proxier.syncProxyRules()

if reflect.DeepEqual(proxier.nodeLabels, node.Labels) {
return
}

proxier.mu.Lock()
proxier.nodeLabels = node.Labels
proxier.mu.Unlock()

proxier.syncProxyRules()
}

// OnNodeUpdate is called whenever modification of an existing
@@ -617,14 +619,16 @@ func (proxier *Proxier) OnNodeUpdate(oldNode, node *v1.Node) {
klog.Errorf("Received a watch event for a node %s that doesn't match the current node %v", node.Name, proxier.hostname)
return
}
oldLabels := proxier.nodeLabels
newLabels := node.Labels
proxier.mu.Lock()
proxier.nodeLabels = newLabels
proxier.mu.Unlock()
if !reflect.DeepEqual(oldLabels, newLabels) {
proxier.syncProxyRules()

if reflect.DeepEqual(proxier.nodeLabels, node.Labels) {
return
}

proxier.mu.Lock()
proxier.nodeLabels = node.Labels
proxier.mu.Unlock()

proxier.syncProxyRules()
}

// OnNodeDelete is called whenever deletion of an existing node
@@ -637,6 +641,7 @@ func (proxier *Proxier) OnNodeDelete(node *v1.Node) {
proxier.mu.Lock()
proxier.nodeLabels = nil
proxier.mu.Unlock()

proxier.syncProxyRules()
}
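The node-handler rewrites above (repeated below for the IPVS proxier) swap a lock-then-compare sequence for a compare-then-lock one: the expensive rule sync is skipped entirely when the labels did not change, and proxier.nodeLabels is read before any mutation. A minimal sketch of the adopted shape (editor's illustration):

func (p *proxier) onNodeLabelsChanged(labels map[string]string) {
	if reflect.DeepEqual(p.nodeLabels, labels) {
		return // no change: skip the costly syncProxyRules
	}
	p.mu.Lock()
	p.nodeLabels = labels
	p.mu.Unlock()
	p.syncProxyRules()
}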
@@ -842,7 +847,7 @@ func (proxier *Proxier) syncProxyRules() {
writeLine(proxier.filterChains, utiliptables.MakeChainLine(chainName))
}
}
for _, chainName := range []utiliptables.Chain{kubeServicesChain, kubeNodePortsChain, kubePostroutingChain, KubeMarkMasqChain, KubeMarkDropChain} {
for _, chainName := range []utiliptables.Chain{kubeServicesChain, kubeNodePortsChain, kubePostroutingChain, KubeMarkMasqChain} {
if chain, ok := existingNATChains[chainName]; ok {
writeBytesLine(proxier.natChains, chain)
} else {
@@ -906,14 +906,16 @@ func (proxier *Proxier) OnNodeAdd(node *v1.Node) {
klog.Errorf("Received a watch event for a node %s that doesn't match the current node %v", node.Name, proxier.hostname)
return
}
oldLabels := proxier.nodeLabels
newLabels := node.Labels
proxier.mu.Lock()
proxier.nodeLabels = newLabels
proxier.mu.Unlock()
if !reflect.DeepEqual(oldLabels, newLabels) {
proxier.syncProxyRules()

if reflect.DeepEqual(proxier.nodeLabels, node.Labels) {
return
}

proxier.mu.Lock()
proxier.nodeLabels = node.Labels
proxier.mu.Unlock()

proxier.syncProxyRules()
}

// OnNodeUpdate is called whenever modification of an existing
@@ -923,14 +925,16 @@ func (proxier *Proxier) OnNodeUpdate(oldNode, node *v1.Node) {
klog.Errorf("Received a watch event for a node %s that doesn't match the current node %v", node.Name, proxier.hostname)
return
}
oldLabels := proxier.nodeLabels
newLabels := node.Labels
proxier.mu.Lock()
proxier.nodeLabels = newLabels
proxier.mu.Unlock()
if !reflect.DeepEqual(oldLabels, newLabels) {
proxier.syncProxyRules()

if reflect.DeepEqual(proxier.nodeLabels, node.Labels) {
return
}

proxier.mu.Lock()
proxier.nodeLabels = node.Labels
proxier.mu.Unlock()

proxier.syncProxyRules()
}

// OnNodeDelete is called whenever deletion of an existing node
@@ -943,6 +947,7 @@ func (proxier *Proxier) OnNodeDelete(node *v1.Node) {
proxier.mu.Lock()
proxier.nodeLabels = nil
proxier.mu.Unlock()

proxier.syncProxyRules()
}
@@ -1218,8 +1218,14 @@ func (proxier *Proxier) syncProxyRules() {

// If nodePort is specified, user should be able to use nodeIP:nodePort to reach the backend endpoints
if svcInfo.nodePort > 0 {
// If the preserve-destination service annotation is present, we will disable routing mesh for NodePort.
// This means that health services can use Node Port without falsely getting results from a different node.
nodePortEndpoints := hnsEndpoints
if svcInfo.preserveDIP {
nodePortEndpoints = hnsLocalEndpoints
}
hnsLoadBalancer, err := hns.getLoadBalancer(
hnsEndpoints,
nodePortEndpoints,
loadBalancerFlags{localRoutedVIP: true},
sourceVip,
"",
@@ -81,11 +81,6 @@ var (
predicateMetadataProducerFactory PredicateMetadataProducerFactory
)

const (
// DefaultProvider defines the default algorithm provider name.
DefaultProvider = "DefaultProvider"
)

// AlgorithmProviderConfig is used to store the configuration of algorithm providers.
type AlgorithmProviderConfig struct {
FitPredicateKeys sets.String
cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithmprovider/BUILD (generated, vendored, 5 changes)
@@ -17,7 +17,10 @@ go_test(
name = "go_default_test",
srcs = ["plugins_test.go"],
embed = [":go_default_library"],
deps = ["//pkg/scheduler:go_default_library"],
deps = [
"//pkg/scheduler:go_default_library",
"//pkg/scheduler/apis/config:go_default_library",
],
)

filegroup(
@@ -20,6 +20,7 @@ go_library(
"//pkg/scheduler/algorithm:go_default_library",
"//pkg/scheduler/algorithm/predicates:go_default_library",
"//pkg/scheduler/algorithm/priorities:go_default_library",
"//pkg/scheduler/apis/config:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
@@ -26,6 +26,7 @@ import (
"k8s.io/kubernetes/pkg/scheduler"
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
"k8s.io/kubernetes/pkg/scheduler/algorithm/priorities"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/apis/config"
)

const (
@@ -93,7 +94,7 @@ func ApplyFeatureGates() (restore func()) {
func registerAlgorithmProvider(predSet, priSet sets.String) {
// Registers algorithm providers. By default we use 'DefaultProvider', but the user can select another via flag.
scheduler.RegisterAlgorithmProvider(scheduler.DefaultProvider, predSet, priSet)
scheduler.RegisterAlgorithmProvider(schedulerapi.SchedulerDefaultProviderName, predSet, priSet)
// Cluster autoscaler friendly scheduling algorithm.
scheduler.RegisterAlgorithmProvider(ClusterAutoscalerProvider, predSet,
copyAndReplace(priSet, priorities.LeastRequestedPriority, priorities.MostRequestedPriority))
cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/core/generic_scheduler.go (generated, vendored, 10 changes)
@@ -132,8 +132,12 @@ type ScheduleAlgorithm interface {
Extenders() []algorithm.SchedulerExtender
// GetPredicateMetadataProducer returns the predicate metadata producer. This is needed
// for cluster autoscaler integration.
// TODO(ahg-g): remove this once CA migrates to creating a Framework instead of a full scheduler.
// TODO(#85691): remove this once CA migrates to creating a Framework instead of a full scheduler.
PredicateMetadataProducer() predicates.MetadataProducer
// Snapshot snapshots scheduler cache and node infos. This is needed
// for cluster autoscaler integration.
// TODO(#85691): remove this once CA migrates to creating a Framework instead of a full scheduler.
Snapshot() error
}

// ScheduleResult represents the result of one pod scheduled. It will contain
@@ -169,7 +173,7 @@ type genericScheduler struct {

// snapshot snapshots scheduler cache and node infos for all fit and priority
// functions.
func (g *genericScheduler) snapshot() error {
func (g *genericScheduler) Snapshot() error {
// Used for all fit and priority funcs.
return g.cache.UpdateNodeInfoSnapshot(g.nodeInfoSnapshot)
}
@@ -192,7 +196,7 @@ func (g *genericScheduler) Schedule(ctx context.Context, state *framework.CycleS
}
trace.Step("Basic checks done")

if err := g.snapshot(); err != nil {
if err := g.Snapshot(); err != nil {
return result, err
}
trace.Step("Snapshotting scheduler cache and node infos done")
@@ -119,7 +119,7 @@ func (c *Configurator) GetHardPodAffinitySymmetricWeight() int32 {

// Create creates a scheduler with the default algorithm provider.
func (c *Configurator) Create() (*Scheduler, error) {
return c.CreateFromProvider(DefaultProvider)
return c.CreateFromProvider(schedulerapi.SchedulerDefaultProviderName)
}

// CreateFromProvider creates a scheduler from the name of a registered algorithm provider.
@@ -143,8 +143,8 @@ func (c *Configurator) CreateFromConfig(policy schedulerapi.Policy) (*Scheduler,

predicateKeys := sets.NewString()
if policy.Predicates == nil {
klog.V(2).Infof("Using predicates from algorithm provider '%v'", DefaultProvider)
provider, err := GetAlgorithmProvider(DefaultProvider)
klog.V(2).Infof("Using predicates from algorithm provider '%v'", schedulerapi.SchedulerDefaultProviderName)
provider, err := GetAlgorithmProvider(schedulerapi.SchedulerDefaultProviderName)
if err != nil {
return nil, err
}
@@ -158,8 +158,8 @@ func (c *Configurator) CreateFromConfig(policy schedulerapi.Policy) (*Scheduler,

priorityKeys := sets.NewString()
if policy.Priorities == nil {
klog.V(2).Infof("Using priorities from algorithm provider '%v'", DefaultProvider)
provider, err := GetAlgorithmProvider(DefaultProvider)
klog.V(2).Infof("Using priorities from algorithm provider '%v'", schedulerapi.SchedulerDefaultProviderName)
provider, err := GetAlgorithmProvider(schedulerapi.SchedulerDefaultProviderName)
if err != nil {
return nil, err
}
@@ -46,7 +46,7 @@ func (d *CacheDumper) dumpNodes() {
snapshot := d.cache.Snapshot()
klog.Info("Dump of cached NodeInfo")
for _, nodeInfo := range snapshot.Nodes {
klog.Info(printNodeInfo(nodeInfo))
klog.Info(d.printNodeInfo(nodeInfo))
}
}
@@ -61,14 +61,22 @@ func (d *CacheDumper) dumpSchedulingQueue() {
}

// printNodeInfo writes parts of NodeInfo to a string.
func printNodeInfo(n *schedulernodeinfo.NodeInfo) string {
func (d *CacheDumper) printNodeInfo(n *schedulernodeinfo.NodeInfo) string {
var nodeData strings.Builder
nodeData.WriteString(fmt.Sprintf("\nNode name: %+v\nRequested Resources: %+v\nAllocatable Resources:%+v\nNumber of Pods: %v\nPods:\n",
nodeData.WriteString(fmt.Sprintf("\nNode name: %+v\nRequested Resources: %+v\nAllocatable Resources:%+v\nScheduled Pods(number: %v):\n",
n.Node().Name, n.RequestedResource(), n.AllocatableResource(), len(n.Pods())))
// Dumping Pod Info
for _, p := range n.Pods() {
nodeData.WriteString(printPod(p))
}
// Dumping nominated pods info on the node
nominatedPods := d.podQueue.NominatedPodsForNode(n.Node().Name)
if len(nominatedPods) != 0 {
nodeData.WriteString(fmt.Sprintf("Nominated Pods(number: %v):\n", len(nominatedPods)))
for _, p := range nominatedPods {
nodeData.WriteString(printPod(p))
}
}
return nodeData.String()
}
@@ -33,14 +33,17 @@ import (
utiltrace "k8s.io/utils/trace"
)

// RulePosition holds the -I/-A flags for iptable
type RulePosition string

const (
// Prepend is the insert flag for iptable
Prepend RulePosition = "-I"
// Append is the append flag for iptable
Append RulePosition = "-A"
)
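A quick illustration (editor's note) of why the two positions are now documented separately: iptables evaluates a chain top-down and stops at the first matching rule, so the position flag decides precedence.

// rule consulted first: inserted at the head of the chain
first := []string{"-t", string(TableNAT), string(Prepend), string(ChainPrerouting), "-j", "KUBE-SERVICES"}
// rule consulted last: appended at the tail
last := []string{"-t", string(TableNAT), string(Append), string(ChainPrerouting), "-j", "LOG"}
_, _ = first, last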
// An injectable interface for running iptables commands. Implementations must be goroutine-safe.
// Interface is an injectable interface for running iptables commands. Implementations must be goroutine-safe.
type Interface interface {
// EnsureChain checks if the specified chain exists and, if not, creates it. If the chain existed, return true.
EnsureChain(table Table, chain Chain) (bool, error)
@@ -83,29 +86,42 @@ type Interface interface {
HasRandomFully() bool
}

// Protocol defines the ip protocol, either ipv4 or ipv6
type Protocol byte

const (
// ProtocolIpv4 represents the ipv4 protocol in iptables
ProtocolIpv4 Protocol = iota + 1
// ProtocolIpv6 represents the ipv6 protocol in iptables
ProtocolIpv6
)

// Table represents an iptables table: filter, nat, mangle, or raw
type Table string

const (
// TableNAT represents the built-in nat table
TableNAT Table = "nat"
// TableFilter represents the built-in filter table
TableFilter Table = "filter"
// TableMangle represents the built-in mangle table
TableMangle Table = "mangle"
)

// Chain represents an iptables chain
type Chain string

const (
// ChainPostrouting used for source NAT in nat table
ChainPostrouting Chain = "POSTROUTING"
// ChainPrerouting used for DNAT (destination NAT) in nat table
ChainPrerouting Chain = "PREROUTING"
// ChainOutput used for the packets going out from local
ChainOutput Chain = "OUTPUT"
// ChainInput used for incoming packets
ChainInput Chain = "INPUT"
// ChainForward used for the packets for another NIC
ChainForward Chain = "FORWARD"
)

const (
@@ -117,32 +133,49 @@ const (
cmdIP6Tables string = "ip6tables"
)

// Option flag for Restore
// RestoreCountersFlag is an option flag for Restore
type RestoreCountersFlag bool

// RestoreCounters a boolean true constant for the option flag RestoreCountersFlag
const RestoreCounters RestoreCountersFlag = true

// NoRestoreCounters a boolean false constant for the option flag RestoreCountersFlag
const NoRestoreCounters RestoreCountersFlag = false

// Option flag for Flush
// FlushFlag an option flag for Flush
type FlushFlag bool

// FlushTables a boolean true constant for option flag FlushFlag
const FlushTables FlushFlag = true

// NoFlushTables a boolean false constant for option flag FlushFlag
const NoFlushTables FlushFlag = false

// MinCheckVersion minimum version to be checked
// Versions of iptables less than this do not support the -C / --check flag
// (test whether a rule exists).
var MinCheckVersion = utilversion.MustParseGeneric("1.4.11")

// RandomFullyMinVersion is the minimum version from which the --random-fully flag is supported,
// used for port mapping to be fully randomized
var RandomFullyMinVersion = utilversion.MustParseGeneric("1.6.2")

// Minimum iptables versions supporting the -w and -w<seconds> flags
// WaitMinVersion a minimum iptables versions supporting the -w and -w<seconds> flags
var WaitMinVersion = utilversion.MustParseGeneric("1.4.20")

// WaitSecondsMinVersion a minimum iptables versions supporting the wait seconds
var WaitSecondsMinVersion = utilversion.MustParseGeneric("1.4.22")

// WaitRestoreMinVersion a minimum iptables versions supporting the wait restore seconds
var WaitRestoreMinVersion = utilversion.MustParseGeneric("1.6.2")

// WaitString a constant for specifying the wait flag
const WaitString = "-w"

// WaitSecondsValue a constant for specifying the default wait seconds
const WaitSecondsValue = "5"

// LockfilePath16x is the iptables lock file acquired by any process that's making any change in the iptable rule
const LockfilePath16x = "/run/xtables.lock"

// runner implements Interface in terms of exec("iptables").
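For orientation (editor's sketch, hypothetical invocation): the wait constants exist because concurrent iptables callers race over the xtables lock; from 1.4.22 on, the runner can ask iptables to block for a bounded time instead of failing immediately.

// "-w 5": wait up to 5 seconds for the lock at LockfilePath16x
args := []string{WaitString, WaitSecondsValue, "-t", string(TableNAT), "-N", "KUBE-SERVICES"}
// iptables >= 1.4.20 but < 1.4.22 only understands the bare "-w"
legacyArgs := []string{WaitString, "-t", string(TableNAT), "-N", "KUBE-SERVICES"}
_, _ = args, legacyArgs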
@@ -706,7 +739,6 @@ const iptablesStatusResourceProblem = 4
func isResourceError(err error) bool {
if ee, isExitError := err.(utilexec.ExitError); isExitError {
return ee.ExitStatus() == iptablesStatusResourceProblem
} else {
return false
}
return false
}
@@ -112,7 +112,7 @@ func GetNodeHostIP(node *v1.Node) (net.IP, error) {
func GetNodeIP(client clientset.Interface, hostname string) net.IP {
var nodeIP net.IP
backoff := wait.Backoff{
Steps: 5,
Steps: 6,
Duration: 1 * time.Second,
Factor: 2.0,
Jitter: 0.2,
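Worked arithmetic for the change above (editor's note): with Duration 1 s and Factor 2.0, the waits are 1+2+4+8+16 = 31 s for Steps: 5 and 1+2+4+8+16+32 = 63 s for Steps: 6 (before the 20% jitter), so the extra step roughly doubles how long a node keeps retrying before giving up.

// rough total of a wait.Backoff window, ignoring jitter
func totalBackoff(steps int, base time.Duration, factor float64) time.Duration {
	total, d := time.Duration(0), base
	for i := 0; i < steps; i++ {
		total += d
		d = time.Duration(float64(d) * factor)
	}
	return total // steps=6, base=1s, factor=2 -> 63s
}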
@@ -7,6 +7,7 @@ approvers:
- msau42
- jingxu97
- jsafrane
- cofyc
reviewers:
- saad-ali
- jsafrane

@@ -14,3 +15,4 @@ reviewers:
- jingxu97
- msau42
- vishh
- cofyc
cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/azure/auth/azure_auth.go (generated, vendored, 44 changes)
@@ -30,11 +30,14 @@ import (
"k8s.io/klog"
)

const (
// ADFSIdentitySystem is the override value for tenantID on Azure Stack clouds.
ADFSIdentitySystem = "adfs"
)

var (
// ErrorNoAuth indicates that no credentials are provided.
ErrorNoAuth = fmt.Errorf("no credentials provided for Azure cloud provider")
// ADFSIdentitySystem indicates value of tenantId for ADFS on Azure Stack.
ADFSIdentitySystem = "ADFS"
)

// AzureAuthConfig holds auth related part of cloud config
@@ -59,15 +62,19 @@ type AzureAuthConfig struct {
UserAssignedIdentityID string `json:"userAssignedIdentityID,omitempty" yaml:"userAssignedIdentityID,omitempty"`
// The ID of the Azure Subscription that the cluster is deployed in
SubscriptionID string `json:"subscriptionId,omitempty" yaml:"subscriptionId,omitempty"`
// Identity system value for the deployment. This gets populated for the Azure Stack case.
// IdentitySystem indicates the identity provider. Relevant only to hybrid clouds (Azure Stack).
// Allowed values are 'azure_ad' (default), 'adfs'.
IdentitySystem string `json:"identitySystem,omitempty" yaml:"identitySystem,omitempty"`
// ResourceManagerEndpoint is the cloud's resource manager endpoint. If set, cloud provider queries this endpoint
// in order to generate an autorest.Environment instance instead of using one of the pre-defined Environments.
ResourceManagerEndpoint string `json:"resourceManagerEndpoint,omitempty" yaml:"resourceManagerEndpoint,omitempty"`
}
// GetServicePrincipalToken creates a new service principal token based on the configuration
func GetServicePrincipalToken(config *AzureAuthConfig, env *azure.Environment) (*adal.ServicePrincipalToken, error) {
var tenantID string
if strings.EqualFold(config.IdentitySystem, ADFSIdentitySystem) {
tenantID = "adfs"
tenantID = ADFSIdentitySystem
} else {
tenantID = config.TenantID
}
@@ -125,13 +132,24 @@ func GetServicePrincipalToken(config *AzureAuthConfig, env *azure.Environment) (
return nil, ErrorNoAuth
}

// ParseAzureEnvironment returns azure environment by name
func ParseAzureEnvironment(cloudName string) (*azure.Environment, error) {
// ParseAzureEnvironment returns the azure environment.
// If 'resourceManagerEndpoint' is set, the environment is computed by querying the cloud's resource manager endpoint.
// Otherwise, a pre-defined Environment is looked up by name.
func ParseAzureEnvironment(cloudName, resourceManagerEndpoint, identitySystem string) (*azure.Environment, error) {
var env azure.Environment
var err error
if cloudName == "" {
if resourceManagerEndpoint != "" {
klog.V(4).Infof("Loading environment from resource manager endpoint: %s", resourceManagerEndpoint)
nameOverride := azure.OverrideProperty{Key: azure.EnvironmentName, Value: cloudName}
env, err = azure.EnvironmentFromURL(resourceManagerEndpoint, nameOverride)
if err == nil {
azureStackOverrides(&env, resourceManagerEndpoint, identitySystem)
}
} else if cloudName == "" {
klog.V(4).Info("Using public cloud environment")
env = azure.PublicCloud
} else {
klog.V(4).Infof("Using %s environment", cloudName)
env, err = azure.EnvironmentFromName(cloudName)
}
return &env, err
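A short usage sketch (editor's illustration with a made-up Azure Stack endpoint): when resourceManagerEndpoint is non-empty it wins over the name lookup, and the old empty-name-means-public-cloud behavior survives as the second branch.

// hypothetical values, for illustration only
env, err := ParseAzureEnvironment("AzureStackCloud", "https://management.local.azurestack.external/", "adfs")
// -> EnvironmentFromURL path, then azureStackOverrides

env, err = ParseAzureEnvironment("", "", "")
// -> azure.PublicCloud, as before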
@@ -151,3 +169,15 @@ func decodePkcs12(pkcs []byte, password string) (*x509.Certificate, *rsa.Private

return certificate, rsaPrivateKey, nil
}

// azureStackOverrides ensures that the Environment matches what AKS Engine currently generates for Azure Stack
func azureStackOverrides(env *azure.Environment, resourceManagerEndpoint, identitySystem string) {
env.ManagementPortalURL = strings.Replace(resourceManagerEndpoint, "https://management.", "https://portal.", -1)
env.ServiceManagementEndpoint = env.TokenAudience
env.ResourceManagerVMDNSSuffix = strings.Replace(resourceManagerEndpoint, "https://management.", "cloudapp.", -1)
env.ResourceManagerVMDNSSuffix = strings.TrimSuffix(env.ResourceManagerVMDNSSuffix, "/")
if strings.EqualFold(identitySystem, ADFSIdentitySystem) {
env.ActiveDirectoryEndpoint = strings.TrimSuffix(env.ActiveDirectoryEndpoint, "/")
env.ActiveDirectoryEndpoint = strings.TrimSuffix(env.ActiveDirectoryEndpoint, "adfs")
}
}
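Worked example of the rewrites above (editor's note, hypothetical endpoint https://management.local.azurestack.external/):

// ManagementPortalURL        -> "https://portal.local.azurestack.external/"
// ResourceManagerVMDNSSuffix -> "cloudapp.local.azurestack.external" (prefix replaced, trailing "/" trimmed)
// with identitySystem "adfs", an ActiveDirectoryEndpoint such as
// "https://adfs.local.azurestack.external/adfs/" loses its trailing "/" and "adfs" path segment.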
@@ -325,7 +325,7 @@ func (az *Cloud) InitializeCloudFromConfig(config *Config, fromSecret bool) erro
}
}

env, err := auth.ParseAzureEnvironment(config.Cloud)
env, err := auth.ParseAzureEnvironment(config.Cloud, config.ResourceManagerEndpoint, config.IdentitySystem)
if err != nil {
return err
}
@@ -235,7 +235,7 @@ github.com/golang/protobuf/ptypes/timestamp
github.com/golang/protobuf/ptypes/wrappers
github.com/golang/protobuf/ptypes
github.com/golang/protobuf/ptypes/duration
# github.com/google/cadvisor v0.34.0 => github.com/google/cadvisor v0.34.0
# github.com/google/cadvisor v0.35.0 => github.com/google/cadvisor v0.35.0
github.com/google/cadvisor/container/common
github.com/google/cadvisor/container/containerd
github.com/google/cadvisor/container/docker

@@ -664,7 +664,7 @@ gopkg.in/square/go-jose.v2/json
gopkg.in/warnings.v0
# gopkg.in/yaml.v2 v2.2.4 => gopkg.in/yaml.v2 v2.2.4
gopkg.in/yaml.v2
# k8s.io/api v0.0.0 => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/api
# k8s.io/api v0.0.0 => /tmp/ca-update-vendor.55jg/kubernetes/staging/src/k8s.io/api
k8s.io/api/core/v1
k8s.io/api/apps/v1
k8s.io/api/policy/v1beta1

@@ -708,9 +708,9 @@ k8s.io/api/authorization/v1beta1
k8s.io/api/admission/v1beta1
k8s.io/api/admission/v1
k8s.io/api/imagepolicy/v1alpha1
# k8s.io/apiextensions-apiserver v0.0.0 => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/apiextensions-apiserver
# k8s.io/apiextensions-apiserver v0.0.0 => /tmp/ca-update-vendor.55jg/kubernetes/staging/src/k8s.io/apiextensions-apiserver
k8s.io/apiextensions-apiserver/pkg/features
# k8s.io/apimachinery v0.0.0 => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/apimachinery
# k8s.io/apimachinery v0.0.0 => /tmp/ca-update-vendor.55jg/kubernetes/staging/src/k8s.io/apimachinery
k8s.io/apimachinery/pkg/apis/meta/v1
k8s.io/apimachinery/pkg/api/resource
k8s.io/apimachinery/pkg/util/sets

@@ -768,7 +768,7 @@ k8s.io/apimachinery/pkg/api/validation/path
k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme
k8s.io/apimachinery/third_party/forked/golang/netutil
k8s.io/apimachinery/pkg/apis/meta/v1beta1/validation
# k8s.io/apiserver v0.0.0 => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/apiserver
# k8s.io/apiserver v0.0.0 => /tmp/ca-update-vendor.55jg/kubernetes/staging/src/k8s.io/apiserver
k8s.io/apiserver/pkg/util/feature
k8s.io/apiserver/pkg/features
k8s.io/apiserver/pkg/admission

@@ -852,13 +852,12 @@ k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal
k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission
k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1
k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1
# k8s.io/client-go v0.0.0 => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/client-go
# k8s.io/client-go v0.0.0 => /tmp/ca-update-vendor.55jg/kubernetes/staging/src/k8s.io/client-go
k8s.io/client-go/kubernetes
k8s.io/client-go/rest
k8s.io/client-go/tools/clientcmd
k8s.io/client-go/tools/leaderelection
k8s.io/client-go/tools/leaderelection/resourcelock
k8s.io/client-go/pkg/version
k8s.io/client-go/informers
k8s.io/client-go/util/cert
k8s.io/client-go/tools/record

@@ -912,6 +911,7 @@ k8s.io/client-go/kubernetes/typed/storage/v1
k8s.io/client-go/kubernetes/typed/storage/v1alpha1
k8s.io/client-go/kubernetes/typed/storage/v1beta1
k8s.io/client-go/util/flowcontrol
k8s.io/client-go/pkg/version
k8s.io/client-go/plugin/pkg/client/auth/exec
k8s.io/client-go/rest/watch
k8s.io/client-go/tools/clientcmd/api

@@ -1079,14 +1079,14 @@ k8s.io/client-go/tools/watch
k8s.io/client-go/metadata/metadatainformer
k8s.io/client-go/metadata
k8s.io/client-go/metadata/metadatalister
# k8s.io/cloud-provider v0.0.0 => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/cloud-provider
# k8s.io/cloud-provider v0.0.0 => /tmp/ca-update-vendor.55jg/kubernetes/staging/src/k8s.io/cloud-provider
k8s.io/cloud-provider/volume
k8s.io/cloud-provider
k8s.io/cloud-provider/node/helpers
k8s.io/cloud-provider/service/helpers
k8s.io/cloud-provider/volume/errors
k8s.io/cloud-provider/volume/helpers
# k8s.io/component-base v0.0.0 => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/component-base
# k8s.io/component-base v0.0.0 => /tmp/ca-update-vendor.55jg/kubernetes/staging/src/k8s.io/component-base
k8s.io/component-base/cli/flag
k8s.io/component-base/config
k8s.io/component-base/metrics/legacyregistry

@@ -1098,10 +1098,10 @@ k8s.io/component-base/version/verflag
k8s.io/component-base/logs
k8s.io/component-base/config/validation
k8s.io/component-base/config/v1alpha1
# k8s.io/cri-api v0.0.0 => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/cri-api
# k8s.io/cri-api v0.0.0 => /tmp/ca-update-vendor.55jg/kubernetes/staging/src/k8s.io/cri-api
k8s.io/cri-api/pkg/apis
k8s.io/cri-api/pkg/apis/runtime/v1alpha2
# k8s.io/csi-translation-lib v0.0.0 => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/csi-translation-lib
# k8s.io/csi-translation-lib v0.0.0 => /tmp/ca-update-vendor.55jg/kubernetes/staging/src/k8s.io/csi-translation-lib
k8s.io/csi-translation-lib/plugins
k8s.io/csi-translation-lib
# k8s.io/klog v1.0.0 => k8s.io/klog v1.0.0

@@ -1113,18 +1113,18 @@ k8s.io/kube-openapi/pkg/common
k8s.io/kube-openapi/pkg/handler
k8s.io/kube-openapi/pkg/util
k8s.io/kube-openapi/pkg/schemaconv
# k8s.io/kube-proxy v0.0.0 => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/kube-proxy
# k8s.io/kube-proxy v0.0.0 => /tmp/ca-update-vendor.55jg/kubernetes/staging/src/k8s.io/kube-proxy
k8s.io/kube-proxy/config/v1alpha1
# k8s.io/kube-scheduler v0.0.0 => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/kube-scheduler
# k8s.io/kube-scheduler v0.0.0 => /tmp/ca-update-vendor.55jg/kubernetes/staging/src/k8s.io/kube-scheduler
k8s.io/kube-scheduler/config/v1
k8s.io/kube-scheduler/config/v1alpha1
# k8s.io/kubectl v0.0.0 => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/kubectl
# k8s.io/kubectl v0.0.0 => /tmp/ca-update-vendor.55jg/kubernetes/staging/src/k8s.io/kubectl
k8s.io/kubectl/pkg/scale
# k8s.io/kubelet v0.0.0 => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/kubelet
# k8s.io/kubelet v0.0.0 => /tmp/ca-update-vendor.55jg/kubernetes/staging/src/k8s.io/kubelet
k8s.io/kubelet/config/v1beta1
k8s.io/kubelet/pkg/apis/pluginregistration/v1
k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1
# k8s.io/kubernetes v0.0.0 => /tmp/ca-update-vendor.oDH2/kubernetes
# k8s.io/kubernetes v0.0.0 => /tmp/ca-update-vendor.55jg/kubernetes
k8s.io/kubernetes/pkg/client/leaderelectionconfig
k8s.io/kubernetes/pkg/apis/scheduling
k8s.io/kubernetes/pkg/kubelet/types

@@ -1136,7 +1136,6 @@ k8s.io/kubernetes/pkg/scheduler/api
k8s.io/kubernetes/pkg/scheduler
k8s.io/kubernetes/pkg/scheduler/algorithm/predicates
k8s.io/kubernetes/pkg/scheduler/algorithmprovider
k8s.io/kubernetes/pkg/scheduler/apis/config
k8s.io/kubernetes/pkg/scheduler/listers
k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot
k8s.io/kubernetes/pkg/api/v1/pod

@@ -1189,6 +1188,7 @@ k8s.io/kubernetes/test/utils
k8s.io/kubernetes/pkg/scheduler/apis/extender/v1
k8s.io/kubernetes/pkg/scheduler/algorithm
k8s.io/kubernetes/pkg/scheduler/algorithm/priorities
k8s.io/kubernetes/pkg/scheduler/apis/config
k8s.io/kubernetes/pkg/scheduler/apis/config/scheme
k8s.io/kubernetes/pkg/scheduler/apis/config/validation
k8s.io/kubernetes/pkg/scheduler/core

@@ -1505,7 +1505,7 @@ k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp
k8s.io/kubernetes/pkg/security/podsecuritypolicy/util
k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations
k8s.io/kubernetes/pkg/serviceaccount
# k8s.io/legacy-cloud-providers v0.0.0 => /tmp/ca-update-vendor.oDH2/kubernetes/staging/src/k8s.io/legacy-cloud-providers
# k8s.io/legacy-cloud-providers v0.0.0 => /tmp/ca-update-vendor.55jg/kubernetes/staging/src/k8s.io/legacy-cloud-providers
k8s.io/legacy-cloud-providers/aws
k8s.io/legacy-cloud-providers/gce
k8s.io/legacy-cloud-providers/azure