Mirror of https://github.com/kubernetes/kops.git

Commit d803480485: Merge remote-tracking branch 'upstream/master' into extra_user-data

ROADMAP.md
@@ -1,5 +1,29 @@
# ROADMAP

# 1.9

## Must-have features

* Support for k8s 1.9

## Other features

* Use NodeAuthorizer / bootstrap kubeconfigs [#3551](https://github.com/kubernetes/kops/issues/3551)


# HISTORICAL

# 1.8

## Must-have features

* Support for k8s 1.8

## Other features

* Improved GCE support
* Support for API aggregation

# 1.7

## Must-have features

@@ -18,8 +42,6 @@
* RBAC policies for all components
* bringing rolling-update out of alpha

# HISTORICAL

## 1.6

### Must-have features
@@ -26,4 +26,8 @@ spec:
  - version: 1.6.3
    selector:
      k8s-addon: kubernetes-dashboard.addons.k8s.io
    manifest: v1.6.3.yaml
  - version: 1.7.1
    selector:
      k8s-addon: kubernetes-dashboard.addons.k8s.io
    manifest: v1.7.1.yaml
@@ -0,0 +1,129 @@
# Copyright 2017 The Kubernetes Dashboard Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Configuration to deploy release version of the Dashboard UI compatible with
# Kubernetes 1.7.
#
# Example usage: kubectl create -f <this_file>

# ------------------- Dashboard Service Account ------------------- #

apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system

---
# ------------------- Dashboard Role & Role Binding ------------------- #

kind: Role
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: kubernetes-dashboard-minimal
  namespace: kube-system
rules:
  # Allow Dashboard to create and watch for changes of 'kubernetes-dashboard-key-holder' secret.
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["create", "watch"]
- apiGroups: [""]
  resources: ["secrets"]
  # Allow Dashboard to get, update and delete 'kubernetes-dashboard-key-holder' secret.
  resourceNames: ["kubernetes-dashboard-key-holder"]
  verbs: ["get", "update", "delete"]
  # Allow Dashboard to get metrics from heapster.
- apiGroups: [""]
  resources: ["services"]
  resourceNames: ["heapster"]
  verbs: ["proxy"]

---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
  name: kubernetes-dashboard-minimal
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard-minimal
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kube-system

---
# ------------------- Dashboard Deployment ------------------- #

kind: Deployment
apiVersion: extensions/v1beta1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      containers:
      - name: kubernetes-dashboard
        image: gcr.io/google_containers/kubernetes-dashboard-amd64:v1.7.1
        ports:
        - containerPort: 9090
          protocol: TCP
        args:
          # Uncomment the following line to manually specify Kubernetes API server Host
          # If not specified, Dashboard will attempt to auto discover the API server and connect
          # to it. Uncomment only if the default does not work.
          # - --apiserver-host=http://my-address:port
        livenessProbe:
          httpGet:
            path: /
            port: 9090
          initialDelaySeconds: 30
          timeoutSeconds: 30
      serviceAccountName: kubernetes-dashboard
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule

---
# ------------------- Dashboard Service ------------------- #

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  ports:
  - port: 80
    targetPort: 9090
  selector:
    k8s-app: kubernetes-dashboard
@@ -52,11 +52,12 @@ var (

// the options for the command
type toolboxTemplateOption struct {
  clusterName   string
  configPath    []string
  failOnMissing bool
  outputPath    string
  snippetsPath  []string
  templatePath  []string
}

// NewCmdToolboxTemplate returns a new templating command

@@ -84,6 +85,7 @@ func NewCmdToolboxTemplate(f *util.Factory, out io.Writer) *cobra.Command {
  cmd.Flags().StringSliceVar(&options.templatePath, "template", options.templatePath, "Path to template file or directory of templates to render")
  cmd.Flags().StringSliceVar(&options.snippetsPath, "snippets", options.snippetsPath, "Path to directory containing snippets used for templating")
  cmd.Flags().StringVar(&options.outputPath, "output", options.outputPath, "Path to output file, otherwise defaults to stdout")
  cmd.Flags().BoolVar(&options.failOnMissing, "fail-on-missing", true, "Fail on referencing unset variables in templates")

  return cmd
}

@@ -160,10 +162,11 @@ func runToolBoxTemplate(f *util.Factory, out io.Writer, options *toolboxTemplate
    return fmt.Errorf("unable to read template: %s, error: %s", x, err)
  }

  rendered, err := r.Render(string(content), context, snippets)
  rendered, err := r.Render(string(content), context, snippets, options.failOnMissing)
  if err != nil {
    return fmt.Errorf("unable to render template: %s, error: %s", x, err)
  }

  io.WriteString(writer, rendered)

  // @check if we should need to add document separator
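The new `--fail-on-missing` flag is simply threaded through to the renderer. As a rough standalone sketch of the same semantics (this is not the kops templater itself, just Go's standard `text/template` configured the same way), "fail on missing" corresponds to erroring out when a template references a key that was never set:

```go
package main

import (
  "bytes"
  "fmt"
  "text/template"
)

// renderTemplate is a minimal sketch: when failOnMissing is true, referencing
// an unset variable returns an error instead of rendering "<no value>".
func renderTemplate(tmpl string, values map[string]interface{}, failOnMissing bool) (string, error) {
  opt := "missingkey=default"
  if failOnMissing {
    opt = "missingkey=error"
  }
  t, err := template.New("example").Option(opt).Parse(tmpl)
  if err != nil {
    return "", err
  }
  var buf bytes.Buffer
  if err := t.Execute(&buf, values); err != nil {
    return "", err
  }
  return buf.String(), nil
}

func main() {
  _, err := renderTemplate("cluster: {{ .clusterName }}", map[string]interface{}{}, true)
  fmt.Println(err) // non-nil, because clusterName is unset and failOnMissing is true
}
```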
@@ -16,7 +16,7 @@ The [dashboard project](https://github.com/kubernetes/dashboard) provides a nice

Install using:
```
kubectl create -f https://raw.githubusercontent.com/kubernetes/kops/master/addons/kubernetes-dashboard/v1.6.3.yaml
kubectl create -f https://raw.githubusercontent.com/kubernetes/kops/master/addons/kubernetes-dashboard/v1.7.1.yaml
```

And then navigate to `https://api.<clustername>/ui`
@@ -155,7 +155,11 @@ kubectl get pods -n kube-system -o \

You should see version 1.14.5 for the dnsmasq pod

_TODO_ if someone wants to provide the output.
```console
NAME                        IMAGE
kube-dns-4146767324-djthf   gcr.io/google_containers/kubedns-amd64:1.9,gcr.io/google_containers/k8s-dns-dnsmasq-amd64:1.14.5,gcr.io/google_containers/dnsmasq-metrics-amd64:1.0,gcr.io/google_containers/exechealthz-amd64:1.2
kube-dns-4146767324-kloxi   gcr.io/google_containers/kubedns-amd64:1.9,gcr.io/google_containers/k8s-dns-dnsmasq-amd64:1.14.5,gcr.io/google_containers/dnsmasq-metrics-amd64:1.0,gcr.io/google_containers/exechealthz-amd64:1.2
```

## More Information
- [Kubernetes Security Blog Post](https://security.googleblog.com/2017/10/behind-masq-yet-more-dns-and-dhcp.html)
@@ -29,6 +29,7 @@ kops toolbox template

### Options

```
      --fail-on-missing        Fail on referencing unset variables in templates (default true)
      --output string          Path to output file, otherwise defaults to stdout
      --snippets stringSlice   Path to directory containing snippets used for templating
      --template stringSlice   Path to template file or directory of templates to render
@@ -219,6 +219,17 @@ Will result in running kube-scheduler with the arguments `--policy-configmap=

Note that as of Kubernetes 1.8.0 kube-scheduler does not reload its configuration from the configmap automatically. You will need to ssh into the master instance and restart the Docker container manually. Also, this option is not supported during cluster creation, only during updates.

### kubeControllerManager
This block contains configuration for the `controller-manager`.

```yaml
spec:
  kubeControllerManager:
    horizontalPodAutoscalerSyncPeriod: 15s
```

For more details on `horizontalPodAutoscalerSyncPeriod` see the [HPA docs](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/)

#### Feature Gates

```yaml
@@ -116,7 +116,7 @@ func (c *NodeupModelContext) CNIConfDir() string {

// buildPKIKubeconfig generates a kubeconfig
func (c *NodeupModelContext) buildPKIKubeconfig(id string) (string, error) {
  caCertificate, err := c.KeyStore.Cert(fi.CertificateId_CA, false)
  caCertificate, err := c.KeyStore.FindCert(fi.CertificateId_CA)
  if err != nil {
    return "", fmt.Errorf("error fetching CA certificate from keystore: %v", err)
  }
@@ -28,6 +28,7 @@ import (
  "k8s.io/kops/upup/pkg/fi/nodeup/nodetasks"

  "github.com/golang/glog"
  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// s is a helper that builds a *string from a string value

@@ -171,3 +172,8 @@ func addHostPathMapping(pod *v1.Pod, container *v1.Container, name, path string)

  return &container.VolumeMounts[len(container.VolumeMounts)-1]
}

// convEtcdSettingsToMs converts etcd settings to a string rep of int milliseconds
func convEtcdSettingsToMs(dur *metav1.Duration) string {
  return strconv.FormatInt(dur.Nanoseconds()/1000000, 10)
}
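`convEtcdSettingsToMs` turns a duration into the whole-millisecond string that etcd's election-timeout and heartbeat-interval flags expect. A minimal self-contained sketch of the same conversion, using a plain `time.Duration` instead of the `metav1.Duration` wrapper used in kops:

```go
package main

import (
  "fmt"
  "strconv"
  "time"
)

// convToMs mirrors the helper above: format a duration as whole milliseconds.
func convToMs(d time.Duration) string {
  return strconv.FormatInt(d.Nanoseconds()/1000000, 10)
}

func main() {
  fmt.Println(convToMs(1 * time.Second))        // "1000" — e.g. a leaderElectionTimeout of 1s
  fmt.Println(convToMs(150 * time.Millisecond)) // "150"  — e.g. a heartbeatInterval of 150ms
}
```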
@@ -184,12 +184,25 @@ func (b *KubeAPIServerBuilder) buildPod() (*v1.Pod, error) {
  }

  if b.IsKubernetesGTE("1.7") {
    certPath := filepath.Join(b.PathSrvKubernetes(), "proxy-client.cert")
    certPath := filepath.Join(b.PathSrvKubernetes(), "apiserver-aggregator.cert")
    kubeAPIServer.ProxyClientCertFile = &certPath
    keyPath := filepath.Join(b.PathSrvKubernetes(), "proxy-client.key")
    keyPath := filepath.Join(b.PathSrvKubernetes(), "apiserver-aggregator.key")
    kubeAPIServer.ProxyClientKeyFile = &keyPath
  }

  // APIServer aggregation options
  if b.IsKubernetesGTE("1.7") {
    cert, err := b.KeyStore.FindCert("apiserver-aggregator-ca")
    if err != nil {
      return nil, fmt.Errorf("apiserver aggregator CA cert lookup failed: %v", err.Error())
    }

    if cert != nil {
      certPath := filepath.Join(b.PathSrvKubernetes(), "apiserver-aggregator-ca.cert")
      kubeAPIServer.RequestheaderClientCAFile = certPath
    }
  }

  // build the kube-apiserver flags for the service
  flags, err := flagbuilder.BuildFlagsList(b.Cluster.Spec.KubeAPIServer)
  if err != nil {
@@ -149,8 +149,13 @@ func (b *KubeletBuilder) buildSystemdEnvironmentFile(kubeletConfig *kops.Kubelet
  }

  if b.Cluster.Spec.Networking != nil && b.Cluster.Spec.Networking.Kubenet != nil {
    // Kubenet is neither CNI nor not-CNI, so we need to pass it `--network-plugin-dir` also
    flags += " --network-plugin-dir=" + b.CNIBinDir()
    // Kubenet is neither CNI nor not-CNI, so we need to pass it `--cni-bin-dir` also
    if b.IsKubernetesGTE("1.9") {
      // Flag renamed in #53564
      flags += " --cni-bin-dir=" + b.CNIBinDir()
    } else {
      flags += " --network-plugin-dir=" + b.CNIBinDir()
    }
  }

  if b.usesContainerizedMounter() {
@@ -192,22 +192,24 @@ type ProtokubeFlags struct {
  Channels []string `json:"channels,omitempty" flag:"channels"`
  Cloud    *string  `json:"cloud,omitempty" flag:"cloud"`
  // ClusterID flag is required only for vSphere cloud type, to pass cluster id information to protokube. AWS and GCE workflows ignore this flag.
  ClusterID                 *string `json:"cluster-id,omitempty" flag:"cluster-id"`
  Containerized             *bool   `json:"containerized,omitempty" flag:"containerized"`
  DNSInternalSuffix         *string `json:"dnsInternalSuffix,omitempty" flag:"dns-internal-suffix"`
  DNSProvider               *string `json:"dnsProvider,omitempty" flag:"dns"`
  DNSServer                 *string `json:"dns-server,omitempty" flag:"dns-server"`
  EtcdImage                 *string `json:"etcd-image,omitempty" flag:"etcd-image"`
  EtcdLeaderElectionTimeout *string `json:"etcd-election-timeout,omitempty" flag:"etcd-election-timeout"`
  EtcdHearbeatInterval      *string `json:"etcd-heartbeat-interval,omitempty" flag:"etcd-heartbeat-interval"`
  InitializeRBAC            *bool   `json:"initializeRBAC,omitempty" flag:"initialize-rbac"`
  LogLevel                  *int32  `json:"logLevel,omitempty" flag:"v"`
  Master                    *bool   `json:"master,omitempty" flag:"master"`
  PeerTLSCaFile             *string `json:"peer-ca,omitempty" flag:"peer-ca"`
  PeerTLSCertFile           *string `json:"peer-cert,omitempty" flag:"peer-cert"`
  PeerTLSKeyFile            *string `json:"peer-key,omitempty" flag:"peer-key"`
  TLSCAFile                 *string `json:"tls-ca,omitempty" flag:"tls-ca"`
  TLSCertFile               *string `json:"tls-cert,omitempty" flag:"tls-cert"`
  TLSKeyFile                *string `json:"tls-key,omitempty" flag:"tls-key"`
  Zone                      []string `json:"zone,omitempty" flag:"zone"`
}

// ProtokubeFlags is responsible for building the command line flags for protokube

@@ -216,12 +218,25 @@ func (t *ProtokubeBuilder) ProtokubeFlags(k8sVersion semver.Version) *ProtokubeF
  // lets keep that for another PR and allow the version change
  imageVersion := t.Cluster.Spec.EtcdClusters[0].Version

  var leaderElectionTimeout string
  var heartbeatInterval string

  if v := t.Cluster.Spec.EtcdClusters[0].LeaderElectionTimeout; v != nil {
    leaderElectionTimeout = convEtcdSettingsToMs(v)
  }

  if v := t.Cluster.Spec.EtcdClusters[0].HeartbeatInterval; v != nil {
    heartbeatInterval = convEtcdSettingsToMs(v)
  }

  f := &ProtokubeFlags{
    Channels:                  t.NodeupConfig.Channels,
    Containerized:             fi.Bool(true),
    EtcdImage:                 s(fmt.Sprintf("gcr.io/google_containers/etcd:%s", imageVersion)),
    EtcdLeaderElectionTimeout: s(leaderElectionTimeout),
    EtcdHearbeatInterval:      s(heartbeatInterval),
    LogLevel:                  fi.Int32(4),
    Master:                    b(t.IsMaster),
  }

  // initialize rbac on Kubernetes >= 1.6 and master
@@ -21,6 +21,7 @@ import (
  "path/filepath"
  "strings"

  "github.com/golang/glog"
  "k8s.io/kops/upup/pkg/fi"
  "k8s.io/kops/upup/pkg/fi/nodeup/nodetasks"
)

@@ -117,7 +118,7 @@ func (b *SecretBuilder) Build(c *fi.ModelBuilderContext) error {
  }

  if b.IsKubernetesGTE("1.7") {

    // TODO: Remove - we use the apiserver-aggregator keypair instead (which is signed by a different CA)
    cert, err := b.KeyStore.Cert("apiserver-proxy-client", false)
    if err != nil {
      return fmt.Errorf("apiserver proxy client cert lookup failed: %v", err.Error())

@@ -153,6 +154,22 @@ func (b *SecretBuilder) Build(c *fi.ModelBuilderContext) error {
    c.AddTask(t)
  }

  if b.IsKubernetesGTE("1.7") {
    if err := b.writeCertificate(c, "apiserver-aggregator"); err != nil {
      return err
    }

    if err := b.writePrivateKey(c, "apiserver-aggregator"); err != nil {
      return err
    }
  }

  if b.IsKubernetesGTE("1.7") {
    if err := b.writeCertificate(c, "apiserver-aggregator-ca"); err != nil {
      return err
    }
  }

  if b.SecretStore != nil {
    key := "kube"
    token, err := b.SecretStore.FindSecret(key)

@@ -200,6 +217,55 @@ func (b *SecretBuilder) Build(c *fi.ModelBuilderContext) error {
  return nil
}

// writeCertificate writes the specified certificate to the local filesystem, under PathSrvKubernetes()
func (b *SecretBuilder) writeCertificate(c *fi.ModelBuilderContext, id string) error {
  cert, err := b.KeyStore.FindCert(id)
  if err != nil {
    return fmt.Errorf("cert lookup failed for %q: %v", id, err)
  }

  if cert != nil {
    serialized, err := cert.AsString()
    if err != nil {
      return err
    }

    t := &nodetasks.File{
      Path:     filepath.Join(b.PathSrvKubernetes(), id+".cert"),
      Contents: fi.NewStringResource(serialized),
      Type:     nodetasks.FileType_File,
    }
    c.AddTask(t)
  } else {
    // TODO: Make this an error?
    glog.Warningf("certificate %q not found", id)
  }

  return nil
}

// writePrivateKey writes the specified private key to the local filesystem, under PathSrvKubernetes()
func (b *SecretBuilder) writePrivateKey(c *fi.ModelBuilderContext, id string) error {
  key, err := b.KeyStore.FindPrivateKey(id)
  if err != nil {
    return fmt.Errorf("private key lookup failed for %q: %v", id, err)
  }

  serialized, err := key.AsString()
  if err != nil {
    return err
  }

  t := &nodetasks.File{
    Path:     filepath.Join(b.PathSrvKubernetes(), id+".key"),
    Contents: fi.NewStringResource(serialized),
    Type:     nodetasks.FileType_File,
  }
  c.AddTask(t)

  return nil
}

// allTokens returns a map of all tokens
func (b *SecretBuilder) allTokens() (map[string]string, error) {
  tokens := make(map[string]string)
@@ -180,7 +180,8 @@ type Assets struct {

// IAMSpec adds control over the IAM security policies applied to resources
type IAMSpec struct {
  Legacy                 bool `json:"legacy"`
  AllowContainerRegistry bool `json:"allowContainerRegistry,omitempty"`
}

// HookSpec is a definition hook

@@ -292,6 +293,10 @@ type EtcdClusterSpec struct {
  EnableEtcdTLS bool `json:"enableEtcdTLS,omitempty"`
  // Version is the version of etcd to run i.e. 2.1.2, 3.0.17 etcd
  Version string `json:"version,omitempty"`
  // LeaderElectionTimeout is the time (in milliseconds) for an etcd leader election timeout
  LeaderElectionTimeout *metav1.Duration `json:"leaderElectionTimeout,omitempty"`
  // HeartbeatInterval is the time (in milliseconds) for an etcd heartbeat interval
  HeartbeatInterval *metav1.Duration `json:"heartbeatInterval,omitempty"`
}

// EtcdMemberSpec is a specification for a etcd member
@@ -169,7 +169,7 @@ type KubeProxyConfig struct {

// KubeAPIServerConfig defines the configuration for the kube api
type KubeAPIServerConfig struct {
  // Image is the docker container used
  // Image is the docker container usedrun
  Image string `json:"image,omitempty"`
  // LogLevel is the logging level of the api
  LogLevel int32 `json:"logLevel,omitempty" flag:"v" flag-empty:"0"`

@@ -258,6 +258,17 @@ type KubeAPIServerConfig struct {
  AuthorizationRBACSuperUser *string `json:"authorizationRbacSuperUser,omitempty" flag:"authorization-rbac-super-user"`
  // ExperimentalEncryptionProviderConfig enables encryption at rest for secrets.
  ExperimentalEncryptionProviderConfig *string `json:"experimentalEncryptionProviderConfig,omitempty" flag:"experimental-encryption-provider-config"`

  // List of request headers to inspect for usernames. X-Remote-User is common.
  RequestheaderUsernameHeaders []string `json:"requestheaderUsernameHeaders,omitempty" flag:"requestheader-username-headers"`
  // List of request headers to inspect for groups. X-Remote-Group is suggested.
  RequestheaderGroupHeaders []string `json:"requestheaderGroupHeaders,omitempty" flag:"requestheader-group-headers"`
  // List of request header prefixes to inspect. X-Remote-Extra- is suggested.
  RequestheaderExtraHeaderPrefixes []string `json:"requestheaderExtraHeaderPrefixes,omitempty" flag:"requestheader-extra-headers-prefix"`
  // Root certificate bundle to use to verify client certificates on incoming requests before trusting usernames in headers specified by --requestheader-username-headers
  RequestheaderClientCAFile string `json:"requestheaderClientCAFile,omitempty" flag:"requestheader-client-ca-file"`
  // List of client certificate common names to allow to provide usernames in headers specified by --requestheader-username-headers. If empty, any client certificate validated by the authorities in --requestheader-client-ca-file is allowed.
  RequestheaderAllowedNames []string `json:"requestheaderAllowedNames,omitempty" flag:"requestheader-allowed-names"`
}

// KubeControllerManagerConfig is the configuration for the controller

@@ -294,6 +305,10 @@ type KubeControllerManagerConfig struct {
  TerminatedPodGCThreshold *int32 `json:"terminatedPodGCThreshold,omitempty" flag:"terminated-pod-gc-threshold"`
  // UseServiceAccountCredentials controls whether we use individual service account credentials for each controller.
  UseServiceAccountCredentials *bool `json:"useServiceAccountCredentials,omitempty" flag:"use-service-account-credentials"`
  // HorizontalPodAutoscalerSyncPeriod is the amount of time between syncs
  // During each period, the controller manager queries the resource utilization
  // against the metrics specified in each HorizontalPodAutoscaler definition
  HorizontalPodAutoscalerSyncPeriod *metav1.Duration `json:"horizontalPodAutoscalerSyncPeriod,omitempty" flag:"horizontal-pod-autoscaler-sync-period"`
}

type CloudControllerManagerConfig struct {
@@ -179,7 +179,8 @@ type Assets struct {

// IAMSpec adds control over the IAM security policies applied to resources
type IAMSpec struct {
  Legacy                 bool `json:"legacy"`
  AllowContainerRegistry bool `json:"allowContainerRegistry,omitempty"`
}

// HookSpec is a definition hook

@@ -291,6 +292,10 @@ type EtcdClusterSpec struct {
  EnableEtcdTLS bool `json:"enableEtcdTLS,omitempty"`
  // Version is the version of etcd to run i.e. 2.1.2, 3.0.17 etcd
  Version string `json:"version,omitempty"`
  // LeaderElectionTimeout is the time (in milliseconds) for an etcd leader election timeout
  LeaderElectionTimeout *metav1.Duration `json:"leaderElectionTimeout,omitempty"`
  // HeartbeatInterval is the time (in milliseconds) for an etcd heartbeat interval
  HeartbeatInterval *metav1.Duration `json:"heartbeatInterval,omitempty"`
}

// EtcdMemberSpec is a specification for a etcd member
@@ -258,6 +258,17 @@ type KubeAPIServerConfig struct {
  AuthorizationRBACSuperUser *string `json:"authorizationRbacSuperUser,omitempty" flag:"authorization-rbac-super-user"`
  // ExperimentalEncryptionProviderConfig enables encryption at rest for secrets.
  ExperimentalEncryptionProviderConfig *string `json:"experimentalEncryptionProviderConfig,omitempty" flag:"experimental-encryption-provider-config"`

  // List of request headers to inspect for usernames. X-Remote-User is common.
  RequestheaderUsernameHeaders []string `json:"requestheaderUsernameHeaders,omitempty" flag:"requestheader-username-headers"`
  // List of request headers to inspect for groups. X-Remote-Group is suggested.
  RequestheaderGroupHeaders []string `json:"requestheaderGroupHeaders,omitempty" flag:"requestheader-group-headers"`
  // List of request header prefixes to inspect. X-Remote-Extra- is suggested.
  RequestheaderExtraHeaderPrefixes []string `json:"requestheaderExtraHeaderPrefixes,omitempty" flag:"requestheader-extra-headers-prefix"`
  // Root certificate bundle to use to verify client certificates on incoming requests before trusting usernames in headers specified by --requestheader-username-headers
  RequestheaderClientCAFile string `json:"requestheaderClientCAFile,omitempty" flag:"requestheader-client-ca-file"`
  // List of client certificate common names to allow to provide usernames in headers specified by --requestheader-username-headers. If empty, any client certificate validated by the authorities in --requestheader-client-ca-file is allowed.
  RequestheaderAllowedNames []string `json:"requestheaderAllowedNames,omitempty" flag:"requestheader-allowed-names"`
}

// KubeControllerManagerConfig is the configuration for the controller

@@ -294,6 +305,10 @@ type KubeControllerManagerConfig struct {
  TerminatedPodGCThreshold *int32 `json:"terminatedPodGCThreshold,omitempty" flag:"terminated-pod-gc-threshold"`
  // UseServiceAccountCredentials controls whether we use individual service account credentials for each controller.
  UseServiceAccountCredentials *bool `json:"useServiceAccountCredentials,omitempty" flag:"use-service-account-credentials"`
  // HorizontalPodAutoscalerSyncPeriod is the amount of time between syncs
  // During each period, the controller manager queries the resource utilization
  // against the metrics specified in each HorizontalPodAutoscaler definition
  HorizontalPodAutoscalerSyncPeriod *metav1.Duration `json:"horizontalPodAutoscalerSyncPeriod,omitempty" flag:"horizontal-pod-autoscaler-sync-period"`
}

type CloudControllerManagerConfig struct {
@@ -1179,6 +1179,8 @@ func autoConvert_v1alpha1_EtcdClusterSpec_To_kops_EtcdClusterSpec(in *EtcdCluste
  }
  out.EnableEtcdTLS = in.EnableEtcdTLS
  out.Version = in.Version
  out.LeaderElectionTimeout = in.LeaderElectionTimeout
  out.HeartbeatInterval = in.HeartbeatInterval
  return nil
}

@@ -1203,6 +1205,8 @@ func autoConvert_kops_EtcdClusterSpec_To_v1alpha1_EtcdClusterSpec(in *kops.EtcdC
  }
  out.EnableEtcdTLS = in.EnableEtcdTLS
  out.Version = in.Version
  out.LeaderElectionTimeout = in.LeaderElectionTimeout
  out.HeartbeatInterval = in.HeartbeatInterval
  return nil
}

@@ -1539,6 +1543,7 @@ func Convert_kops_HookSpec_To_v1alpha1_HookSpec(in *kops.HookSpec, out *HookSpec

func autoConvert_v1alpha1_IAMSpec_To_kops_IAMSpec(in *IAMSpec, out *kops.IAMSpec, s conversion.Scope) error {
  out.Legacy = in.Legacy
  out.AllowContainerRegistry = in.AllowContainerRegistry
  return nil
}

@@ -1549,6 +1554,7 @@ func Convert_v1alpha1_IAMSpec_To_kops_IAMSpec(in *IAMSpec, out *kops.IAMSpec, s

func autoConvert_kops_IAMSpec_To_v1alpha1_IAMSpec(in *kops.IAMSpec, out *IAMSpec, s conversion.Scope) error {
  out.Legacy = in.Legacy
  out.AllowContainerRegistry = in.AllowContainerRegistry
  return nil
}

@@ -1810,6 +1816,11 @@ func autoConvert_v1alpha1_KubeAPIServerConfig_To_kops_KubeAPIServerConfig(in *Ku
  out.AuthorizationMode = in.AuthorizationMode
  out.AuthorizationRBACSuperUser = in.AuthorizationRBACSuperUser
  out.ExperimentalEncryptionProviderConfig = in.ExperimentalEncryptionProviderConfig
  out.RequestheaderUsernameHeaders = in.RequestheaderUsernameHeaders
  out.RequestheaderGroupHeaders = in.RequestheaderGroupHeaders
  out.RequestheaderExtraHeaderPrefixes = in.RequestheaderExtraHeaderPrefixes
  out.RequestheaderClientCAFile = in.RequestheaderClientCAFile
  out.RequestheaderAllowedNames = in.RequestheaderAllowedNames
  return nil
}

@@ -1862,6 +1873,11 @@ func autoConvert_kops_KubeAPIServerConfig_To_v1alpha1_KubeAPIServerConfig(in *ko
  out.AuthorizationMode = in.AuthorizationMode
  out.AuthorizationRBACSuperUser = in.AuthorizationRBACSuperUser
  out.ExperimentalEncryptionProviderConfig = in.ExperimentalEncryptionProviderConfig
  out.RequestheaderUsernameHeaders = in.RequestheaderUsernameHeaders
  out.RequestheaderGroupHeaders = in.RequestheaderGroupHeaders
  out.RequestheaderExtraHeaderPrefixes = in.RequestheaderExtraHeaderPrefixes
  out.RequestheaderClientCAFile = in.RequestheaderClientCAFile
  out.RequestheaderAllowedNames = in.RequestheaderAllowedNames
  return nil
}

@@ -1893,6 +1909,7 @@ func autoConvert_v1alpha1_KubeControllerManagerConfig_To_kops_KubeControllerMana
  out.AttachDetachReconcileSyncPeriod = in.AttachDetachReconcileSyncPeriod
  out.TerminatedPodGCThreshold = in.TerminatedPodGCThreshold
  out.UseServiceAccountCredentials = in.UseServiceAccountCredentials
  out.HorizontalPodAutoscalerSyncPeriod = in.HorizontalPodAutoscalerSyncPeriod
  return nil
}

@@ -1924,6 +1941,7 @@ func autoConvert_kops_KubeControllerManagerConfig_To_v1alpha1_KubeControllerMana
  out.AttachDetachReconcileSyncPeriod = in.AttachDetachReconcileSyncPeriod
  out.TerminatedPodGCThreshold = in.TerminatedPodGCThreshold
  out.UseServiceAccountCredentials = in.UseServiceAccountCredentials
  out.HorizontalPodAutoscalerSyncPeriod = in.HorizontalPodAutoscalerSyncPeriod
  return nil
}
@@ -1233,6 +1233,24 @@ func (in *EtcdClusterSpec) DeepCopyInto(out *EtcdClusterSpec) {
      }
    }
  }
  if in.LeaderElectionTimeout != nil {
    in, out := &in.LeaderElectionTimeout, &out.LeaderElectionTimeout
    if *in == nil {
      *out = nil
    } else {
      *out = new(v1.Duration)
      **out = **in
    }
  }
  if in.HeartbeatInterval != nil {
    in, out := &in.HeartbeatInterval, &out.HeartbeatInterval
    if *in == nil {
      *out = nil
    } else {
      *out = new(v1.Duration)
      **out = **in
    }
  }
  return
}

@@ -2017,6 +2035,26 @@ func (in *KubeAPIServerConfig) DeepCopyInto(out *KubeAPIServerConfig) {
      **out = **in
    }
  }
  if in.RequestheaderUsernameHeaders != nil {
    in, out := &in.RequestheaderUsernameHeaders, &out.RequestheaderUsernameHeaders
    *out = make([]string, len(*in))
    copy(*out, *in)
  }
  if in.RequestheaderGroupHeaders != nil {
    in, out := &in.RequestheaderGroupHeaders, &out.RequestheaderGroupHeaders
    *out = make([]string, len(*in))
    copy(*out, *in)
  }
  if in.RequestheaderExtraHeaderPrefixes != nil {
    in, out := &in.RequestheaderExtraHeaderPrefixes, &out.RequestheaderExtraHeaderPrefixes
    *out = make([]string, len(*in))
    copy(*out, *in)
  }
  if in.RequestheaderAllowedNames != nil {
    in, out := &in.RequestheaderAllowedNames, &out.RequestheaderAllowedNames
    *out = make([]string, len(*in))
    copy(*out, *in)
  }
  return
}

@@ -2087,6 +2125,15 @@ func (in *KubeControllerManagerConfig) DeepCopyInto(out *KubeControllerManagerCo
      **out = **in
    }
  }
  if in.HorizontalPodAutoscalerSyncPeriod != nil {
    in, out := &in.HorizontalPodAutoscalerSyncPeriod, &out.HorizontalPodAutoscalerSyncPeriod
    if *in == nil {
      *out = nil
    } else {
      *out = new(v1.Duration)
      **out = **in
    }
  }
  return
}
@@ -180,7 +180,8 @@ type Assets struct {

// IAMSpec adds control over the IAM security policies applied to resources
type IAMSpec struct {
  Legacy                 bool `json:"legacy"`
  AllowContainerRegistry bool `json:"allowContainerRegistry,omitempty"`
}

// HookSpec is a definition hook

@@ -289,6 +290,10 @@ type EtcdClusterSpec struct {
  EnableEtcdTLS bool `json:"enableEtcdTLS,omitempty"`
  // Version is the version of etcd to run i.e. 2.1.2, 3.0.17 etcd
  Version string `json:"version,omitempty"`
  // LeaderElectionTimeout is the time (in milliseconds) for an etcd leader election timeout
  LeaderElectionTimeout *metav1.Duration `json:"leaderElectionTimeout,omitempty"`
  // HeartbeatInterval is the time (in milliseconds) for an etcd heartbeat interval
  HeartbeatInterval *metav1.Duration `json:"heartbeatInterval,omitempty"`
}

// EtcdMemberSpec is a specification for a etcd member
@@ -258,6 +258,17 @@ type KubeAPIServerConfig struct {
  AuthorizationRBACSuperUser *string `json:"authorizationRbacSuperUser,omitempty" flag:"authorization-rbac-super-user"`
  // ExperimentalEncryptionProviderConfig enables encryption at rest for secrets.
  ExperimentalEncryptionProviderConfig *string `json:"experimentalEncryptionProviderConfig,omitempty" flag:"experimental-encryption-provider-config"`

  // List of request headers to inspect for usernames. X-Remote-User is common.
  RequestheaderUsernameHeaders []string `json:"requestheaderUsernameHeaders,omitempty" flag:"requestheader-username-headers"`
  // List of request headers to inspect for groups. X-Remote-Group is suggested.
  RequestheaderGroupHeaders []string `json:"requestheaderGroupHeaders,omitempty" flag:"requestheader-group-headers"`
  // List of request header prefixes to inspect. X-Remote-Extra- is suggested.
  RequestheaderExtraHeaderPrefixes []string `json:"requestheaderExtraHeaderPrefixes,omitempty" flag:"requestheader-extra-headers-prefix"`
  // Root certificate bundle to use to verify client certificates on incoming requests before trusting usernames in headers specified by --requestheader-username-headers
  RequestheaderClientCAFile string `json:"requestheaderClientCAFile,omitempty" flag:"requestheader-client-ca-file"`
  // List of client certificate common names to allow to provide usernames in headers specified by --requestheader-username-headers. If empty, any client certificate validated by the authorities in --requestheader-client-ca-file is allowed.
  RequestheaderAllowedNames []string `json:"requestheaderAllowedNames,omitempty" flag:"requestheader-allowed-names"`
}

// KubeControllerManagerConfig is the configuration for the controller

@@ -292,9 +303,12 @@ type KubeControllerManagerConfig struct {
  // before the terminated pod garbage collector starts deleting terminated pods.
  // If <= 0, the terminated pod garbage collector is disabled.
  TerminatedPodGCThreshold *int32 `json:"terminatedPodGCThreshold,omitempty" flag:"terminated-pod-gc-threshold"`

  // UseServiceAccountCredentials controls whether we use individual service account credentials for each controller.
  UseServiceAccountCredentials *bool `json:"useServiceAccountCredentials,omitempty" flag:"use-service-account-credentials"`
  // HorizontalPodAutoscalerSyncPeriod is the amount of time between syncs
  // During each period, the controller manager queries the resource utilization
  // against the metrics specified in each HorizontalPodAutoscaler definition
  HorizontalPodAutoscalerSyncPeriod *metav1.Duration `json:"horizontalPodAutoscalerSyncPeriod,omitempty" flag:"horizontal-pod-autoscaler-sync-period"`
}

type CloudControllerManagerConfig struct {
@@ -1278,6 +1278,8 @@ func autoConvert_v1alpha2_EtcdClusterSpec_To_kops_EtcdClusterSpec(in *EtcdCluste
  }
  out.EnableEtcdTLS = in.EnableEtcdTLS
  out.Version = in.Version
  out.LeaderElectionTimeout = in.LeaderElectionTimeout
  out.HeartbeatInterval = in.HeartbeatInterval
  return nil
}

@@ -1302,6 +1304,8 @@ func autoConvert_kops_EtcdClusterSpec_To_v1alpha2_EtcdClusterSpec(in *kops.EtcdC
  }
  out.EnableEtcdTLS = in.EnableEtcdTLS
  out.Version = in.Version
  out.LeaderElectionTimeout = in.LeaderElectionTimeout
  out.HeartbeatInterval = in.HeartbeatInterval
  return nil
}

@@ -1648,6 +1652,7 @@ func Convert_kops_HookSpec_To_v1alpha2_HookSpec(in *kops.HookSpec, out *HookSpec

func autoConvert_v1alpha2_IAMSpec_To_kops_IAMSpec(in *IAMSpec, out *kops.IAMSpec, s conversion.Scope) error {
  out.Legacy = in.Legacy
  out.AllowContainerRegistry = in.AllowContainerRegistry
  return nil
}

@@ -1658,6 +1663,7 @@ func Convert_v1alpha2_IAMSpec_To_kops_IAMSpec(in *IAMSpec, out *kops.IAMSpec, s

func autoConvert_kops_IAMSpec_To_v1alpha2_IAMSpec(in *kops.IAMSpec, out *IAMSpec, s conversion.Scope) error {
  out.Legacy = in.Legacy
  out.AllowContainerRegistry = in.AllowContainerRegistry
  return nil
}

@@ -2072,6 +2078,11 @@ func autoConvert_v1alpha2_KubeAPIServerConfig_To_kops_KubeAPIServerConfig(in *Ku
  out.AuthorizationMode = in.AuthorizationMode
  out.AuthorizationRBACSuperUser = in.AuthorizationRBACSuperUser
  out.ExperimentalEncryptionProviderConfig = in.ExperimentalEncryptionProviderConfig
  out.RequestheaderUsernameHeaders = in.RequestheaderUsernameHeaders
  out.RequestheaderGroupHeaders = in.RequestheaderGroupHeaders
  out.RequestheaderExtraHeaderPrefixes = in.RequestheaderExtraHeaderPrefixes
  out.RequestheaderClientCAFile = in.RequestheaderClientCAFile
  out.RequestheaderAllowedNames = in.RequestheaderAllowedNames
  return nil
}

@@ -2124,6 +2135,11 @@ func autoConvert_kops_KubeAPIServerConfig_To_v1alpha2_KubeAPIServerConfig(in *ko
  out.AuthorizationMode = in.AuthorizationMode
  out.AuthorizationRBACSuperUser = in.AuthorizationRBACSuperUser
  out.ExperimentalEncryptionProviderConfig = in.ExperimentalEncryptionProviderConfig
  out.RequestheaderUsernameHeaders = in.RequestheaderUsernameHeaders
  out.RequestheaderGroupHeaders = in.RequestheaderGroupHeaders
  out.RequestheaderExtraHeaderPrefixes = in.RequestheaderExtraHeaderPrefixes
  out.RequestheaderClientCAFile = in.RequestheaderClientCAFile
  out.RequestheaderAllowedNames = in.RequestheaderAllowedNames
  return nil
}

@@ -2155,6 +2171,7 @@ func autoConvert_v1alpha2_KubeControllerManagerConfig_To_kops_KubeControllerMana
  out.AttachDetachReconcileSyncPeriod = in.AttachDetachReconcileSyncPeriod
  out.TerminatedPodGCThreshold = in.TerminatedPodGCThreshold
  out.UseServiceAccountCredentials = in.UseServiceAccountCredentials
  out.HorizontalPodAutoscalerSyncPeriod = in.HorizontalPodAutoscalerSyncPeriod
  return nil
}

@@ -2186,6 +2203,7 @@ func autoConvert_kops_KubeControllerManagerConfig_To_v1alpha2_KubeControllerMana
  out.AttachDetachReconcileSyncPeriod = in.AttachDetachReconcileSyncPeriod
  out.TerminatedPodGCThreshold = in.TerminatedPodGCThreshold
  out.UseServiceAccountCredentials = in.UseServiceAccountCredentials
  out.HorizontalPodAutoscalerSyncPeriod = in.HorizontalPodAutoscalerSyncPeriod
  return nil
}
@@ -1243,6 +1243,24 @@ func (in *EtcdClusterSpec) DeepCopyInto(out *EtcdClusterSpec) {
      }
    }
  }
  if in.LeaderElectionTimeout != nil {
    in, out := &in.LeaderElectionTimeout, &out.LeaderElectionTimeout
    if *in == nil {
      *out = nil
    } else {
      *out = new(v1.Duration)
      **out = **in
    }
  }
  if in.HeartbeatInterval != nil {
    in, out := &in.HeartbeatInterval, &out.HeartbeatInterval
    if *in == nil {
      *out = nil
    } else {
      *out = new(v1.Duration)
      **out = **in
    }
  }
  return
}

@@ -2143,6 +2161,26 @@ func (in *KubeAPIServerConfig) DeepCopyInto(out *KubeAPIServerConfig) {
      **out = **in
    }
  }
  if in.RequestheaderUsernameHeaders != nil {
    in, out := &in.RequestheaderUsernameHeaders, &out.RequestheaderUsernameHeaders
    *out = make([]string, len(*in))
    copy(*out, *in)
  }
  if in.RequestheaderGroupHeaders != nil {
    in, out := &in.RequestheaderGroupHeaders, &out.RequestheaderGroupHeaders
    *out = make([]string, len(*in))
    copy(*out, *in)
  }
  if in.RequestheaderExtraHeaderPrefixes != nil {
    in, out := &in.RequestheaderExtraHeaderPrefixes, &out.RequestheaderExtraHeaderPrefixes
    *out = make([]string, len(*in))
    copy(*out, *in)
  }
  if in.RequestheaderAllowedNames != nil {
    in, out := &in.RequestheaderAllowedNames, &out.RequestheaderAllowedNames
    *out = make([]string, len(*in))
    copy(*out, *in)
  }
  return
}

@@ -2213,6 +2251,15 @@ func (in *KubeControllerManagerConfig) DeepCopyInto(out *KubeControllerManagerCo
      **out = **in
    }
  }
  if in.HorizontalPodAutoscalerSyncPeriod != nil {
    in, out := &in.HorizontalPodAutoscalerSyncPeriod, &out.HorizontalPodAutoscalerSyncPeriod
    if *in == nil {
      *out = nil
    } else {
      *out = new(v1.Duration)
      **out = **in
    }
  }
  return
}
@@ -117,6 +117,9 @@ func ValidateCluster(c *kops.Cluster, strict bool) *field.Error {
  if strict && c.Spec.KubeControllerManager == nil {
    return field.Required(fieldSpec.Child("KubeControllerManager"), "KubeControllerManager not configured")
  }
  if kubernetesRelease.LT(semver.MustParse("1.7.0")) && c.Spec.ExternalCloudControllerManager != nil {
    return field.Invalid(fieldSpec.Child("ExternalCloudControllerManager"), c.Spec.ExternalCloudControllerManager, "ExternalCloudControllerManager is not supported in version 1.6.0 or lower")
  }
  if strict && c.Spec.KubeDNS == nil {
    return field.Required(fieldSpec.Child("KubeDNS"), "KubeDNS not configured")
  }

@@ -279,22 +282,23 @@ func ValidateCluster(c *kops.Cluster, strict bool) *field.Error {
  }

  if c.Spec.Kubelet != nil && (strict || c.Spec.Kubelet.CloudProvider != "") {
    if k8sCloudProvider != c.Spec.Kubelet.CloudProvider {
    if c.Spec.Kubelet.CloudProvider != "external" && k8sCloudProvider != c.Spec.Kubelet.CloudProvider {
      return field.Invalid(fieldSpec.Child("Kubelet", "CloudProvider"), c.Spec.Kubelet.CloudProvider, "Did not match cluster CloudProvider")
    }
  }
  if c.Spec.MasterKubelet != nil && (strict || c.Spec.MasterKubelet.CloudProvider != "") {
    if k8sCloudProvider != c.Spec.MasterKubelet.CloudProvider {
    if c.Spec.MasterKubelet.CloudProvider != "external" && k8sCloudProvider != c.Spec.MasterKubelet.CloudProvider {
      return field.Invalid(fieldSpec.Child("MasterKubelet", "CloudProvider"), c.Spec.MasterKubelet.CloudProvider, "Did not match cluster CloudProvider")

    }
  }
  if c.Spec.KubeAPIServer != nil && (strict || c.Spec.KubeAPIServer.CloudProvider != "") {
    if k8sCloudProvider != c.Spec.KubeAPIServer.CloudProvider {
    if c.Spec.KubeAPIServer.CloudProvider != "external" && k8sCloudProvider != c.Spec.KubeAPIServer.CloudProvider {
      return field.Invalid(fieldSpec.Child("KubeAPIServer", "CloudProvider"), c.Spec.KubeAPIServer.CloudProvider, "Did not match cluster CloudProvider")
    }
  }
  if c.Spec.KubeControllerManager != nil && (strict || c.Spec.KubeControllerManager.CloudProvider != "") {
    if k8sCloudProvider != c.Spec.KubeControllerManager.CloudProvider {
    if c.Spec.KubeControllerManager.CloudProvider != "external" && k8sCloudProvider != c.Spec.KubeControllerManager.CloudProvider {
      return field.Invalid(fieldSpec.Child("KubeControllerManager", "CloudProvider"), c.Spec.KubeControllerManager.CloudProvider, "Did not match cluster CloudProvider")
    }
  }
@@ -1402,6 +1402,24 @@ func (in *EtcdClusterSpec) DeepCopyInto(out *EtcdClusterSpec) {
      }
    }
  }
  if in.LeaderElectionTimeout != nil {
    in, out := &in.LeaderElectionTimeout, &out.LeaderElectionTimeout
    if *in == nil {
      *out = nil
    } else {
      *out = new(v1.Duration)
      **out = **in
    }
  }
  if in.HeartbeatInterval != nil {
    in, out := &in.HeartbeatInterval, &out.HeartbeatInterval
    if *in == nil {
      *out = nil
    } else {
      *out = new(v1.Duration)
      **out = **in
    }
  }
  return
}

@@ -2362,6 +2380,26 @@ func (in *KubeAPIServerConfig) DeepCopyInto(out *KubeAPIServerConfig) {
      **out = **in
    }
  }
  if in.RequestheaderUsernameHeaders != nil {
    in, out := &in.RequestheaderUsernameHeaders, &out.RequestheaderUsernameHeaders
    *out = make([]string, len(*in))
    copy(*out, *in)
  }
  if in.RequestheaderGroupHeaders != nil {
    in, out := &in.RequestheaderGroupHeaders, &out.RequestheaderGroupHeaders
    *out = make([]string, len(*in))
    copy(*out, *in)
  }
  if in.RequestheaderExtraHeaderPrefixes != nil {
    in, out := &in.RequestheaderExtraHeaderPrefixes, &out.RequestheaderExtraHeaderPrefixes
    *out = make([]string, len(*in))
    copy(*out, *in)
  }
  if in.RequestheaderAllowedNames != nil {
    in, out := &in.RequestheaderAllowedNames, &out.RequestheaderAllowedNames
    *out = make([]string, len(*in))
    copy(*out, *in)
  }
  return
}

@@ -2432,6 +2470,15 @@ func (in *KubeControllerManagerConfig) DeepCopyInto(out *KubeControllerManagerCo
      **out = **in
    }
  }
  if in.HorizontalPodAutoscalerSyncPeriod != nil {
    in, out := &in.HorizontalPodAutoscalerSyncPeriod, &out.HorizontalPodAutoscalerSyncPeriod
    if *in == nil {
      *out = nil
    } else {
      *out = new(v1.Duration)
      **out = **in
    }
  }
  return
}
@@ -43,6 +43,7 @@ go_library(
        "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library",
        "//vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws:go_default_library",
    ],
)
@@ -94,6 +94,10 @@ func (b *KubeAPIServerOptionsBuilder) BuildOptions(o interface{}) error {
    clusterSpec.KubeAPIServer.AuthorizationMode = fi.String("RBAC")
  }

  if err := b.configureAggregation(clusterSpec); err != nil {
    return nil
  }

  image, err := Image("kube-apiserver", clusterSpec, b.AssetBuilder)
  if err != nil {
    return err

@@ -115,6 +119,10 @@ func (b *KubeAPIServerOptionsBuilder) BuildOptions(o interface{}) error {
    return fmt.Errorf("unknown cloudprovider %q", clusterSpec.CloudProvider)
  }

  if clusterSpec.ExternalCloudControllerManager != nil {
    c.CloudProvider = "external"
  }

  c.LogLevel = 2
  c.SecurePort = 443
  c.Address = "127.0.0.1"

@@ -243,3 +251,15 @@ func (b *KubeAPIServerOptionsBuilder) buildAPIServerCount(clusterSpec *kops.Clus

  return count
}

// configureAggregation sets up the aggregation options
func (b *KubeAPIServerOptionsBuilder) configureAggregation(clusterSpec *kops.ClusterSpec) error {
  if b.IsKubernetesGTE("1.7") {
    clusterSpec.KubeAPIServer.RequestheaderAllowedNames = []string{"aggregator"}
    clusterSpec.KubeAPIServer.RequestheaderExtraHeaderPrefixes = []string{"X-Remote-Extra-"}
    clusterSpec.KubeAPIServer.RequestheaderGroupHeaders = []string{"X-Remote-Group"}
    clusterSpec.KubeAPIServer.RequestheaderUsernameHeaders = []string{"X-Remote-User"}
  }

  return nil
}
@@ -117,6 +117,10 @@ func (b *KubeControllerManagerOptionsBuilder) BuildOptions(o interface{}) error
    return fmt.Errorf("unknown cloudprovider %q", clusterSpec.CloudProvider)
  }

  if clusterSpec.ExternalCloudControllerManager != nil {
    kcm.CloudProvider = "external"
  }

  if kcm.Master == "" {
    if b.Context.IsKubernetesLT("1.6") {
      // As of 1.6, we find the master using kubeconfig
@@ -172,6 +172,10 @@ func (b *KubeletOptionsBuilder) BuildOptions(o interface{}) error {
    clusterSpec.Kubelet.HairpinMode = "promiscuous-bridge"
  }

  if clusterSpec.ExternalCloudControllerManager != nil {
    clusterSpec.Kubelet.CloudProvider = "external"
  }

  usesKubenet, err := UsesKubenet(clusterSpec)
  if err != nil {
    return err
@@ -100,13 +100,12 @@ func (l *Statement) Equal(r *Statement) bool {
// PolicyBuilder struct defines all valid fields to be used when building the
// AWS IAM policy document for a given instance group role.
type PolicyBuilder struct {
  Cluster        *kops.Cluster
  CreateECRPerms bool
  HostedZoneID   string
  KMSKeys        []string
  Region         string
  ResourceARN    *string
  Role           kops.InstanceGroupRole
  Cluster      *kops.Cluster
  HostedZoneID string
  KMSKeys      []string
  Region       string
  ResourceARN  *string
  Role         kops.InstanceGroupRole
}

// BuildAWSPolicy builds a set of IAM policy statements based on the

@@ -169,10 +168,6 @@ func (b *PolicyBuilder) BuildAWSPolicyMaster() (*Policy, error) {
    addKMSIAMPolicies(p, stringorslice.Slice(b.KMSKeys), b.Cluster.Spec.IAM.Legacy)
  }

  if b.Cluster.Spec.IAM.Legacy || b.CreateECRPerms {
    addECRPermissions(p)
  }

  if b.HostedZoneID != "" {
    addRoute53Permissions(p, b.HostedZoneID)
  }

@@ -181,6 +176,10 @@ func (b *PolicyBuilder) BuildAWSPolicyMaster() (*Policy, error) {
    addRoute53ListHostedZonesPermission(p)
  }

  if b.Cluster.Spec.IAM.Legacy || b.Cluster.Spec.IAM.AllowContainerRegistry {
    addECRPermissions(p)
  }

  return p, nil
}

@@ -199,10 +198,6 @@ func (b *PolicyBuilder) BuildAWSPolicyNode() (*Policy, error) {
    return nil, fmt.Errorf("failed to generate AWS IAM S3 access statements: %v", err)
  }

  if b.Cluster.Spec.IAM.Legacy || b.CreateECRPerms {
    addECRPermissions(p)
  }

  if b.Cluster.Spec.IAM.Legacy {
    if b.HostedZoneID != "" {
      addRoute53Permissions(p, b.HostedZoneID)

@@ -210,6 +205,10 @@ func (b *PolicyBuilder) BuildAWSPolicyNode() (*Policy, error) {
    addRoute53ListHostedZonesPermission(p)
  }

  if b.Cluster.Spec.IAM.Legacy || b.Cluster.Spec.IAM.AllowContainerRegistry {
    addECRPermissions(p)
  }

  return p, nil
}
@@ -78,39 +78,64 @@ func TestRoundTrip(t *testing.T) {

func TestPolicyGeneration(t *testing.T) {
  grid := []struct {
    Role                   kops.InstanceGroupRole
    LegacyIAM              bool
    AllowContainerRegistry bool
    Policy                 string
  }{
    {
      Role:                   "Master",
      LegacyIAM:              true,
      AllowContainerRegistry: false,
      Policy:                 "tests/iam_builder_master_legacy.json",
    },
    {
      Role:                   "Master",
      LegacyIAM:              false,
      AllowContainerRegistry: false,
      Policy:                 "tests/iam_builder_master_strict.json",
    },
    {
      Role:                   "Master",
      LegacyIAM:              false,
      AllowContainerRegistry: true,
      Policy:                 "tests/iam_builder_master_strict_ecr.json",
    },
    {
      Role:                   "Node",
      LegacyIAM:              true,
      AllowContainerRegistry: false,
      Policy:                 "tests/iam_builder_node_legacy.json",
    },
    {
      Role:                   "Node",
      LegacyIAM:              false,
      AllowContainerRegistry: false,
      Policy:                 "tests/iam_builder_node_strict.json",
    },
    {
      Role:                   "Node",
      LegacyIAM:              false,
      AllowContainerRegistry: true,
      Policy:                 "tests/iam_builder_node_strict_ecr.json",
    },
    {
      Role:                   "Bastion",
      LegacyIAM:              true,
      AllowContainerRegistry: false,
      Policy:                 "tests/iam_builder_bastion.json",
    },
    {
      Role:                   "Bastion",
      LegacyIAM:              false,
      AllowContainerRegistry: false,
      Policy:                 "tests/iam_builder_bastion.json",
    },
    {
      Role:                   "Bastion",
      LegacyIAM:              false,
      AllowContainerRegistry: true,
      Policy:                 "tests/iam_builder_bastion.json",
    },
  }

@@ -120,7 +145,8 @@ func TestPolicyGeneration(t *testing.T) {
      Spec: kops.ClusterSpec{
        ConfigStore: "s3://kops-tests/iam-builder-test.k8s.local",
        IAM: &kops.IAMSpec{
          Legacy:                 x.LegacyIAM,
          AllowContainerRegistry: x.AllowContainerRegistry,
        },
        EtcdClusters: []*kops.EtcdClusterSpec{
          {

@@ -97,6 +97,16 @@
        "key-id-3"
      ]
    },
    {
      "Sid": "",
      "Effect": "Allow",
      "Action": [
        "route53:ListHostedZones"
      ],
      "Resource": [
        "*"
      ]
    },
    {
      "Sid": "kopsK8sECR",
      "Effect": "Allow",

@@ -112,16 +122,6 @@
      "Resource": [
        "*"
      ]
    },
    {
      "Sid": "",
      "Effect": "Allow",
      "Action": [
        "route53:ListHostedZones"
      ],
      "Resource": [
        "*"
      ]
    }
  ]
}

@@ -0,0 +1,170 @@
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Sid": "kopsK8sEC2MasterPermsDescribeResources",
      "Effect": "Allow",
      "Action": [
        "ec2:DescribeInstances",
        "ec2:DescribeRouteTables",
        "ec2:DescribeSecurityGroups",
        "ec2:DescribeSubnets",
        "ec2:DescribeVolumes"
      ],
      "Resource": [
        "*"
      ]
    },
    {
      "Sid": "kopsK8sEC2MasterPermsAllResources",
      "Effect": "Allow",
      "Action": [
        "ec2:CreateRoute",
        "ec2:CreateSecurityGroup",
        "ec2:CreateTags",
        "ec2:CreateVolume",
        "ec2:ModifyInstanceAttribute"
      ],
      "Resource": [
        "*"
      ]
    },
    {
      "Sid": "kopsK8sEC2MasterPermsTaggedResources",
      "Effect": "Allow",
      "Action": [
        "ec2:AttachVolume",
        "ec2:AuthorizeSecurityGroupIngress",
        "ec2:DeleteRoute",
        "ec2:DeleteSecurityGroup",
        "ec2:DeleteVolume",
        "ec2:DetachVolume",
        "ec2:RevokeSecurityGroupIngress"
      ],
      "Resource": [
        "*"
      ],
      "Condition": {
        "StringEquals": {
          "ec2:ResourceTag/KubernetesCluster": "iam-builder-test.k8s.local"
        }
      }
    },
    {
      "Sid": "kopsK8sASMasterPermsAllResources",
      "Effect": "Allow",
      "Action": [
        "autoscaling:DescribeAutoScalingGroups",
        "autoscaling:DescribeLaunchConfigurations",
        "autoscaling:GetAsgForInstance"
      ],
      "Resource": [
        "*"
      ]
    },
    {
      "Sid": "kopsK8sASMasterPermsTaggedResources",
      "Effect": "Allow",
      "Action": [
        "autoscaling:SetDesiredCapacity",
        "autoscaling:TerminateInstanceInAutoScalingGroup",
        "autoscaling:UpdateAutoScalingGroup"
      ],
      "Resource": [
        "*"
      ],
      "Condition": {
        "StringEquals": {
          "ec2:ResourceTag/KubernetesCluster": "iam-builder-test.k8s.local"
        }
      }
    },
    {
      "Sid": "kopsK8sELBMasterPermsRestrictive",
      "Effect": "Allow",
      "Action": [
        "elasticloadbalancing:AttachLoadBalancerToSubnets",
        "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer",
        "elasticloadbalancing:CreateLoadBalancer",
        "elasticloadbalancing:CreateLoadBalancerPolicy",
        "elasticloadbalancing:CreateLoadBalancerListeners",
        "elasticloadbalancing:ConfigureHealthCheck",
        "elasticloadbalancing:DeleteLoadBalancer",
        "elasticloadbalancing:DeleteLoadBalancerListeners",
        "elasticloadbalancing:DescribeLoadBalancers",
        "elasticloadbalancing:DescribeLoadBalancerAttributes",
        "elasticloadbalancing:DetachLoadBalancerFromSubnets",
        "elasticloadbalancing:DeregisterInstancesFromLoadBalancer",
        "elasticloadbalancing:ModifyLoadBalancerAttributes",
        "elasticloadbalancing:RegisterInstancesWithLoadBalancer",
        "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer"
      ],
      "Resource": [
        "*"
      ]
    },
    {
      "Sid": "kopsMasterCertIAMPerms",
      "Effect": "Allow",
      "Action": [
        "iam:ListServerCertificates",
        "iam:GetServerCertificate"
      ],
      "Resource": [
        "*"
      ]
    },
    {
      "Sid": "kopsK8sS3GetListBucket",
      "Effect": "Allow",
|
||||
"Action": [
|
||||
"s3:GetBucketLocation",
|
||||
"s3:ListBucket"
|
||||
],
|
||||
"Resource": [
|
||||
"arn:aws:s3:::kops-tests"
|
||||
]
|
||||
},
|
||||
{
|
||||
"Sid": "kopsK8sS3MasterBucketFullGet",
|
||||
"Effect": "Allow",
|
||||
"Action": [
|
||||
"s3:Get*"
|
||||
],
|
||||
"Resource": "arn:aws:s3:::kops-tests/iam-builder-test.k8s.local/*"
|
||||
},
|
||||
{
|
||||
"Sid": "kopsK8sKMSEncryptedVolumes",
|
||||
"Effect": "Allow",
|
||||
"Action": [
|
||||
"kms:CreateGrant",
|
||||
"kms:Decrypt",
|
||||
"kms:DescribeKey",
|
||||
"kms:Encrypt",
|
||||
"kms:GenerateDataKey*",
|
||||
"kms:ReEncrypt*"
|
||||
],
|
||||
"Resource": [
|
||||
"key-id-1",
|
||||
"key-id-2",
|
||||
"key-id-3"
|
||||
]
|
||||
},
|
||||
{
|
||||
"Sid": "kopsK8sECR",
|
||||
"Effect": "Allow",
|
||||
"Action": [
|
||||
"ecr:GetAuthorizationToken",
|
||||
"ecr:BatchCheckLayerAvailability",
|
||||
"ecr:GetDownloadUrlForLayer",
|
||||
"ecr:GetRepositoryPolicy",
|
||||
"ecr:DescribeRepositories",
|
||||
"ecr:ListImages",
|
||||
"ecr:BatchGetImage"
|
||||
],
|
||||
"Resource": [
|
||||
"*"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
|
@ -30,6 +30,16 @@
|
|||
],
|
||||
"Resource": "arn:aws:s3:::kops-tests/iam-builder-test.k8s.local/*"
|
||||
},
|
||||
{
|
||||
"Sid": "",
|
||||
"Effect": "Allow",
|
||||
"Action": [
|
||||
"route53:ListHostedZones"
|
||||
],
|
||||
"Resource": [
|
||||
"*"
|
||||
]
|
||||
},
|
||||
{
|
||||
"Sid": "kopsK8sECR",
|
||||
"Effect": "Allow",
|
||||
|
@ -45,16 +55,6 @@
|
|||
"Resource": [
|
||||
"*"
|
||||
]
|
||||
},
|
||||
{
|
||||
"Sid": "",
|
||||
"Effect": "Allow",
|
||||
"Action": [
|
||||
"route53:ListHostedZones"
|
||||
],
|
||||
"Resource": [
|
||||
"*"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
|
@ -0,0 +1,60 @@
|
|||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Sid": "kopsK8sEC2NodePerms",
|
||||
"Effect": "Allow",
|
||||
"Action": [
|
||||
"ec2:DescribeInstances"
|
||||
],
|
||||
"Resource": [
|
||||
"*"
|
||||
]
|
||||
},
|
||||
{
|
||||
"Sid": "kopsK8sS3GetListBucket",
|
||||
"Effect": "Allow",
|
||||
"Action": [
|
||||
"s3:GetBucketLocation",
|
||||
"s3:ListBucket"
|
||||
],
|
||||
"Resource": [
|
||||
"arn:aws:s3:::kops-tests"
|
||||
]
|
||||
},
|
||||
{
|
||||
"Sid": "kopsK8sS3NodeBucketSelectiveGet",
|
||||
"Effect": "Allow",
|
||||
"Action": [
|
||||
"s3:Get*"
|
||||
],
|
||||
"Resource": [
|
||||
"arn:aws:s3:::kops-tests/iam-builder-test.k8s.local/addons/*",
|
||||
"arn:aws:s3:::kops-tests/iam-builder-test.k8s.local/cluster.spec",
|
||||
"arn:aws:s3:::kops-tests/iam-builder-test.k8s.local/config",
|
||||
"arn:aws:s3:::kops-tests/iam-builder-test.k8s.local/instancegroup/*",
|
||||
"arn:aws:s3:::kops-tests/iam-builder-test.k8s.local/pki/issued/*",
|
||||
"arn:aws:s3:::kops-tests/iam-builder-test.k8s.local/pki/private/kube-proxy/*",
|
||||
"arn:aws:s3:::kops-tests/iam-builder-test.k8s.local/pki/private/kubelet/*",
|
||||
"arn:aws:s3:::kops-tests/iam-builder-test.k8s.local/pki/ssh/*",
|
||||
"arn:aws:s3:::kops-tests/iam-builder-test.k8s.local/secrets/dockerconfig"
|
||||
]
|
||||
},
|
||||
{
|
||||
"Sid": "kopsK8sECR",
|
||||
"Effect": "Allow",
|
||||
"Action": [
|
||||
"ecr:GetAuthorizationToken",
|
||||
"ecr:BatchCheckLayerAvailability",
|
||||
"ecr:GetDownloadUrlForLayer",
|
||||
"ecr:GetRepositoryPolicy",
|
||||
"ecr:DescribeRepositories",
|
||||
"ecr:ListImages",
|
||||
"ecr:BatchGetImage"
|
||||
],
|
||||
"Resource": [
|
||||
"*"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
|
@ -25,6 +25,7 @@ import (
|
|||
"k8s.io/kops/pkg/apis/kops"
|
||||
"k8s.io/kops/upup/pkg/fi"
|
||||
"k8s.io/kops/upup/pkg/fi/cloudup/awstasks"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
|
||||
)
|
||||
|
||||
// NetworkModelBuilder configures network objects
|
||||
|
@ -148,6 +149,18 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error {
|
|||
subnetName := subnetSpec.Name + "." + b.ClusterName()
|
||||
tags := b.CloudTags(subnetName, sharedSubnet)
|
||||
|
||||
// Apply tags so that Kubernetes knows which subnets should be used for internal/external ELBs
|
||||
switch subnetSpec.Type {
|
||||
case kops.SubnetTypePublic, kops.SubnetTypeUtility:
|
||||
tags[aws.TagNameSubnetPublicELB] = "1"
|
||||
|
||||
case kops.SubnetTypePrivate:
|
||||
tags[aws.TagNameSubnetInternalELB] = "1"
|
||||
|
||||
default:
|
||||
glog.V(2).Infof("unable to properly tag subnet %q because it has unknown type %q. Load balancers may be created in incorrect subnets", subnetSpec.Name, subnetSpec.Type)
|
||||
}
|
||||
|
||||
subnet := &awstasks.Subnet{
|
||||
Name: s(subnetName),
|
||||
Lifecycle: b.Lifecycle,
|
||||
|
|
|
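The switch in the NetworkModelBuilder hunk above decides which ELB role tag each subnet receives. A minimal standalone sketch of the same decision; the tag keys are taken from the Terraform fixtures in this commit, while the constant and function names here are assumed for illustration:

```go
package main

import "fmt"

// Tag keys as they appear on the subnets in the Terraform fixtures below.
const (
	tagPublicELB   = "kubernetes.io/role/elb"
	tagInternalELB = "kubernetes.io/role/internal-elb"
)

// elbTagForSubnet mirrors the switch in NetworkModelBuilder.Build: public and
// utility subnets are advertised for external ELBs, private subnets for
// internal ELBs, and unknown types are left untagged.
func elbTagForSubnet(subnetType string) (key string, ok bool) {
	switch subnetType {
	case "Public", "Utility":
		return tagPublicELB, true
	case "Private":
		return tagInternalELB, true
	default:
		return "", false
	}
}

func main() {
	for _, t := range []string{"Public", "Utility", "Private", "Unknown"} {
		if key, ok := elbTagForSubnet(t); ok {
			fmt.Printf("%s subnet -> %s=1\n", t, key)
		} else {
			fmt.Printf("%s subnet -> no ELB tag\n", t)
		}
	}
}
```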
@ -35,13 +35,25 @@ var _ fi.ModelBuilder = &PKIModelBuilder{}
|
|||
|
||||
// Build is responsible for generating the various pki assets
|
||||
func (b *PKIModelBuilder) Build(c *fi.ModelBuilderContext) error {
|
||||
|
||||
// TODO: Only create the CA via this task
|
||||
defaultCA := &fitasks.Keypair{
|
||||
Name: fi.String(fi.CertificateId_CA),
|
||||
Lifecycle: b.Lifecycle,
|
||||
Subject: "cn=kubernetes",
|
||||
Type: "ca",
|
||||
}
|
||||
c.AddTask(defaultCA)
|
||||
|
||||
{
|
||||
|
||||
t := &fitasks.Keypair{
|
||||
Name: fi.String("kubelet"),
|
||||
Lifecycle: b.Lifecycle,
|
||||
|
||||
Subject: "o=" + user.NodesGroup + ",cn=kubelet",
|
||||
Type: "client",
|
||||
Signer: defaultCA,
|
||||
}
|
||||
c.AddTask(t)
|
||||
}
|
||||
|
@ -54,6 +66,7 @@ func (b *PKIModelBuilder) Build(c *fi.ModelBuilderContext) error {
|
|||
Lifecycle: b.Lifecycle,
|
||||
Subject: "cn=kubelet-api",
|
||||
Type: "client",
|
||||
Signer: defaultCA,
|
||||
})
|
||||
}
|
||||
{
|
||||
|
@ -62,6 +75,7 @@ func (b *PKIModelBuilder) Build(c *fi.ModelBuilderContext) error {
|
|||
Lifecycle: b.Lifecycle,
|
||||
Subject: "cn=" + user.KubeScheduler,
|
||||
Type: "client",
|
||||
Signer: defaultCA,
|
||||
}
|
||||
c.AddTask(t)
|
||||
}
|
||||
|
@ -72,6 +86,7 @@ func (b *PKIModelBuilder) Build(c *fi.ModelBuilderContext) error {
|
|||
Lifecycle: b.Lifecycle,
|
||||
Subject: "cn=" + user.KubeProxy,
|
||||
Type: "client",
|
||||
Signer: defaultCA,
|
||||
}
|
||||
c.AddTask(t)
|
||||
}
|
||||
|
@ -82,6 +97,7 @@ func (b *PKIModelBuilder) Build(c *fi.ModelBuilderContext) error {
|
|||
Lifecycle: b.Lifecycle,
|
||||
Subject: "cn=" + user.KubeControllerManager,
|
||||
Type: "client",
|
||||
Signer: defaultCA,
|
||||
}
|
||||
c.AddTask(t)
|
||||
}
|
||||
|
@ -101,6 +117,7 @@ func (b *PKIModelBuilder) Build(c *fi.ModelBuilderContext) error {
|
|||
Name: fi.String("etcd"),
|
||||
Subject: "cn=etcd",
|
||||
Type: "server",
|
||||
Signer: defaultCA,
|
||||
})
|
||||
}
|
||||
{
|
||||
|
@ -109,6 +126,7 @@ func (b *PKIModelBuilder) Build(c *fi.ModelBuilderContext) error {
|
|||
Lifecycle: b.Lifecycle,
|
||||
Subject: "cn=etcd-client",
|
||||
Type: "client",
|
||||
Signer: defaultCA,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
@ -118,6 +136,7 @@ func (b *PKIModelBuilder) Build(c *fi.ModelBuilderContext) error {
|
|||
Name: fi.String("kube-router"),
|
||||
Subject: "cn=" + "system:kube-router",
|
||||
Type: "client",
|
||||
Signer: defaultCA,
|
||||
}
|
||||
c.AddTask(t)
|
||||
}
|
||||
|
@ -128,6 +147,7 @@ func (b *PKIModelBuilder) Build(c *fi.ModelBuilderContext) error {
|
|||
Lifecycle: b.Lifecycle,
|
||||
Subject: "o=" + user.SystemPrivilegedGroup + ",cn=kubecfg",
|
||||
Type: "client",
|
||||
Signer: defaultCA,
|
||||
}
|
||||
c.AddTask(t)
|
||||
}
|
||||
|
@ -138,16 +158,38 @@ func (b *PKIModelBuilder) Build(c *fi.ModelBuilderContext) error {
|
|||
Lifecycle: b.Lifecycle,
|
||||
Subject: "cn=apiserver-proxy-client",
|
||||
Type: "client",
|
||||
Signer: defaultCA,
|
||||
}
|
||||
c.AddTask(t)
|
||||
}
|
||||
|
||||
{
|
||||
aggregatorCA := &fitasks.Keypair{
|
||||
Name: fi.String("apiserver-aggregator-ca"),
|
||||
Lifecycle: b.Lifecycle,
|
||||
Subject: "cn=apiserver-aggregator-ca",
|
||||
Type: "ca",
|
||||
}
|
||||
c.AddTask(aggregatorCA)
|
||||
|
||||
aggregator := &fitasks.Keypair{
|
||||
Name: fi.String("apiserver-aggregator"),
|
||||
Lifecycle: b.Lifecycle,
|
||||
// Must match RequestheaderAllowedNames
|
||||
Subject: "cn=aggregator",
|
||||
Type: "client",
|
||||
Signer: aggregatorCA,
|
||||
}
|
||||
c.AddTask(aggregator)
|
||||
}
|
||||
|
||||
{
|
||||
t := &fitasks.Keypair{
|
||||
Name: fi.String("kops"),
|
||||
Lifecycle: b.Lifecycle,
|
||||
Subject: "o=" + user.SystemPrivilegedGroup + ",cn=kops",
|
||||
Type: "client",
|
||||
Signer: defaultCA,
|
||||
}
|
||||
c.AddTask(t)
|
||||
}
|
||||
|
@ -183,6 +225,7 @@ func (b *PKIModelBuilder) Build(c *fi.ModelBuilderContext) error {
|
|||
Subject: "cn=kubernetes-master",
|
||||
Type: "server",
|
||||
AlternateNames: alternateNames,
|
||||
Signer: defaultCA,
|
||||
}
|
||||
c.AddTask(t)
|
||||
}
|
||||
|
|
|
@ -85,7 +85,7 @@ func SignNewCertificate(privateKey *PrivateKey, template *x509.Certificate, sign
|
|||
template.KeyUsage = x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment
|
||||
}
|
||||
|
||||
if template.ExtKeyUsage == nil {
|
||||
if template.ExtKeyUsage == nil && !template.IsCA {
|
||||
template.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}
|
||||
}
|
||||
//c.SignatureAlgorithm = do we want to override?
|
||||
|
|
|
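The SignNewCertificate change above stops defaulting CA templates to the ServerAuth extended key usage. A minimal standard-library sketch (function names assumed) of the distinction the change preserves: CA templates carry signing key usages and no ExtKeyUsage, while leaf server templates still get ServerAuth.

```go
package main

import (
	"crypto/x509"
	"crypto/x509/pkix"
)

// caTemplate builds a CA certificate template: signing key usages,
// no extended key usage (which the changed code now leaves unset for CAs).
func caTemplate(cn string) *x509.Certificate {
	return &x509.Certificate{
		Subject:               pkix.Name{CommonName: cn},
		IsCA:                  true,
		BasicConstraintsValid: true,
		KeyUsage:              x509.KeyUsageCertSign | x509.KeyUsageCRLSign | x509.KeyUsageDigitalSignature,
	}
}

// serverTemplate builds a leaf server template, which does get ServerAuth.
func serverTemplate(cn string) *x509.Certificate {
	return &x509.Certificate{
		Subject:     pkix.Name{CommonName: cn},
		KeyUsage:    x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
		ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
	}
}

func main() {
	_ = caTemplate("apiserver-aggregator-ca")
	_ = serverTemplate("kubernetes-master")
}
```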
@ -37,7 +37,11 @@ go_library(
|
|||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["aws_test.go"],
|
||||
size = "small",
|
||||
srcs = [
|
||||
"aws_test.go",
|
||||
"gce_test.go",
|
||||
],
|
||||
library = ":go_default_library",
|
||||
deps = [
|
||||
"//cloudmock/aws/mockec2:go_default_library",
|
||||
|
|
|
@ -15,6 +15,7 @@ go_library(
|
|||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
size = "small",
|
||||
srcs = ["dns_test.go"],
|
||||
library = ":go_default_library",
|
||||
deps = [
|
||||
|
|
|
@ -39,11 +39,15 @@ const (
|
|||
typeDisk = "Disk"
|
||||
typeInstanceGroupManager = "InstanceGroupManager"
|
||||
typeTargetPool = "TargetPool"
|
||||
typeFirewallRule = "FirewallRule"
|
||||
typeForwardingRule = "ForwardingRule"
|
||||
typeAddress = "Address"
|
||||
typeRoute = "Route"
|
||||
)
|
||||
|
||||
// Maximum number of `-` separated tokens in a name
|
||||
const maxPrefixTokens = 4
|
||||
|
||||
func (c *ClusterResources) listResourcesGCE() (map[string]*tracker.Resource, error) {
|
||||
gceCloud := c.Cloud.(gce.GCECloud)
|
||||
if c.Region == "" {
|
||||
|
@ -85,6 +89,7 @@ func (c *ClusterResources) listResourcesGCE() (map[string]*tracker.Resource, err
|
|||
d.listInstanceGroupManagersAndInstances,
|
||||
d.listTargetPools,
|
||||
d.listForwardingRules,
|
||||
d.listFirewallRules,
|
||||
d.listGCEDisks,
|
||||
d.listGCEDNSZone,
|
||||
// TODO: Find routes via instances (via instance groups)
|
||||
|
@ -476,6 +481,74 @@ func deleteForwardingRule(cloud fi.Cloud, r *tracker.Resource) error {
|
|||
return c.WaitForOp(op)
|
||||
}
|
||||
|
||||
// listFirewallRules discovers Firewall objects for the cluster
|
||||
func (d *clusterDiscoveryGCE) listFirewallRules() ([]*tracker.Resource, error) {
|
||||
c := d.gceCloud
|
||||
|
||||
var resourceTrackers []*tracker.Resource
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
err := c.Compute().Firewalls.List(c.Project()).Pages(ctx, func(page *compute.FirewallList) error {
|
||||
for _, fr := range page.Items {
|
||||
if !d.matchesClusterNameMultipart(fr.Name, maxPrefixTokens) {
|
||||
continue
|
||||
}
|
||||
|
||||
foundMatchingTarget := false
|
||||
tagPrefix := gce.SafeClusterName(d.clusterName) + "-"
|
||||
for _, target := range fr.TargetTags {
|
||||
if strings.HasPrefix(target, tagPrefix) {
|
||||
foundMatchingTarget = true
|
||||
}
|
||||
}
|
||||
if !foundMatchingTarget {
|
||||
break
|
||||
}
|
||||
|
||||
resourceTracker := &tracker.Resource{
|
||||
Name: fr.Name,
|
||||
ID: fr.Name,
|
||||
Type: typeFirewallRule,
|
||||
Deleter: deleteFirewallRule,
|
||||
Obj: fr,
|
||||
}
|
||||
|
||||
glog.V(4).Infof("Found resource: %s", fr.SelfLink)
|
||||
resourceTrackers = append(resourceTrackers, resourceTracker)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error listing FirewallRules: %v", err)
|
||||
}
|
||||
|
||||
return resourceTrackers, nil
|
||||
}
|
||||
|
||||
// deleteFirewallRule is the helper function to delete a tracker.Resource for a Firewall object
|
||||
func deleteFirewallRule(cloud fi.Cloud, r *tracker.Resource) error {
|
||||
c := cloud.(gce.GCECloud)
|
||||
t := r.Obj.(*compute.Firewall)
|
||||
|
||||
glog.V(2).Infof("Deleting GCE FirewallRule %s", t.SelfLink)
|
||||
u, err := gce.ParseGoogleCloudURL(t.SelfLink)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
op, err := c.Compute().Firewalls.Delete(u.Project, u.Name).Do()
|
||||
if err != nil {
|
||||
if gce.IsNotFound(err) {
|
||||
glog.Infof("FirewallRule not found, assuming deleted: %q", t.SelfLink)
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("error deleting FirewallRule %s: %v", t.SelfLink, err)
|
||||
}
|
||||
|
||||
return c.WaitForOp(op)
|
||||
}
|
||||
|
||||
func (d *clusterDiscoveryGCE) listRoutes(resources map[string]*tracker.Resource) ([]*tracker.Resource, error) {
|
||||
c := d.gceCloud
|
||||
|
||||
|
@ -625,13 +698,28 @@ func deleteAddress(cloud fi.Cloud, r *tracker.Resource) error {
|
|||
}
|
||||
|
||||
func (d *clusterDiscoveryGCE) matchesClusterName(name string) bool {
|
||||
firstDash := strings.Index(name, "-")
|
||||
if firstDash == -1 {
|
||||
return false
|
||||
}
|
||||
return d.matchesClusterNameMultipart(name, 1)
|
||||
}
|
||||
|
||||
id := name[:firstDash]
|
||||
return name == gce.SafeObjectName(id, d.clusterName)
|
||||
// matchesClusterNameMultipart checks if the name could have been generated by our cluster
|
||||
// considering all the prefixes separated by `-`. maxParts limits the number of parts we consider.
|
||||
func (d *clusterDiscoveryGCE) matchesClusterNameMultipart(name string, maxParts int) bool {
|
||||
tokens := strings.Split(name, "-")
|
||||
|
||||
for i := 1; i <= maxParts; i++ {
|
||||
if i > len(tokens) {
|
||||
break
|
||||
}
|
||||
|
||||
id := strings.Join(tokens[:i], "-")
|
||||
if id == "" {
|
||||
continue
|
||||
}
|
||||
if name == gce.SafeObjectName(id, d.clusterName) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (d *clusterDiscoveryGCE) listGCEDNSZone() ([]*tracker.Resource, error) {
|
||||
|
|
|
@ -0,0 +1,64 @@
|
|||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package resources
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestNameMatch(t *testing.T) {
|
||||
grid := []struct {
|
||||
Name string
|
||||
Match bool
|
||||
}{
|
||||
{
|
||||
Name: "nodeport-external-to-node-cluster-example-com",
|
||||
Match: true,
|
||||
},
|
||||
{
|
||||
Name: "simple-cluster-example-com",
|
||||
Match: true,
|
||||
},
|
||||
{
|
||||
Name: "-cluster-example-com",
|
||||
Match: false,
|
||||
},
|
||||
{
|
||||
Name: "cluster-example-com",
|
||||
Match: false,
|
||||
},
|
||||
{
|
||||
Name: "a-example-com",
|
||||
Match: false,
|
||||
},
|
||||
{
|
||||
Name: "-example-com",
|
||||
Match: false,
|
||||
},
|
||||
{
|
||||
Name: "",
|
||||
Match: false,
|
||||
},
|
||||
}
|
||||
for _, g := range grid {
|
||||
d := &clusterDiscoveryGCE{
|
||||
clusterName: "cluster.example.com",
|
||||
}
|
||||
match := d.matchesClusterNameMultipart(g.Name, maxPrefixTokens)
|
||||
if match != g.Match {
|
||||
t.Errorf("unexpected match value for %q, got %v, expected %v", g.Name, match, g.Match)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -82,10 +82,3 @@
|
|||
members: [a1 b1]
|
||||
- name: events
|
||||
members: [a1 b1]
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
|
|
@ -39,14 +39,15 @@ func NewTemplater() *Templater {
|
|||
}
|
||||
|
||||
// Render is responsible for actually rendering the template
|
||||
func (r *Templater) Render(content string, context map[string]interface{}, snippets map[string]string) (rendered string, err error) {
|
||||
|
||||
func (r *Templater) Render(content string, context map[string]interface{}, snippets map[string]string, failOnMissing bool) (rendered string, err error) {
|
||||
// @step: create the template
|
||||
tm := template.New(templateName)
|
||||
if _, err = tm.Funcs(r.templateFuncsMap(tm)).Parse(content); err != nil {
|
||||
return
|
||||
}
|
||||
tm.Option("missingkey=error")
|
||||
if failOnMissing {
|
||||
tm.Option("missingkey=error")
|
||||
}
|
||||
|
||||
// @step: add the snippets into the mix
|
||||
for filename, snippet := range snippets {
|
||||
|
|
|
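`missingkey=error` is the standard text/template option that the Render change above now applies only when failOnMissing is set. A minimal standalone illustration of the behavioral difference (the template text and variable names here are just examples):

```go
package main

import (
	"bytes"
	"fmt"
	"text/template"
)

// render executes a template whose context is missing the referenced key,
// optionally with the strict missingkey=error option enabled.
func render(failOnMissing bool) (string, error) {
	tm := template.New("example")
	if _, err := tm.Parse("hello {{ .name }}"); err != nil {
		return "", err
	}
	if failOnMissing {
		// Referencing a key absent from the context becomes an execution error.
		tm.Option("missingkey=error")
	}

	var out bytes.Buffer
	err := tm.Execute(&out, map[string]interface{}{}) // empty context: .name is missing
	return out.String(), err
}

func main() {
	s, err := render(false)
	fmt.Printf("lenient: %q err=%v\n", s, err) // lenient: "hello <no value>" err=<nil>
	_, err = render(true)
	fmt.Printf("strict:  err=%v\n", err) // strict: error about missing key "name"
}
```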
@ -147,6 +147,18 @@ func TestRenderContext(t *testing.T) {
|
|||
makeRenderTests(t, cases)
|
||||
}
|
||||
|
||||
func TestAllowForMissingVars(t *testing.T) {
|
||||
cases := []renderTest{
|
||||
{
|
||||
Context: map[string]interface{}{},
|
||||
Template: `{{ default "is missing" .name }}`,
|
||||
Expected: "is missing",
|
||||
DisableMissing: true,
|
||||
},
|
||||
}
|
||||
makeRenderTests(t, cases)
|
||||
}
|
||||
|
||||
func TestRenderIntegration(t *testing.T) {
|
||||
var cases []renderTest
|
||||
content, err := ioutil.ReadFile("integration_tests.yml")
|
||||
|
@ -161,17 +173,18 @@ func TestRenderIntegration(t *testing.T) {
|
|||
}
|
||||
|
||||
type renderTest struct {
|
||||
Expected string
|
||||
Snippets map[string]string
|
||||
Context map[string]interface{}
|
||||
Template string
|
||||
NotOK bool
|
||||
Context map[string]interface{}
|
||||
DisableMissing bool
|
||||
Expected string
|
||||
NotOK bool
|
||||
Snippets map[string]string
|
||||
Template string
|
||||
}
|
||||
|
||||
func makeRenderTests(t *testing.T, tests []renderTest) {
|
||||
r := NewTemplater()
|
||||
for i, x := range tests {
|
||||
render, err := r.Render(x.Template, x.Context, x.Snippets)
|
||||
render, err := r.Render(x.Template, x.Context, x.Snippets, !x.DisableMissing)
|
||||
if x.NotOK {
|
||||
if err == nil {
|
||||
t.Errorf("case %d: should have thrown an error", i)
|
||||
|
|
|
@ -61,7 +61,8 @@ func run() error {
|
|||
var zones []string
|
||||
var applyTaints, initializeRBAC, containerized, master bool
|
||||
var cloud, clusterID, dnsServer, dnsProviderID, dnsInternalSuffix, gossipSecret, gossipListen string
|
||||
var flagChannels, tlsCert, tlsKey, tlsCA, peerCert, peerKey, peerCA, etcdImageSource string
|
||||
var flagChannels, tlsCert, tlsKey, tlsCA, peerCert, peerKey, peerCA string
|
||||
var etcdImageSource, etcdElectionTimeout, etcdHeartbeatInterval string
|
||||
|
||||
flag.BoolVar(&applyTaints, "apply-taints", applyTaints, "Apply taints to nodes based on the role")
|
||||
flag.BoolVar(&containerized, "containerized", containerized, "Set if we are running containerized.")
|
||||
|
@ -82,6 +83,8 @@ func run() error {
|
|||
flags.StringSliceVarP(&zones, "zone", "z", []string{}, "Configure permitted zones and their mappings")
|
||||
flags.StringVar(&dnsProviderID, "dns", "aws-route53", "DNS provider we should use (aws-route53, google-clouddns, coredns)")
|
||||
flags.StringVar(&etcdImageSource, "etcd-image", "gcr.io/google_containers/etcd:2.2.1", "Etcd Source Container Registry")
|
||||
flags.StringVar(&etcdElectionTimeout, "etcd-election-timeout", etcdElectionTimeout, "time in ms for an election to timeout")
|
||||
flags.StringVar(&etcdHeartbeatInterval, "etcd-heartbeat-interval", etcdHeartbeatInterval, "time in ms of a heartbeat interval")
|
||||
flags.StringVar(&gossipSecret, "gossip-secret", gossipSecret, "Secret to use to secure gossip")
|
||||
|
||||
// Trick to avoid 'logging before flag.Parse' warning
|
||||
|
@ -282,22 +285,24 @@ func run() error {
|
|||
}
|
||||
|
||||
k := &protokube.KubeBoot{
|
||||
ApplyTaints: applyTaints,
|
||||
Channels: channels,
|
||||
DNS: dnsProvider,
|
||||
EtcdImageSource: etcdImageSource,
|
||||
InitializeRBAC: initializeRBAC,
|
||||
InternalDNSSuffix: dnsInternalSuffix,
|
||||
InternalIP: internalIP,
|
||||
Kubernetes: protokube.NewKubernetesContext(),
|
||||
Master: master,
|
||||
ModelDir: modelDir,
|
||||
PeerCA: peerCA,
|
||||
PeerCert: peerCert,
|
||||
PeerKey: peerKey,
|
||||
TLSCA: tlsCA,
|
||||
TLSCert: tlsCert,
|
||||
TLSKey: tlsKey,
|
||||
ApplyTaints: applyTaints,
|
||||
Channels: channels,
|
||||
DNS: dnsProvider,
|
||||
EtcdImageSource: etcdImageSource,
|
||||
EtcdElectionTimeout: etcdElectionTimeout,
|
||||
EtcdHeartbeatInterval: etcdHeartbeatInterval,
|
||||
InitializeRBAC: initializeRBAC,
|
||||
InternalDNSSuffix: dnsInternalSuffix,
|
||||
InternalIP: internalIP,
|
||||
Kubernetes: protokube.NewKubernetesContext(),
|
||||
Master: master,
|
||||
ModelDir: modelDir,
|
||||
PeerCA: peerCA,
|
||||
PeerCert: peerCert,
|
||||
PeerKey: peerKey,
|
||||
TLSCA: tlsCA,
|
||||
TLSCert: tlsCert,
|
||||
TLSKey: tlsKey,
|
||||
}
|
||||
|
||||
k.Init(volumes)
|
||||
|
|
|
@ -73,6 +73,10 @@ type EtcdCluster struct {
|
|||
PeerCert string
|
||||
// PeerKey is the path to a peer ca for etcd
|
||||
PeerKey string
|
||||
// ElectionTimeout is the leader election timeout
|
||||
ElectionTimeout string
|
||||
// HeartbeatInterval is the heartbeat interval
|
||||
HeartbeatInterval string
|
||||
}
|
||||
|
||||
// EtcdNode is a definition for the etcd node
|
||||
|
@ -97,21 +101,23 @@ func newEtcdController(kubeBoot *KubeBoot, v *Volume, spec *etcd.EtcdClusterSpec
|
|||
|
||||
cluster := &EtcdCluster{
|
||||
// @TODO we need to deprecate this port and use 2379, but that would be a breaking change
|
||||
ClientPort: 4001,
|
||||
ClusterName: "etcd-" + spec.ClusterKey,
|
||||
CPURequest: resource.MustParse("200m"),
|
||||
DataDirName: "data-" + spec.ClusterKey,
|
||||
ImageSource: kubeBoot.EtcdImageSource,
|
||||
TLSCA: kubeBoot.TLSCA,
|
||||
TLSCert: kubeBoot.TLSCert,
|
||||
TLSKey: kubeBoot.TLSKey,
|
||||
PeerCA: kubeBoot.PeerCA,
|
||||
PeerCert: kubeBoot.PeerCert,
|
||||
PeerKey: kubeBoot.PeerKey,
|
||||
PeerPort: 2380,
|
||||
PodName: "etcd-server-" + spec.ClusterKey,
|
||||
Spec: spec,
|
||||
VolumeMountPath: v.Mountpoint,
|
||||
ClientPort: 4001,
|
||||
ClusterName: "etcd-" + spec.ClusterKey,
|
||||
CPURequest: resource.MustParse("200m"),
|
||||
DataDirName: "data-" + spec.ClusterKey,
|
||||
ImageSource: kubeBoot.EtcdImageSource,
|
||||
TLSCA: kubeBoot.TLSCA,
|
||||
TLSCert: kubeBoot.TLSCert,
|
||||
TLSKey: kubeBoot.TLSKey,
|
||||
PeerCA: kubeBoot.PeerCA,
|
||||
PeerCert: kubeBoot.PeerCert,
|
||||
PeerKey: kubeBoot.PeerKey,
|
||||
PeerPort: 2380,
|
||||
PodName: "etcd-server-" + spec.ClusterKey,
|
||||
Spec: spec,
|
||||
VolumeMountPath: v.Mountpoint,
|
||||
ElectionTimeout: kubeBoot.EtcdElectionTimeout,
|
||||
HeartbeatInterval: kubeBoot.EtcdHeartbeatInterval,
|
||||
}
|
||||
|
||||
// We used to build this through text files ... it turns out to just be more complicated than code!
|
||||
|
|
|
@ -48,7 +48,7 @@ func BuildEtcdManifest(c *EtcdCluster) *v1.Pod {
|
|||
"/bin/sh", "-c", "/usr/local/bin/etcd 2>&1 | /bin/tee -a /var/log/etcd.log",
|
||||
},
|
||||
}
|
||||
// build the the environment variables for etcd service
|
||||
// build the environment variables for etcd service
|
||||
container.Env = buildEtcdEnvironmentOptions(c)
|
||||
|
||||
container.LivenessProbe = &v1.Probe{
|
||||
|
@ -170,6 +170,14 @@ func buildEtcdEnvironmentOptions(c *EtcdCluster) []v1.EnvVar {
|
|||
{Name: "ETCD_INITIAL_CLUSTER_STATE", Value: "new"},
|
||||
{Name: "ETCD_INITIAL_CLUSTER_TOKEN", Value: c.ClusterToken}}...)
|
||||
|
||||
// add timeout/heartbeat settings
|
||||
if notEmpty(c.ElectionTimeout) {
|
||||
options = append(options, v1.EnvVar{Name: "ETCD_ELECTION_TIMEOUT", Value: c.ElectionTimeout})
|
||||
}
|
||||
if notEmpty(c.HeartbeatInterval) {
|
||||
options = append(options, v1.EnvVar{Name: "ETCD_HEARTBEAT_INTERVAL", Value: c.HeartbeatInterval})
|
||||
}
|
||||
|
||||
// @check if we are using peer certificates
|
||||
if notEmpty(c.PeerCA) {
|
||||
options = append(options, []v1.EnvVar{
|
||||
|
|
|
@ -50,6 +50,10 @@ type KubeBoot struct {
|
|||
ModelDir string
|
||||
// Etcd container registry location.
|
||||
EtcdImageSource string
|
||||
// EtcdElectionTimeout is the leader election timeout
|
||||
EtcdElectionTimeout string
|
||||
// EtcdHeartbeatInterval is the heartbeat interval
|
||||
EtcdHeartbeatInterval string
|
||||
// TLSCA is the path to a client ca for etcd
|
||||
TLSCA string
|
||||
// TLSCert is the path to a tls certificate for etcd
|
||||
|
|
|
@ -34,6 +34,7 @@ func TestBuildEtcdManifest(t *testing.T) {
|
|||
}{
|
||||
{TestFile: "non_tls.yaml"},
|
||||
{TestFile: "tls.yaml"},
|
||||
{TestFile: "etcd_env_vars.yaml"},
|
||||
}
|
||||
for i, x := range cs {
|
||||
cluster, expected := loadTestIntegration(t, path.Join("main", x.TestFile))
|
||||
|
|
|
@ -0,0 +1,106 @@
|
|||
clientPort: 4001
|
||||
clusterName: etcd-main
|
||||
clusterToken: token-main
|
||||
cpuRequest: "200m"
|
||||
dataDirName: data-main
|
||||
imageSource: gcr.io/google_containers/etcd:2.2.1
|
||||
logFile: /var/log/etcd.log
|
||||
peerPort: 2380
|
||||
podName: etcd-server-main
|
||||
volumeMountPath: /mnt/main
|
||||
electionTimeout: "1000"
|
||||
heartbeatInterval: "100"
|
||||
me:
|
||||
name: node0
|
||||
internalName: node0.internal
|
||||
nodes:
|
||||
- name: node0
|
||||
internalName: node0.internal
|
||||
- name: node1
|
||||
internalName: node1.internal
|
||||
- name: node2
|
||||
internalName: node2.internal
|
||||
spec: {}
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
annotations:
|
||||
scheduler.alpha.kubernetes.io/critical-pod: ""
|
||||
creationTimestamp: null
|
||||
labels:
|
||||
k8s-app: etcd-server-main
|
||||
name: etcd-server-main
|
||||
namespace: kube-system
|
||||
spec:
|
||||
containers:
|
||||
- command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- /usr/local/bin/etcd 2>&1 | /bin/tee -a /var/log/etcd.log
|
||||
env:
|
||||
- name: ETCD_NAME
|
||||
value: node0
|
||||
- name: ETCD_DATA_DIR
|
||||
value: /var/etcd/data-main
|
||||
- name: ETCD_LISTEN_PEER_URLS
|
||||
value: http://0.0.0.0:2380
|
||||
- name: ETCD_LISTEN_CLIENT_URLS
|
||||
value: http://0.0.0.0:4001
|
||||
- name: ETCD_ADVERTISE_CLIENT_URLS
|
||||
value: http://node0.internal:4001
|
||||
- name: ETCD_INITIAL_ADVERTISE_PEER_URLS
|
||||
value: http://node0.internal:2380
|
||||
- name: ETCD_INITIAL_CLUSTER_STATE
|
||||
value: new
|
||||
- name: ETCD_INITIAL_CLUSTER_TOKEN
|
||||
value: token-main
|
||||
- name: ETCD_ELECTION_TIMEOUT
|
||||
value: "1000"
|
||||
- name: ETCD_HEARTBEAT_INTERVAL
|
||||
value: "100"
|
||||
- name: ETCD_INITIAL_CLUSTER
|
||||
value: node0=http://node0.internal:2380,node1=http://node1.internal:2380,node2=http://node2.internal:2380
|
||||
image: gcr.io/google_containers/etcd:2.2.1
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
host: 127.0.0.1
|
||||
path: /health
|
||||
port: 4001
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 15
|
||||
timeoutSeconds: 15
|
||||
name: etcd-container
|
||||
ports:
|
||||
- containerPort: 2380
|
||||
hostPort: 2380
|
||||
name: serverport
|
||||
- containerPort: 4001
|
||||
hostPort: 4001
|
||||
name: clientport
|
||||
resources:
|
||||
requests:
|
||||
cpu: 200m
|
||||
volumeMounts:
|
||||
- mountPath: /var/etcd/data-main
|
||||
name: varetcdata
|
||||
- mountPath: /var/log/etcd.log
|
||||
name: varlogetcd
|
||||
- mountPath: /etc/hosts
|
||||
name: hosts
|
||||
readOnly: true
|
||||
hostNetwork: true
|
||||
tolerations:
|
||||
- key: CriticalAddonsOnly
|
||||
operator: Exists
|
||||
volumes:
|
||||
- hostPath:
|
||||
path: /mnt/main/var/etcd/data-main
|
||||
name: varetcdata
|
||||
- hostPath:
|
||||
path: /var/log/etcd.log
|
||||
name: varlogetcd
|
||||
- hostPath:
|
||||
path: /etc/hosts
|
||||
name: hosts
|
||||
status: {}
|
|
@ -429,6 +429,7 @@ resource "aws_subnet" "us-test-1a-complex-example-com" {
|
|||
KubernetesCluster = "complex.example.com"
|
||||
Name = "us-test-1a.complex.example.com"
|
||||
"kubernetes.io/cluster/complex.example.com" = "owned"
|
||||
"kubernetes.io/role/elb" = "1"
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -535,6 +535,7 @@ resource "aws_subnet" "us-test-1a-ha-example-com" {
|
|||
KubernetesCluster = "ha.example.com"
|
||||
Name = "us-test-1a.ha.example.com"
|
||||
"kubernetes.io/cluster/ha.example.com" = "owned"
|
||||
"kubernetes.io/role/elb" = "1"
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -547,6 +548,7 @@ resource "aws_subnet" "us-test-1b-ha-example-com" {
|
|||
KubernetesCluster = "ha.example.com"
|
||||
Name = "us-test-1b.ha.example.com"
|
||||
"kubernetes.io/cluster/ha.example.com" = "owned"
|
||||
"kubernetes.io/role/elb" = "1"
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -559,6 +561,7 @@ resource "aws_subnet" "us-test-1c-ha-example-com" {
|
|||
KubernetesCluster = "ha.example.com"
|
||||
Name = "us-test-1c.ha.example.com"
|
||||
"kubernetes.io/cluster/ha.example.com" = "owned"
|
||||
"kubernetes.io/role/elb" = "1"
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -365,6 +365,7 @@ resource "aws_subnet" "us-test-1a-minimal-141-example-com" {
|
|||
KubernetesCluster = "minimal-141.example.com"
|
||||
Name = "us-test-1a.minimal-141.example.com"
|
||||
"kubernetes.io/cluster/minimal-141.example.com" = "owned"
|
||||
"kubernetes.io/role/elb" = "1"
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -401,6 +401,10 @@
|
|||
{
|
||||
"Key": "kubernetes.io/cluster/minimal.example.com",
|
||||
"Value": "owned"
|
||||
},
|
||||
{
|
||||
"Key": "kubernetes.io/role/elb",
|
||||
"Value": "1"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
@ -581,22 +585,6 @@
|
|||
],
|
||||
"Sid": "kopsMasterCertIAMPerms"
|
||||
},
|
||||
{
|
||||
"Action": [
|
||||
"ecr:GetAuthorizationToken",
|
||||
"ecr:BatchCheckLayerAvailability",
|
||||
"ecr:GetDownloadUrlForLayer",
|
||||
"ecr:GetRepositoryPolicy",
|
||||
"ecr:DescribeRepositories",
|
||||
"ecr:ListImages",
|
||||
"ecr:BatchGetImage"
|
||||
],
|
||||
"Effect": "Allow",
|
||||
"Resource": [
|
||||
"*"
|
||||
],
|
||||
"Sid": "kopsK8sECR"
|
||||
},
|
||||
{
|
||||
"Action": [
|
||||
"route53:ChangeResourceRecordSets",
|
||||
|
@ -638,6 +626,22 @@
|
|||
"*"
|
||||
],
|
||||
"Sid": ""
|
||||
},
|
||||
{
|
||||
"Action": [
|
||||
"ecr:GetAuthorizationToken",
|
||||
"ecr:BatchCheckLayerAvailability",
|
||||
"ecr:GetDownloadUrlForLayer",
|
||||
"ecr:GetRepositoryPolicy",
|
||||
"ecr:DescribeRepositories",
|
||||
"ecr:ListImages",
|
||||
"ecr:BatchGetImage"
|
||||
],
|
||||
"Effect": "Allow",
|
||||
"Resource": [
|
||||
"*"
|
||||
],
|
||||
"Sid": "kopsK8sECR"
|
||||
}
|
||||
],
|
||||
"Version": "2012-10-17"
|
||||
|
@ -665,22 +669,6 @@
|
|||
],
|
||||
"Sid": "kopsK8sEC2NodePerms"
|
||||
},
|
||||
{
|
||||
"Action": [
|
||||
"ecr:GetAuthorizationToken",
|
||||
"ecr:BatchCheckLayerAvailability",
|
||||
"ecr:GetDownloadUrlForLayer",
|
||||
"ecr:GetRepositoryPolicy",
|
||||
"ecr:DescribeRepositories",
|
||||
"ecr:ListImages",
|
||||
"ecr:BatchGetImage"
|
||||
],
|
||||
"Effect": "Allow",
|
||||
"Resource": [
|
||||
"*"
|
||||
],
|
||||
"Sid": "kopsK8sECR"
|
||||
},
|
||||
{
|
||||
"Action": [
|
||||
"route53:ChangeResourceRecordSets",
|
||||
|
@ -722,6 +710,22 @@
|
|||
"*"
|
||||
],
|
||||
"Sid": ""
|
||||
},
|
||||
{
|
||||
"Action": [
|
||||
"ecr:GetAuthorizationToken",
|
||||
"ecr:BatchCheckLayerAvailability",
|
||||
"ecr:GetDownloadUrlForLayer",
|
||||
"ecr:GetRepositoryPolicy",
|
||||
"ecr:DescribeRepositories",
|
||||
"ecr:ListImages",
|
||||
"ecr:BatchGetImage"
|
||||
],
|
||||
"Effect": "Allow",
|
||||
"Resource": [
|
||||
"*"
|
||||
],
|
||||
"Sid": "kopsK8sECR"
|
||||
}
|
||||
],
|
||||
"Version": "2012-10-17"
|
||||
|
|
|
@ -365,6 +365,7 @@ resource "aws_subnet" "us-test-1a-minimal-example-com" {
|
|||
KubernetesCluster = "minimal.example.com"
|
||||
Name = "us-test-1a.minimal.example.com"
|
||||
"kubernetes.io/cluster/minimal.example.com" = "owned"
|
||||
"kubernetes.io/role/elb" = "1"
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -645,6 +645,7 @@ resource "aws_subnet" "us-test-1a-privatecalico-example-com" {
|
|||
KubernetesCluster = "privatecalico.example.com"
|
||||
Name = "us-test-1a.privatecalico.example.com"
|
||||
"kubernetes.io/cluster/privatecalico.example.com" = "owned"
|
||||
"kubernetes.io/role/internal-elb" = "1"
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -657,6 +658,7 @@ resource "aws_subnet" "utility-us-test-1a-privatecalico-example-com" {
|
|||
KubernetesCluster = "privatecalico.example.com"
|
||||
Name = "utility-us-test-1a.privatecalico.example.com"
|
||||
"kubernetes.io/cluster/privatecalico.example.com" = "owned"
|
||||
"kubernetes.io/role/elb" = "1"
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -636,6 +636,7 @@ resource "aws_subnet" "us-test-1a-privatecanal-example-com" {
|
|||
KubernetesCluster = "privatecanal.example.com"
|
||||
Name = "us-test-1a.privatecanal.example.com"
|
||||
"kubernetes.io/cluster/privatecanal.example.com" = "owned"
|
||||
"kubernetes.io/role/internal-elb" = "1"
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -648,6 +649,7 @@ resource "aws_subnet" "utility-us-test-1a-privatecanal-example-com" {
|
|||
KubernetesCluster = "privatecanal.example.com"
|
||||
Name = "utility-us-test-1a.privatecanal.example.com"
|
||||
"kubernetes.io/cluster/privatecanal.example.com" = "owned"
|
||||
"kubernetes.io/role/elb" = "1"
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -641,6 +641,7 @@ resource "aws_subnet" "us-test-1a-privatedns1-example-com" {
|
|||
KubernetesCluster = "privatedns1.example.com"
|
||||
Name = "us-test-1a.privatedns1.example.com"
|
||||
"kubernetes.io/cluster/privatedns1.example.com" = "owned"
|
||||
"kubernetes.io/role/internal-elb" = "1"
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -653,6 +654,7 @@ resource "aws_subnet" "utility-us-test-1a-privatedns1-example-com" {
|
|||
KubernetesCluster = "privatedns1.example.com"
|
||||
Name = "utility-us-test-1a.privatedns1.example.com"
|
||||
"kubernetes.io/cluster/privatedns1.example.com" = "owned"
|
||||
"kubernetes.io/role/elb" = "1"
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -627,6 +627,7 @@ resource "aws_subnet" "us-test-1a-privatedns2-example-com" {
|
|||
KubernetesCluster = "privatedns2.example.com"
|
||||
Name = "us-test-1a.privatedns2.example.com"
|
||||
"kubernetes.io/cluster/privatedns2.example.com" = "owned"
|
||||
"kubernetes.io/role/internal-elb" = "1"
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -639,6 +640,7 @@ resource "aws_subnet" "utility-us-test-1a-privatedns2-example-com" {
|
|||
KubernetesCluster = "privatedns2.example.com"
|
||||
Name = "utility-us-test-1a.privatedns2.example.com"
|
||||
"kubernetes.io/cluster/privatedns2.example.com" = "owned"
|
||||
"kubernetes.io/role/elb" = "1"
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -636,6 +636,7 @@ resource "aws_subnet" "us-test-1a-privateflannel-example-com" {
|
|||
KubernetesCluster = "privateflannel.example.com"
|
||||
Name = "us-test-1a.privateflannel.example.com"
|
||||
"kubernetes.io/cluster/privateflannel.example.com" = "owned"
|
||||
"kubernetes.io/role/internal-elb" = "1"
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -648,6 +649,7 @@ resource "aws_subnet" "utility-us-test-1a-privateflannel-example-com" {
|
|||
KubernetesCluster = "privateflannel.example.com"
|
||||
Name = "utility-us-test-1a.privateflannel.example.com"
|
||||
"kubernetes.io/cluster/privateflannel.example.com" = "owned"
|
||||
"kubernetes.io/role/elb" = "1"
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -627,6 +627,7 @@ resource "aws_subnet" "us-test-1a-privatekopeio-example-com" {
|
|||
KubernetesCluster = "privatekopeio.example.com"
|
||||
Name = "us-test-1a.privatekopeio.example.com"
|
||||
"kubernetes.io/cluster/privatekopeio.example.com" = "owned"
|
||||
"kubernetes.io/role/internal-elb" = "1"
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -639,6 +640,7 @@ resource "aws_subnet" "utility-us-test-1a-privatekopeio-example-com" {
|
|||
KubernetesCluster = "privatekopeio.example.com"
|
||||
Name = "utility-us-test-1a.privatekopeio.example.com"
|
||||
"kubernetes.io/cluster/privatekopeio.example.com" = "owned"
|
||||
"kubernetes.io/role/elb" = "1"
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -636,6 +636,7 @@ resource "aws_subnet" "us-test-1a-privateweave-example-com" {
|
|||
KubernetesCluster = "privateweave.example.com"
|
||||
Name = "us-test-1a.privateweave.example.com"
|
||||
"kubernetes.io/cluster/privateweave.example.com" = "owned"
|
||||
"kubernetes.io/role/internal-elb" = "1"
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -648,6 +649,7 @@ resource "aws_subnet" "utility-us-test-1a-privateweave-example-com" {
|
|||
KubernetesCluster = "privateweave.example.com"
|
||||
Name = "utility-us-test-1a.privateweave.example.com"
|
||||
"kubernetes.io/cluster/privateweave.example.com" = "owned"
|
||||
"kubernetes.io/role/elb" = "1"
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -356,6 +356,7 @@ resource "aws_subnet" "us-test-1a-sharedvpc-example-com" {
|
|||
KubernetesCluster = "sharedvpc.example.com"
|
||||
Name = "us-test-1a.sharedvpc.example.com"
|
||||
"kubernetes.io/cluster/sharedvpc.example.com" = "owned"
|
||||
"kubernetes.io/role/elb" = "1"
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -0,0 +1,144 @@
|
|||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
annotations:
|
||||
rbac.authorization.kubernetes.io/autoupdate: "true"
|
||||
labels:
|
||||
kubernetes.io/bootstrapping: rbac-defaults
|
||||
name: system:cloud-controller-manager
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- events
|
||||
verbs:
|
||||
- create
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- nodes
|
||||
verbs:
|
||||
- '*'
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- services
|
||||
verbs:
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- events
|
||||
verbs:
|
||||
- create
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- endpoints
|
||||
verbs:
|
||||
- create
|
||||
- get
|
||||
- list
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- serviceaccounts
|
||||
verbs:
|
||||
- create
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- persistentvolumes
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- secrets
|
||||
verbs:
|
||||
- list
|
||||
|
||||
---
|
||||
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: cloud-controller-manager
|
||||
namespace: kube-system
|
||||
|
||||
---
|
||||
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
metadata:
|
||||
name: system:cloud-controller-manager
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: system:cloud-controller-manager
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: cloud-controller-manager
|
||||
namespace: kube-system
|
||||
|
||||
---
|
||||
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: cloud-controller-manager
|
||||
name: cloud-controller-manager
|
||||
namespace: kube-system
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: cloud-controller-manager
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: cloud-controller-manager
|
||||
spec:
|
||||
nodeSelector:
|
||||
node-role.kubernetes.io/master: ""
|
||||
serviceAccountName: cloud-controller-manager
|
||||
containers:
|
||||
- name: cloud-controller-manager
|
||||
# for in-tree providers we use gcr.io/google_containers/cloud-controller-manager
|
||||
# this can be replaced with any other image for out-of-tree providers
|
||||
image: gcr.io/google_containers/cloud-controller-manager:v{{ .KubernetesVersion }} # Reviewers: Will this work?
|
||||
command:
|
||||
- /usr/local/bin/cloud-controller-manager
|
||||
- --cloud-provider={{ .CloudProvider }}
|
||||
- --leader-elect=true
|
||||
- --use-service-account-credentials
|
||||
# these flags will vary for every cloud provider
|
||||
- --allocate-node-cidrs=true
|
||||
- --configure-cloud-routes=true
|
||||
- --cluster-cidr={{ .KubeControllerManager.ClusterCIDR }}
|
||||
tolerations:
|
||||
# this is required so CCM can bootstrap itself
|
||||
- key: node.cloudprovider.kubernetes.io/uninitialized
|
||||
value: "true"
|
||||
effect: NoSchedule
|
||||
# this is to have the daemonset runnable on master nodes
|
||||
# the taint may vary depending on your cluster setup
|
||||
- key: node-role.kubernetes.io/master
|
||||
effect: NoSchedule
|
||||
# this is to restrict CCM to only run on master nodes
|
||||
# the node selector may vary depending on your cluster setup
|
||||
- key: "CriticalAddonsOnly"
|
||||
operator: "Exists"
|
||||
|
|
@ -28,7 +28,7 @@ spec:
|
|||
memory: 100Mi
|
||||
securityContext:
|
||||
privileged: true
|
||||
image: kopeio/networking-agent:1.0.20170406
|
||||
image: kopeio/networking-agent:1.0.20171015
|
||||
name: networking-agent
|
||||
volumeMounts:
|
||||
- name: lib-modules
|
||||
|
|
|
@ -28,7 +28,7 @@ spec:
|
|||
memory: 100Mi
|
||||
securityContext:
|
||||
privileged: true
|
||||
image: kopeio/networking-agent:1.0.20170406
|
||||
image: kopeio/networking-agent:1.0.20171015
|
||||
name: networking-agent
|
||||
volumeMounts:
|
||||
- name: lib-modules
|
||||
|
|
|
@ -0,0 +1,21 @@
|
|||
# Source: https://raw.githubusercontent.com/kubernetes/kubernetes/master/cluster/addons/rbac/kubelet-binding.yaml
|
||||
# The GKE environments don't have kubelets with certificates that
|
||||
# identify the system:nodes group. They use the kubelet identity
|
||||
# TODO: remove this once new nodes are granted individual identities and the
|
||||
# NodeAuthorizer is enabled.
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: kubelet-cluster-admin
|
||||
labels:
|
||||
k8s-addon: rbac.addons.k8s.io
|
||||
kubernetes.io/cluster-service: "true"
|
||||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: system:node
|
||||
subjects:
|
||||
- apiGroup: rbac.authorization.k8s.io
|
||||
kind: User
|
||||
name: kubelet
|
|
@ -0,0 +1,23 @@
|
|||
apiVersion: storage.k8s.io/v1
|
||||
kind: StorageClass
|
||||
metadata:
|
||||
name: default
|
||||
labels:
|
||||
k8s-addon: storage-aws.addons.k8s.io
|
||||
provisioner: kubernetes.io/aws-ebs
|
||||
parameters:
|
||||
type: gp2
|
||||
|
||||
---
|
||||
|
||||
apiVersion: storage.k8s.io/v1
|
||||
kind: StorageClass
|
||||
metadata:
|
||||
name: gp2
|
||||
annotations:
|
||||
storageclass.beta.kubernetes.io/is-default-class: "true"
|
||||
labels:
|
||||
k8s-addon: storage-aws.addons.k8s.io
|
||||
provisioner: kubernetes.io/aws-ebs
|
||||
parameters:
|
||||
type: gp2
|
|
@ -0,0 +1,13 @@
|
|||
apiVersion: storage.k8s.io/v1
|
||||
kind: StorageClass
|
||||
metadata:
|
||||
name: standard
|
||||
annotations:
|
||||
storageclass.beta.kubernetes.io/is-default-class: "true"
|
||||
labels:
|
||||
kubernetes.io/cluster-service: "true"
|
||||
k8s-addon: storage-gce.addons.k8s.io
|
||||
addonmanager.kubernetes.io/mode: EnsureExists
|
||||
provisioner: kubernetes.io/gce-pd
|
||||
parameters:
|
||||
type: pd-standard
|
|
@ -49,7 +49,7 @@ type Keystore interface {
|
|||
// (if the certificate is found but not keypair, that is not an error: only the cert will be returned)
|
||||
FindKeypair(name string) (*pki.Certificate, *pki.PrivateKey, error)
|
||||
|
||||
CreateKeypair(name string, template *x509.Certificate, privateKey *pki.PrivateKey) (*pki.Certificate, error)
|
||||
CreateKeypair(signer string, name string, template *x509.Certificate, privateKey *pki.PrivateKey) (*pki.Certificate, error)
|
||||
|
||||
// StoreKeypair writes the keypair to the store
|
||||
StoreKeypair(id string, cert *pki.Certificate, privateKey *pki.PrivateKey) error
|
||||
|
@ -67,11 +67,13 @@ type CAStore interface {
|
|||
Keystore
|
||||
|
||||
// Cert returns the primary specified certificate
|
||||
// For createIfMissing=false, using FindCert is preferred
|
||||
Cert(name string, createIfMissing bool) (*pki.Certificate, error)
|
||||
// CertificatePool returns all active certificates with the specified id
|
||||
CertificatePool(name string, createIfMissing bool) (*CertificatePool, error)
|
||||
PrivateKey(name string, createIfMissing bool) (*pki.PrivateKey, error)
|
||||
|
||||
// FindCert returns the specified certificate, if it exists, or nil if not found
|
||||
FindCert(name string) (*pki.Certificate, error)
|
||||
FindPrivateKey(name string) (*pki.PrivateKey, error)
|
||||
|
||||
|
|
|
@ -42,8 +42,8 @@ type ClientsetCAStore struct {
|
|||
namespace string
|
||||
clientset kopsinternalversion.KopsInterface
|
||||
|
||||
mutex sync.Mutex
|
||||
cacheCaKeyset *keyset
|
||||
mutex sync.Mutex
|
||||
cachedCaKeysets map[string]*keyset
|
||||
}
|
||||
|
||||
var _ CAStore = &ClientsetCAStore{}
|
||||
|
@ -51,42 +51,44 @@ var _ CAStore = &ClientsetCAStore{}
|
|||
// NewClientsetCAStore is the constructor for ClientsetCAStore
|
||||
func NewClientsetCAStore(clientset kopsinternalversion.KopsInterface, namespace string) CAStore {
|
||||
c := &ClientsetCAStore{
|
||||
clientset: clientset,
|
||||
namespace: namespace,
|
||||
clientset: clientset,
|
||||
namespace: namespace,
|
||||
cachedCaKeysets: make(map[string]*keyset),
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
// readCAKeypairs retrieves the CA keypair, generating a new keypair if not found
|
||||
func (c *ClientsetCAStore) readCAKeypairs() (*keyset, error) {
|
||||
func (c *ClientsetCAStore) readCAKeypairs(id string) (*keyset, error) {
|
||||
c.mutex.Lock()
|
||||
defer c.mutex.Unlock()
|
||||
|
||||
if c.cacheCaKeyset != nil {
|
||||
return c.cacheCaKeyset, nil
|
||||
cached := c.cachedCaKeysets[id]
|
||||
if cached != nil {
|
||||
return cached, nil
|
||||
}
|
||||
|
||||
keyset, err := c.loadKeyset(CertificateId_CA)
|
||||
keyset, err := c.loadKeyset(id)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if keyset == nil {
|
||||
keyset, err = c.generateCACertificate()
|
||||
keyset, err = c.generateCACertificate(id)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
}
|
||||
c.cacheCaKeyset = keyset
|
||||
c.cachedCaKeysets[id] = keyset
|
||||
|
||||
return keyset, nil
|
||||
}
|
||||
|
||||
// generateCACertificate creates and stores a CA keypair
|
||||
// Should be called with the mutex held, to prevent concurrent creation of different keys
|
||||
func (c *ClientsetCAStore) generateCACertificate() (*keyset, error) {
|
||||
func (c *ClientsetCAStore) generateCACertificate(id string) (*keyset, error) {
|
||||
template := BuildCAX509Template()
|
||||
|
||||
caRsaKey, err := rsa.GenerateKey(crypto_rand.Reader, 2048)
|
||||
|
@ -104,7 +106,7 @@ func (c *ClientsetCAStore) generateCACertificate() (*keyset, error) {
|
|||
return nil, err
|
||||
}
|
||||
|
||||
return c.storeAndVerifyKeypair(CertificateId_CA, caCertificate, caPrivateKey)
|
||||
return c.storeAndVerifyKeypair(id, caCertificate, caPrivateKey)
|
||||
}
|
||||
|
||||
// keyset is a parsed Keyset
|
||||
|
@ -310,12 +312,12 @@ func (c *ClientsetCAStore) List() ([]*KeystoreItem, error) {
|
|||
}
|
||||
|
||||
// IssueCert implements CAStore::IssueCert
|
||||
func (c *ClientsetCAStore) IssueCert(name string, serial *big.Int, privateKey *pki.PrivateKey, template *x509.Certificate) (*pki.Certificate, error) {
|
||||
func (c *ClientsetCAStore) IssueCert(signer string, name string, serial *big.Int, privateKey *pki.PrivateKey, template *x509.Certificate) (*pki.Certificate, error) {
|
||||
glog.Infof("Issuing new certificate: %q", name)
|
||||
|
||||
template.SerialNumber = serial
|
||||
|
||||
caKeyset, err := c.readCAKeypairs()
|
||||
caKeyset, err := c.readCAKeypairs(signer)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -416,10 +418,10 @@ func (c *ClientsetCAStore) PrivateKey(name string, createIfMissing bool) (*pki.P
|
|||
}
|
||||
|
||||
// CreateKeypair implements CAStore::CreateKeypair
|
||||
func (c *ClientsetCAStore) CreateKeypair(id string, template *x509.Certificate, privateKey *pki.PrivateKey) (*pki.Certificate, error) {
|
||||
func (c *ClientsetCAStore) CreateKeypair(signer string, id string, template *x509.Certificate, privateKey *pki.PrivateKey) (*pki.Certificate, error) {
|
||||
serial := c.buildSerial()
|
||||
|
||||
cert, err := c.IssueCert(id, serial, privateKey, template)
|
||||
cert, err := c.IssueCert(signer, id, serial, privateKey, template)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
|
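The ClientsetCAStore hunks above replace the single cached CA keyset with one cached keyset per signer. A minimal sketch of the mutex-guarded, lazily populated map pattern used there, with the keyset contents stubbed out and the type names assumed:

```go
package main

import (
	"fmt"
	"sync"
)

// keyset stands in for the parsed CA keypair kept by the store.
type keyset struct{ id string }

type caStore struct {
	mutex  sync.Mutex
	cached map[string]*keyset
}

// readCAKeypairs returns the cached keyset for a signer, loading or
// generating it on first use; holding the lock across generation keeps two
// callers from creating different CAs for the same signer.
func (c *caStore) readCAKeypairs(signer string) *keyset {
	c.mutex.Lock()
	defer c.mutex.Unlock()

	if ks := c.cached[signer]; ks != nil {
		return ks
	}
	ks := &keyset{id: signer} // load-or-generate would happen here
	c.cached[signer] = ks
	return ks
}

func main() {
	c := &caStore{cached: make(map[string]*keyset)}
	fmt.Println(c.readCAKeypairs("ca").id)
	fmt.Println(c.readCAKeypairs("apiserver-aggregator-ca").id)
}
```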
@ -73,6 +73,7 @@ go_library(
|
|||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
size = "small",
|
||||
srcs = [
|
||||
"bootstrapchannelbuilder_test.go",
|
||||
"deepvalidate_test.go",
|
||||
|
@ -85,6 +86,9 @@ go_test(
|
|||
"tagbuilder_test.go",
|
||||
"validation_test.go",
|
||||
],
|
||||
data = [
|
||||
"//upup/pkg/fi/cloudup/tests:exported_testdata", # keep
|
||||
],
|
||||
library = ":go_default_library",
|
||||
deps = [
|
||||
"//pkg/apis/kops:go_default_library",
|
||||
|
@ -103,7 +107,4 @@ go_test(
|
|||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
],
|
||||
data = [
|
||||
"//upup/pkg/fi/cloudup/tests:exported_testdata", # keep
|
||||
],
|
||||
)
|
||||
|
|
|
@ -149,6 +149,26 @@ func (b *BootstrapChannelBuilder) buildManifest() (*channelsapi.Addons, map[stri
|
|||
}
|
||||
}
|
||||
|
||||
{
|
||||
key := "rbac.addons.k8s.io"
|
||||
version := "1.8.0"
|
||||
|
||||
{
|
||||
location := key + "/k8s-1.8.yaml"
|
||||
id := "k8s-1.8"
|
||||
|
||||
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
|
||||
Name: fi.String(key),
|
||||
Version: fi.String(version),
|
||||
Selector: map[string]string{"k8s-addon": key},
|
||||
Manifest: fi.String(location),
|
||||
KubernetesVersion: ">=1.8.0",
|
||||
Id: id,
|
||||
})
|
||||
manifests[key+"-"+id] = "addons/" + location
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
key := "limit-range.addons.k8s.io"
|
||||
version := "1.5.0"
|
||||
|
@ -238,32 +258,72 @@ func (b *BootstrapChannelBuilder) buildManifest() (*channelsapi.Addons, map[stri
|
|||
|
||||
if kops.CloudProviderID(b.cluster.Spec.CloudProvider) == kops.CloudProviderAWS {
|
||||
key := "storage-aws.addons.k8s.io"
|
||||
version := "1.6.0"
|
||||
version := "1.7.0"
|
||||
|
||||
location := key + "/v" + version + ".yaml"
|
||||
{
|
||||
id := "v1.7.0"
|
||||
location := key + "/" + id + ".yaml"
|
||||
|
||||
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
|
||||
Name: fi.String(key),
|
||||
Version: fi.String(version),
|
||||
Selector: map[string]string{"k8s-addon": key},
|
||||
Manifest: fi.String(location),
|
||||
})
|
||||
manifests[key] = "addons/" + location
|
||||
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
|
||||
Name: fi.String(key),
|
||||
Version: fi.String(version),
|
||||
Selector: map[string]string{"k8s-addon": key},
|
||||
Manifest: fi.String(location),
|
||||
KubernetesVersion: ">=1.7.0",
|
||||
Id: id,
|
||||
})
|
||||
manifests[key+"-"+id] = "addons/" + location
|
||||
}
|
||||
|
||||
{
|
||||
id := "v1.6.0"
|
||||
location := key + "/" + id + ".yaml"
|
||||
|
||||
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
|
||||
Name: fi.String(key),
|
||||
Version: fi.String(version),
|
||||
Selector: map[string]string{"k8s-addon": key},
|
||||
Manifest: fi.String(location),
|
||||
KubernetesVersion: "<1.7.0",
|
||||
Id: id,
|
||||
})
|
||||
manifests[key+"-"+id] = "addons/" + location
|
||||
}
|
||||
}
|
||||
|
||||
if kops.CloudProviderID(b.cluster.Spec.CloudProvider) == kops.CloudProviderGCE {
|
||||
key := "storage-gce.addons.k8s.io"
|
||||
version := "1.6.0"
|
||||
version := "1.7.0"
|
||||
|
||||
location := key + "/v" + version + ".yaml"
|
||||
{
|
||||
id := "v1.6.0"
|
||||
location := key + "/" + id + ".yaml"
|
||||
|
||||
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
|
||||
Name: fi.String(key),
|
||||
Version: fi.String(version),
|
||||
Selector: map[string]string{"k8s-addon": key},
|
||||
Manifest: fi.String(location),
|
||||
})
|
||||
manifests[key] = "addons/" + location
|
||||
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
|
||||
Name: fi.String(key),
|
||||
Version: fi.String(version),
|
||||
Selector: map[string]string{"k8s-addon": key},
|
||||
Manifest: fi.String(location),
|
||||
KubernetesVersion: "<1.7.0",
|
||||
Id: id,
|
||||
})
|
||||
manifests[key+"-"+id] = "addons/" + location
|
||||
}
|
||||
|
||||
{
|
||||
id := "v1.7.0"
|
||||
location := key + "/" + id + ".yaml"
|
||||
|
||||
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
|
||||
Name: fi.String(key),
|
||||
Version: fi.String(version),
|
||||
Selector: map[string]string{"k8s-addon": key},
|
||||
Manifest: fi.String(location),
|
||||
KubernetesVersion: ">=1.7.0",
|
||||
Id: id,
|
||||
})
|
||||
manifests[key+"-"+id] = "addons/" + location
|
||||
}
|
||||
}
|
||||
|
||||
// The role.kubernetes.io/networking is used to label anything related to a networking addin,
|
||||
|
@ -285,7 +345,7 @@ func (b *BootstrapChannelBuilder) buildManifest() (*channelsapi.Addons, map[stri
|
|||
|
||||
if b.cluster.Spec.Networking.Kopeio != nil {
|
||||
key := "networking.kope.io"
|
||||
version := "1.0.20170406"
|
||||
version := "1.0.20171015"
|
||||
|
||||
{
|
||||
location := key + "/pre-k8s-1.6.yaml"
|
||||
|
@ -559,5 +619,25 @@ func (b *BootstrapChannelBuilder) buildManifest() (*channelsapi.Addons, map[stri
|
|||
}
|
||||
}
|
||||
|
||||
if featureflag.EnableExternalCloudController.Enabled() && b.cluster.Spec.ExternalCloudControllerManager != nil {
|
||||
{
|
||||
key := "core.addons.k8s.io"
|
||||
version := "1.7.0"
|
||||
|
||||
location := key + "/k8s-1.7.yaml"
|
||||
id := "k8s-1.7-ccm"
|
||||
|
||||
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
|
||||
Name: fi.String(key),
|
||||
Version: fi.String(version),
|
||||
Selector: map[string]string{"k8s-addon": key},
|
||||
Manifest: fi.String(location),
|
||||
KubernetesVersion: ">=1.7.0",
|
||||
Id: id,
|
||||
})
|
||||
manifests[key+"-"+id] = "addons/" + location
|
||||
}
|
||||
}
|
||||
|
||||
return addons, manifests, nil
|
||||
}
|
||||
|
|
|
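The hunks above all follow one pattern: the same addon is appended to addons.Spec.Addons once per supported Kubernetes version range, each entry carrying its own Id, manifest path, and KubernetesVersion constraint, and the manifest map is keyed by key+"-"+id so every variant gets written out. A rough sketch of that pattern as a helper (hypothetical; kops itself repeats the block inline as shown in the diff, and this assumes the surrounding file's channelsapi and fi imports):

// addVersionedAddon is a hypothetical helper capturing the repeated block above:
// register one AddonSpec per Kubernetes version range for the same addon key.
func addVersionedAddon(addons *channelsapi.Addons, manifests map[string]string,
    key, version, id, versionRange string) {
    location := key + "/" + id + ".yaml"
    addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
        Name:              fi.String(key),
        Version:           fi.String(version),
        Selector:          map[string]string{"k8s-addon": key},
        Manifest:          fi.String(location),
        KubernetesVersion: versionRange, // e.g. ">=1.7.0" or "<1.7.0"
        Id:                id,
    })
    manifests[key+"-"+id] = "addons/" + location
}

With such a helper, the storage-aws block would reduce to two calls: one for id "v1.7.0" with ">=1.7.0" and one for id "v1.6.0" with "<1.7.0".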
@@ -99,11 +99,11 @@ func (_ *TargetPool) RenderGCE(t *gce.GCEAPITarget, a, e, changes *TargetPool) e
}

type terraformTargetPool struct {
  Name string `json:"name"`
  Description string `json:"description,omitempty"`
  HealthChecks []string `json:"health_checks,omitempty"`
  Instances []string `json:"instances,omitempty"`
  SessionAfinity string `json:"session_affnity,omitempty"`
  Name string `json:"name"`
  Description string `json:"description,omitempty"`
  HealthChecks []string `json:"health_checks,omitempty"`
  Instances []string `json:"instances,omitempty"`
  SessionAffinity string `json:"session_affinity,omitempty"`
}

func (_ *TargetPool) RenderTerraform(t *terraform.TerraformTarget, a, e, changes *TargetPool) error {
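The only change in this hunk is the typo fix in the struct field and tag: SessionAfinity/session_affnity becomes SessionAffinity/session_affinity, so the rendered Terraform JSON carries the key the provider expects. A self-contained sketch of what encoding/json now emits (the field values here are made up):

package main

import (
    "encoding/json"
    "fmt"
)

// Local copy of the corrected struct from the hunk above, for illustration only.
type terraformTargetPool struct {
    Name            string   `json:"name"`
    Description     string   `json:"description,omitempty"`
    HealthChecks    []string `json:"health_checks,omitempty"`
    Instances       []string `json:"instances,omitempty"`
    SessionAffinity string   `json:"session_affinity,omitempty"`
}

func main() {
    tp := terraformTargetPool{Name: "api-target-pool", SessionAffinity: "CLIENT_IP"}
    b, _ := json.MarshalIndent(tp, "", "  ")
    fmt.Println(string(b)) // the key is now "session_affinity", not "session_affnity"
}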
@@ -23,6 +23,13 @@ spec:
    selector:
      k8s-addon: kube-dns.addons.k8s.io
    version: 1.14.5
  - id: k8s-1.8
    kubernetesVersion: '>=1.8.0'
    manifest: rbac.addons.k8s.io/k8s-1.8.yaml
    name: rbac.addons.k8s.io
    selector:
      k8s-addon: rbac.addons.k8s.io
    version: 1.8.0
  - manifest: limit-range.addons.k8s.io/v1.5.0.yaml
    name: limit-range.addons.k8s.io
    selector:

@@ -42,22 +49,31 @@ spec:
    selector:
      k8s-addon: dns-controller.addons.k8s.io
    version: 1.7.1
  - manifest: storage-aws.addons.k8s.io/v1.6.0.yaml
  - id: v1.7.0
    kubernetesVersion: '>=1.7.0'
    manifest: storage-aws.addons.k8s.io/v1.7.0.yaml
    name: storage-aws.addons.k8s.io
    selector:
      k8s-addon: storage-aws.addons.k8s.io
    version: 1.6.0
    version: 1.7.0
  - id: v1.6.0
    kubernetesVersion: <1.7.0
    manifest: storage-aws.addons.k8s.io/v1.6.0.yaml
    name: storage-aws.addons.k8s.io
    selector:
      k8s-addon: storage-aws.addons.k8s.io
    version: 1.7.0
  - id: pre-k8s-1.6
    kubernetesVersion: <1.6.0
    manifest: networking.kope.io/pre-k8s-1.6.yaml
    name: networking.kope.io
    selector:
      role.kubernetes.io/networking: "1"
    version: 1.0.20170406
    version: 1.0.20171015
  - id: k8s-1.6
    kubernetesVersion: '>=1.6.0'
    manifest: networking.kope.io/k8s-1.6.yaml
    name: networking.kope.io
    selector:
      role.kubernetes.io/networking: "1"
    version: 1.0.20170406
    version: 1.0.20171015
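These expected-output manifests show how the channel now carries multiple entries per addon, distinguished by id and gated by kubernetesVersion, so only one variant matches any given cluster. The sketch below is a simplified, self-contained stand-in for that matching logic (the real channels tool uses a proper semver range implementation); it handles only the '>=x.y.z' and '<x.y.z' forms that appear above:

package main

import (
    "fmt"
    "strconv"
    "strings"
)

// parse turns "1.7.0" into comparable integers. Deliberately naive.
func parse(v string) [3]int {
    var out [3]int
    for i, p := range strings.SplitN(v, ".", 3) {
        n, _ := strconv.Atoi(p)
        out[i] = n
    }
    return out
}

func less(a, b [3]int) bool {
    for i := 0; i < 3; i++ {
        if a[i] != b[i] {
            return a[i] < b[i]
        }
    }
    return false
}

// matches handles only the ">=x.y.z" and "<x.y.z" forms used in the manifest.
func matches(rangeExpr, clusterVersion string) bool {
    cv := parse(clusterVersion)
    switch {
    case strings.HasPrefix(rangeExpr, ">="):
        return !less(cv, parse(strings.TrimPrefix(rangeExpr, ">=")))
    case strings.HasPrefix(rangeExpr, "<"):
        return less(cv, parse(strings.TrimPrefix(rangeExpr, "<")))
    default:
        return true // no constraint: the addon applies to every version
    }
}

func main() {
    // The two storage-aws variants from the expected manifest above.
    variants := map[string]string{
        "v1.7.0": ">=1.7.0",
        "v1.6.0": "<1.7.0",
    }
    for id, expr := range variants {
        fmt.Printf("storage-aws %s matches a 1.8.0 cluster: %v\n", id, matches(expr, "1.8.0"))
    }
}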
@@ -23,6 +23,13 @@ spec:
    selector:
      k8s-addon: kube-dns.addons.k8s.io
    version: 1.14.5
  - id: k8s-1.8
    kubernetesVersion: '>=1.8.0'
    manifest: rbac.addons.k8s.io/k8s-1.8.yaml
    name: rbac.addons.k8s.io
    selector:
      k8s-addon: rbac.addons.k8s.io
    version: 1.8.0
  - manifest: limit-range.addons.k8s.io/v1.5.0.yaml
    name: limit-range.addons.k8s.io
    selector:

@@ -42,8 +49,17 @@ spec:
    selector:
      k8s-addon: dns-controller.addons.k8s.io
    version: 1.7.1
  - manifest: storage-aws.addons.k8s.io/v1.6.0.yaml
  - id: v1.7.0
    kubernetesVersion: '>=1.7.0'
    manifest: storage-aws.addons.k8s.io/v1.7.0.yaml
    name: storage-aws.addons.k8s.io
    selector:
      k8s-addon: storage-aws.addons.k8s.io
    version: 1.6.0
    version: 1.7.0
  - id: v1.6.0
    kubernetesVersion: <1.7.0
    manifest: storage-aws.addons.k8s.io/v1.6.0.yaml
    name: storage-aws.addons.k8s.io
    selector:
      k8s-addon: storage-aws.addons.k8s.io
    version: 1.7.0
@@ -23,6 +23,13 @@ spec:
    selector:
      k8s-addon: kube-dns.addons.k8s.io
    version: 1.14.5
  - id: k8s-1.8
    kubernetesVersion: '>=1.8.0'
    manifest: rbac.addons.k8s.io/k8s-1.8.yaml
    name: rbac.addons.k8s.io
    selector:
      k8s-addon: rbac.addons.k8s.io
    version: 1.8.0
  - manifest: limit-range.addons.k8s.io/v1.5.0.yaml
    name: limit-range.addons.k8s.io
    selector:

@@ -42,11 +49,20 @@ spec:
    selector:
      k8s-addon: dns-controller.addons.k8s.io
    version: 1.7.1
  - manifest: storage-aws.addons.k8s.io/v1.6.0.yaml
  - id: v1.7.0
    kubernetesVersion: '>=1.7.0'
    manifest: storage-aws.addons.k8s.io/v1.7.0.yaml
    name: storage-aws.addons.k8s.io
    selector:
      k8s-addon: storage-aws.addons.k8s.io
    version: 1.6.0
    version: 1.7.0
  - id: v1.6.0
    kubernetesVersion: <1.7.0
    manifest: storage-aws.addons.k8s.io/v1.6.0.yaml
    name: storage-aws.addons.k8s.io
    selector:
      k8s-addon: storage-aws.addons.k8s.io
    version: 1.7.0
  - id: pre-k8s-1.6
    kubernetesVersion: <1.6.0
    manifest: networking.weave/pre-k8s-1.6.yaml
@@ -31,6 +31,7 @@ import (
var wellKnownCertificateTypes = map[string]string{
  "client": "ExtKeyUsageClientAuth,KeyUsageDigitalSignature",
  "server": "ExtKeyUsageServerAuth,KeyUsageDigitalSignature,KeyUsageKeyEncipherment",
  "ca": "CA,KeyUsageCRLSign,KeyUsageCertSign",
}

//go:generate fitask -type=Keypair

@@ -41,6 +42,9 @@ type Keypair struct {
  Type string `json:"type"`
  AlternateNames []string `json:"alternateNames"`
  AlternateNameTasks []fi.Task `json:"alternateNameTasks"`

  // Signer is the keypair to use to sign, for when we want to use an alternative CA
  Signer *Keypair
}

var _ fi.HasCheckExisting = &Keypair{}

@@ -51,6 +55,12 @@ func (e *Keypair) CheckExisting(c *fi.Context) bool {
  return true
}

var _ fi.CompareWithID = &Keypair{}

func (e *Keypair) CompareWithID() *string {
  return &e.Subject
}

func (e *Keypair) Find(c *fi.Context) (*Keypair, error) {
  name := fi.StringValue(e.Name)
  if name == "" {

@@ -84,6 +94,8 @@ func (e *Keypair) Find(c *fi.Context) (*Keypair, error) {
    Type: buildTypeDescription(cert.Certificate),
  }

  actual.Signer = &Keypair{Subject: pkixNameToString(&cert.Certificate.Issuer)}

  // Avoid spurious changes
  actual.Lifecycle = e.Lifecycle

@@ -133,7 +145,7 @@ func (e *Keypair) normalize(c *fi.Context) error {
  return nil
}

func (s *Keypair) CheckChanges(a, e, changes *Keypair) error {
func (_ *Keypair) CheckChanges(a, e, changes *Keypair) error {
  if a != nil {
    if changes.Name != nil {
      return fi.CannotChangeField("Name")

@@ -184,7 +196,11 @@ func (_ *Keypair) Render(c *fi.Context, a, e, changes *Keypair) error {
    }
  }

  cert, err = c.Keystore.CreateKeypair(name, template, privateKey)
  signer := fi.CertificateId_CA
  if e.Signer != nil {
    signer = fi.StringValue(e.Signer.Name)
  }
  cert, err = c.Keystore.CreateKeypair(signer, name, template, privateKey)
  if err != nil {
    return err
  }

@@ -256,8 +272,10 @@ func buildCertificateTemplateForType(certificateType string) (*x509.Certificate,
      return nil, fmt.Errorf("unrecognized certificate option: %v", t)
    }
    template.ExtKeyUsage = append(template.ExtKeyUsage, ku)
  } else if t == "CA" {
    template.IsCA = true
  } else {
    return nil, fmt.Errorf("unrecognized certificate option: %v", t)
    return nil, fmt.Errorf("unrecognized certificate option: %q", t)
  }
}
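The new Signer field lets a Keypair task be signed by something other than the default cluster CA; when it is nil, Render falls back to fi.CertificateId_CA as before. A hypothetical usage fragment, not a complete program: the names and values are invented, only the field names come from the task above, and it assumes the task package is imported as fitasks alongside the fi package.

// A CA keypair of its own...
altCA := &fitasks.Keypair{
    Name:    fi.String("alternative-ca"), // invented name, for illustration
    Subject: "cn=alternative-ca",
    Type:    "ca", // matches the new "ca" entry in wellKnownCertificateTypes
}

// ...and a client certificate signed by it rather than by the cluster CA.
client := &fitasks.Keypair{
    Name:    fi.String("alternative-client"),
    Subject: "cn=alternative-client",
    Type:    "client",
    Signer:  altCA, // nil would mean: sign with fi.CertificateId_CA
}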
@@ -51,12 +51,12 @@ func NewKubernetesKeystore(client kubernetes.Interface, namespace string) fi.Key
  return c
}

func (c *KubernetesKeystore) issueCert(id string, serial *big.Int, privateKey *pki.PrivateKey, template *x509.Certificate) (*pki.Certificate, error) {
func (c *KubernetesKeystore) issueCert(signer string, id string, serial *big.Int, privateKey *pki.PrivateKey, template *x509.Certificate) (*pki.Certificate, error) {
  glog.Infof("Issuing new certificate: %q", id)

  template.SerialNumber = serial

  caCert, caKey, err := c.FindKeypair(fi.CertificateId_CA)
  caCert, caKey, err := c.FindKeypair(signer)
  if err != nil {
    return nil, err
  }

@@ -107,11 +107,11 @@ func (c *KubernetesKeystore) FindKeypair(id string) (*pki.Certificate, *pki.Priv
  return keypair.Certificate, keypair.PrivateKey, nil
}

func (c *KubernetesKeystore) CreateKeypair(id string, template *x509.Certificate, privateKey *pki.PrivateKey) (*pki.Certificate, error) {
func (c *KubernetesKeystore) CreateKeypair(signer string, id string, template *x509.Certificate, privateKey *pki.PrivateKey) (*pki.Certificate, error) {
  t := time.Now().UnixNano()
  serial := pki.BuildPKISerial(t)

  cert, err := c.issueCert(id, serial, privateKey, template)
  cert, err := c.issueCert(signer, id, serial, privateKey, template)
  if err != nil {
    return nil, err
  }
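Both keystores now take the signing CA as an explicit argument instead of hard-coding fi.CertificateId_CA. The abstraction implied by these signatures is roughly the following sketch; the actual interface lives in the fi package, may carry more methods, and this fragment assumes the crypto/x509 and kops pki imports of the surrounding files.

// keystore is a sketch of the contract that both KubernetesKeystore and
// VFSCAStore appear to satisfy after this change: look up a keypair by id,
// and create a new keypair signed by the named signer CA.
type keystore interface {
    FindKeypair(id string) (*pki.Certificate, *pki.PrivateKey, error)
    CreateKeypair(signer string, id string, template *x509.Certificate, privateKey *pki.PrivateKey) (*pki.Certificate, error)
}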
@@ -40,16 +40,21 @@ import (
type VFSCAStore struct {
  basedir vfs.Path

  mutex sync.Mutex
  cacheCaCertificates *certificates
  cacheCaPrivateKeys *privateKeys
  mutex sync.Mutex
  cachedCAs map[string]*cachedEntry
}

type cachedEntry struct {
  certificates *certificates
  privateKeys *privateKeys
}

var _ CAStore = &VFSCAStore{}

func NewVFSCAStore(basedir vfs.Path) CAStore {
  c := &VFSCAStore{
    basedir: basedir,
    basedir: basedir,
    cachedCAs: make(map[string]*cachedEntry),
  }

  return c

@@ -60,15 +65,16 @@ func (s *VFSCAStore) VFSPath() vfs.Path {
}

// Retrieves the CA keypair, generating a new keypair if not found
func (s *VFSCAStore) readCAKeypairs() (*certificates, *privateKeys, error) {
func (s *VFSCAStore) readCAKeypairs(id string) (*certificates, *privateKeys, error) {
  s.mutex.Lock()
  defer s.mutex.Unlock()

  if s.cacheCaPrivateKeys != nil {
    return s.cacheCaCertificates, s.cacheCaPrivateKeys, nil
  cached := s.cachedCAs[id]
  if cached != nil {
    return cached.certificates, cached.privateKeys, nil
  }

  caCertificates, err := s.loadCertificates(s.buildCertificatePoolPath(CertificateId_CA))
  caCertificates, err := s.loadCertificates(s.buildCertificatePoolPath(id))
  if err != nil {
    return nil, nil, err
  }

@@ -76,7 +82,7 @@ func (s *VFSCAStore) readCAKeypairs() (*certificates, *privateKeys, error) {
  var caPrivateKeys *privateKeys

  if caCertificates != nil {
    caPrivateKeys, err = s.loadPrivateKeys(s.buildPrivateKeyPoolPath(CertificateId_CA))
    caPrivateKeys, err = s.loadPrivateKeys(s.buildPrivateKeyPoolPath(id))
    if err != nil {
      return nil, nil, err
    }

@@ -88,16 +94,16 @@ func (s *VFSCAStore) readCAKeypairs() (*certificates, *privateKeys, error) {
  }

  if caPrivateKeys == nil {
    caCertificates, caPrivateKeys, err = s.generateCACertificate()
    caCertificates, caPrivateKeys, err = s.generateCACertificate(id)
    if err != nil {
      return nil, nil, err
    }

  }
  s.cacheCaCertificates = caCertificates
  s.cacheCaPrivateKeys = caPrivateKeys
  cached = &cachedEntry{certificates: caCertificates, privateKeys: caPrivateKeys}
  s.cachedCAs[id] = cached

  return s.cacheCaCertificates, s.cacheCaPrivateKeys, nil
  return cached.certificates, cached.privateKeys, nil
}

func BuildCAX509Template() *x509.Certificate {

@@ -117,7 +123,7 @@ func BuildCAX509Template() *x509.Certificate {

// Creates and stores CA keypair
// Should be called with the mutex held, to prevent concurrent creation of different keys
func (c *VFSCAStore) generateCACertificate() (*certificates, *privateKeys, error) {
func (c *VFSCAStore) generateCACertificate(id string) (*certificates, *privateKeys, error) {
  template := BuildCAX509Template()

  caRsaKey, err := rsa.GenerateKey(crypto_rand.Reader, 2048)

@@ -135,14 +141,14 @@ func (c *VFSCAStore) generateCACertificate() (*certificates, *privateKeys, error
  t := time.Now().UnixNano()
  serial := pki.BuildPKISerial(t)

  keyPath := c.buildPrivateKeyPath(CertificateId_CA, serial)
  keyPath := c.buildPrivateKeyPath(id, serial)
  err = c.storePrivateKey(caPrivateKey, keyPath)
  if err != nil {
    return nil, nil, err
  }

  // Make double-sure it round-trips
  privateKeys, err := c.loadPrivateKeys(c.buildPrivateKeyPoolPath(CertificateId_CA))
  privateKeys, err := c.loadPrivateKeys(c.buildPrivateKeyPoolPath(id))
  if err != nil {
    return nil, nil, err
  }

@@ -150,14 +156,14 @@ func (c *VFSCAStore) generateCACertificate() (*certificates, *privateKeys, error
    return nil, nil, fmt.Errorf("failed to round-trip CA private key")
  }

  certPath := c.buildCertificatePath(CertificateId_CA, serial)
  certPath := c.buildCertificatePath(id, serial)
  err = c.storeCertificate(caCertificate, certPath)
  if err != nil {
    return nil, nil, err
  }

  // Make double-sure it round-trips
  certificates, err := c.loadCertificates(c.buildCertificatePoolPath(CertificateId_CA))
  certificates, err := c.loadCertificates(c.buildCertificatePoolPath(id))
  if err != nil {
    return nil, nil, err
  }

@@ -414,25 +420,34 @@ func (c *VFSCAStore) MirrorTo(basedir vfs.Path) error {
  return vfs.CopyTree(c.basedir, basedir)
}

func (c *VFSCAStore) IssueCert(id string, serial *big.Int, privateKey *pki.PrivateKey, template *x509.Certificate) (*pki.Certificate, error) {
func (c *VFSCAStore) IssueCert(signer string, id string, serial *big.Int, privateKey *pki.PrivateKey, template *x509.Certificate) (*pki.Certificate, error) {
  glog.Infof("Issuing new certificate: %q", id)

  template.SerialNumber = serial

  caCertificates, caPrivateKeys, err := c.readCAKeypairs()
  if err != nil {
    return nil, err
  var cert *pki.Certificate
  if template.IsCA {
    var err error
    cert, err = pki.SignNewCertificate(privateKey, template, nil, nil)
    if err != nil {
      return nil, err
    }
  } else {
    caCertificates, caPrivateKeys, err := c.readCAKeypairs(signer)
    if err != nil {
      return nil, err
    }

    if caPrivateKeys == nil || caPrivateKeys.Primary() == nil {
      return nil, fmt.Errorf("ca key for %q was not found; cannot issue certificates", signer)
    }
    cert, err = pki.SignNewCertificate(privateKey, template, caCertificates.Primary().Certificate, caPrivateKeys.Primary())
    if err != nil {
      return nil, err
    }
  }

  if caPrivateKeys == nil || caPrivateKeys.Primary() == nil {
    return nil, fmt.Errorf("ca.key was not found; cannot issue certificates")
  }
  cert, err := pki.SignNewCertificate(privateKey, template, caCertificates.Primary().Certificate, caPrivateKeys.Primary())
  if err != nil {
    return nil, err
  }

  err = c.StoreKeypair(id, cert, privateKey)
  err := c.StoreKeypair(id, cert, privateKey)
  if err != nil {
    return nil, err
  }

@@ -557,7 +572,7 @@ func (c *VFSCAStore) loadOnePrivateKey(p vfs.Path) (*pki.PrivateKey, error) {
func (c *VFSCAStore) FindPrivateKey(id string) (*pki.PrivateKey, error) {
  var keys *privateKeys
  if id == CertificateId_CA {
    _, caPrivateKeys, err := c.readCAKeypairs()
    _, caPrivateKeys, err := c.readCAKeypairs(id)
    if err != nil {
      return nil, err
    }

@@ -592,10 +607,10 @@ func (c *VFSCAStore) PrivateKey(id string, createIfMissing bool) (*pki.PrivateKe

}

func (c *VFSCAStore) CreateKeypair(id string, template *x509.Certificate, privateKey *pki.PrivateKey) (*pki.Certificate, error) {
func (c *VFSCAStore) CreateKeypair(signer string, id string, template *x509.Certificate, privateKey *pki.PrivateKey) (*pki.Certificate, error) {
  serial := c.buildSerial()

  cert, err := c.IssueCert(id, serial, privateKey, template)
  cert, err := c.IssueCert(signer, id, serial, privateKey, template)
  if err != nil {
    return nil, err
  }
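VFSCAStore previously cached a single CA keypair; with alternative signers it needs one cache entry per CA id, which is what the cachedCAs map plus the existing mutex provide. A self-contained sketch of that pattern with simplified stand-in types:

package castore

import "sync"

// caEntry is a simplified stand-in for the certificate and private-key pools
// that VFSCAStore caches per CA id.
type caEntry struct {
    certs string
    keys  string
}

type caCache struct {
    mutex  sync.Mutex
    byID   map[string]*caEntry
    create func(id string) (*caEntry, error) // e.g. load-or-generate from the VFS
}

// get mirrors readCAKeypairs above: serve from the cache when possible,
// otherwise build the entry while still holding the mutex so two concurrent
// callers cannot generate different keypairs for the same CA id.
func (c *caCache) get(id string) (*caEntry, error) {
    c.mutex.Lock()
    defer c.mutex.Unlock()

    if e := c.byID[id]; e != nil {
        return e, nil
    }
    e, err := c.create(id)
    if err != nil {
        return nil, err
    }
    c.byID[id] = e
    return e, nil
}

Holding the mutex across the load-or-generate step is what prevents concurrent creation of different keys for the same id, matching the comment on generateCACertificate in the diff.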