mirror of https://github.com/kubernetes/kops.git
Merge pull request #9378 from johngmyers/refactor-certs-3
Issue aws-iam-authenticator cert in nodeup
commit e7d5d323bf
@@ -52,7 +52,6 @@ go_library(
        "//pkg/kubemanifest:go_default_library",
        "//pkg/model/components:go_default_library",
        "//pkg/nodelabels:go_default_library",
        "//pkg/pki:go_default_library",
        "//pkg/rbac:go_default_library",
        "//pkg/systemd:go_default_library",
        "//pkg/tokens:go_default_library",
@@ -245,46 +245,31 @@ func (b *KubeAPIServerBuilder) writeAuthenticationConfig(c *fi.ModelBuilderConte
	}

	{
		certificate, err := b.NodeupModelContext.KeyStore.FindCert(id)
		if err != nil {
			return fmt.Errorf("error fetching %q certificate from keystore: %v", id, err)
		}
		if certificate == nil {
			return fmt.Errorf("certificate %q not found", id)
		}

		certificateData, err := certificate.AsBytes()
		if err != nil {
			return fmt.Errorf("error encoding %q certificate: %v", id, err)
		issueCert := &nodetasks.IssueCert{
			Name:    id,
			Signer:  fi.CertificateIDCA,
			Type:    "server",
			Subject: nodetasks.PKIXName{CommonName: id},
			AlternateNames: []string{
				"localhost",
				"127.0.0.1",
			},
		}
		c.AddTask(issueCert)
		certificate, privateKey, _ := issueCert.GetResources()

		c.AddTask(&nodetasks.File{
			Path:     "/srv/kubernetes/aws-iam-authenticator/cert.pem",
			Contents: fi.NewBytesResource(certificateData),
			Contents: certificate,
			Type:     nodetasks.FileType_File,
			Mode:     fi.String("600"),
			Owner:    fi.String("aws-iam-authenticator"),
			Group:    fi.String("aws-iam-authenticator"),
		})
	}

	{
		privateKey, err := b.NodeupModelContext.KeyStore.FindPrivateKey(id)
		if err != nil {
			return fmt.Errorf("error fetching %q private key from keystore: %v", id, err)
		}
		if privateKey == nil {
			return fmt.Errorf("private key %q not found", id)
		}

		keyData, err := privateKey.AsBytes()
		if err != nil {
			return fmt.Errorf("error encoding %q private key: %v", id, err)
		}

		c.AddTask(&nodetasks.File{
			Path:     "/srv/kubernetes/aws-iam-authenticator/key.pem",
			Contents: fi.NewBytesResource(keyData),
			Contents: privateKey,
			Type:     nodetasks.FileType_File,
			Mode:     fi.String("600"),
			Owner:    fi.String("aws-iam-authenticator"),
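The hunk above replaces the keystore lookups (FindCert/FindPrivateKey) with a nodetasks.IssueCert task whose resources feed the cert.pem and key.pem File tasks, so the certificate is minted on the node instead of being fetched from the cluster keystore. For illustration only, the following self-contained sketch (standard library, not the kops pkg/pki code) shows the shape of the certificate such a task asks for: a CA-signed "server" certificate with CN aws-iam-authenticator and localhost/127.0.0.1 as alternate names. Errors are ignored for brevity.

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"fmt"
	"math/big"
	"net"
	"time"
)

func main() {
	// CA key and self-signed CA certificate standing in for the cluster CA ("ca").
	caKey, _ := rsa.GenerateKey(rand.Reader, 2048)
	caTemplate := &x509.Certificate{
		SerialNumber:          big.NewInt(1),
		Subject:               pkix.Name{CommonName: "kubernetes"},
		NotBefore:             time.Now(),
		NotAfter:              time.Now().AddDate(10, 0, 0),
		IsCA:                  true,
		KeyUsage:              x509.KeyUsageCertSign | x509.KeyUsageDigitalSignature,
		BasicConstraintsValid: true,
	}
	caDER, _ := x509.CreateCertificate(rand.Reader, caTemplate, caTemplate, &caKey.PublicKey, caKey)
	caCert, _ := x509.ParseCertificate(caDER)

	// Server certificate equivalent to the IssueCert task above: CN=aws-iam-authenticator,
	// SANs localhost and 127.0.0.1, server-auth usage, 455-day base validity.
	serverKey, _ := rsa.GenerateKey(rand.Reader, 2048)
	serverTemplate := &x509.Certificate{
		SerialNumber: big.NewInt(2),
		Subject:      pkix.Name{CommonName: "aws-iam-authenticator"},
		DNSNames:     []string{"localhost"},
		IPAddresses:  []net.IP{net.ParseIP("127.0.0.1")},
		NotBefore:    time.Now(),
		NotAfter:     time.Now().AddDate(0, 0, 455),
		KeyUsage:     x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
		ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
	}
	serverDER, _ := x509.CreateCertificate(rand.Reader, serverTemplate, caCert, &serverKey.PublicKey, caKey)

	certPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: serverDER})
	fmt.Printf("cert.pem (%d bytes), signed by %q\n", len(certPEM), caCert.Subject.CommonName)
}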
@@ -190,3 +190,10 @@ func TestKubeAPIServerBuilder(t *testing.T) {
		return builder.Build(target)
	})
}

func TestAwsIamAuthenticator(t *testing.T) {
	RunGoldenTest(t, "tests/golden/awsiam", "kube-apiserver", func(nodeupModelContext *NodeupModelContext, target *fi.ModelBuilderContext) error {
		builder := KubeAPIServerBuilder{NodeupModelContext: nodeupModelContext}
		return builder.Build(target)
	})
}
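TestAwsIamAuthenticator reuses the RunGoldenTest helper: the tasks produced by the builder are rendered and compared against the checked-in files under tests/golden/awsiam. A rough, hypothetical sketch of that golden-file pattern follows; the helper name runGoldenSketch and the -update flag are assumptions for illustration, not the kops implementation.

package model

import (
	"flag"
	"os"
	"path/filepath"
	"testing"
)

var update = flag.Bool("update", false, "rewrite golden files instead of comparing")

// runGoldenSketch compares rendered task YAML against dir/tasks.yaml, or
// rewrites the file when the -update flag is set.
func runGoldenSketch(t *testing.T, dir string, actual []byte) {
	t.Helper()
	golden := filepath.Join(dir, "tasks.yaml")

	if *update {
		if err := os.WriteFile(golden, actual, 0644); err != nil {
			t.Fatalf("writing golden file: %v", err)
		}
		return
	}

	expected, err := os.ReadFile(golden)
	if err != nil {
		t.Fatalf("reading golden file: %v", err)
	}
	if string(expected) != string(actual) {
		t.Errorf("output differs from %s; rerun with -update to regenerate", golden)
	}
}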
@@ -17,7 +17,6 @@ limitations under the License.
package model

import (
	"crypto/x509/pkix"
	"fmt"
	"path"
	"path/filepath"
@@ -34,7 +33,6 @@ import (
	"k8s.io/kops/pkg/apis/kops"
	"k8s.io/kops/pkg/flagbuilder"
	"k8s.io/kops/pkg/nodelabels"
	"k8s.io/kops/pkg/pki"
	"k8s.io/kops/pkg/rbac"
	"k8s.io/kops/pkg/systemd"
	"k8s.io/kops/upup/pkg/fi"
@@ -113,11 +111,10 @@ func (b *KubeletBuilder) Build(c *fi.ModelBuilderContext) error {
		if b.IsMaster {
			klog.V(3).Info("kubelet bootstrap tokens are enabled and running on a master")

			task, err := b.buildMasterKubeletKubeconfig()
			err := b.buildMasterKubeletKubeconfig(c)
			if err != nil {
				return err
			}
			c.AddTask(task)
		}
	} else {
		kubeconfig, err := b.BuildPKIKubeconfig("kubelet")
@@ -553,49 +550,23 @@ func (b *KubeletBuilder) buildKubeletConfigSpec() (*kops.KubeletConfigSpec, erro
}

// buildMasterKubeletKubeconfig builds a kubeconfig for the master kubelet, self-signing the kubelet cert
func (b *KubeletBuilder) buildMasterKubeletKubeconfig() (*nodetasks.File, error) {
func (b *KubeletBuilder) buildMasterKubeletKubeconfig(c *fi.ModelBuilderContext) error {
	nodeName, err := b.NodeName()
	if err != nil {
		return nil, fmt.Errorf("error getting NodeName: %v", err)
		return fmt.Errorf("error getting NodeName: %v", err)
	}
	certName := nodetasks.PKIXName{
		CommonName:   fmt.Sprintf("system:node:%s", nodeName),
		Organization: []string{rbac.NodesGroup},
	}

	req := &pki.IssueCertRequest{
		Signer: fi.CertificateIDCA,
		Type:   "client",
		Subject: pkix.Name{
			CommonName:   fmt.Sprintf("system:node:%s", nodeName),
			Organization: []string{rbac.NodesGroup},
		},
		MinValidDays: 455,
	}

	certificate, privateKey, caCert, err := pki.IssueCert(req, b.KeyStore)
	if err != nil {
		return nil, fmt.Errorf("error signing certificate for master kubelet: %v", err)
	}

	caBytes, err := caCert.AsBytes()
	if err != nil {
		return nil, fmt.Errorf("failed to get certificate authority data: %s", err)
	}
	certBytes, err := certificate.AsBytes()
	if err != nil {
		return nil, fmt.Errorf("failed to get certificate data: %s", err)
	}
	keyBytes, err := privateKey.AsBytes()
	if err != nil {
		return nil, fmt.Errorf("failed to get private key data: %s", err)
	}

	content, err := b.BuildKubeConfig("kubelet", caBytes, certBytes, keyBytes)
	if err != nil {
		return nil, err
	}

	return &nodetasks.File{
	kubeconfig := b.BuildIssuedKubeconfig("kubelet", certName, c)
	c.AddTask(&nodetasks.File{
		Path:     b.KubeletKubeConfig(),
		Contents: fi.NewStringResource(content),
		Contents: kubeconfig,
		Type:     nodetasks.FileType_File,
		Mode:     s("600"),
	}, nil
	})

	return nil
}
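buildMasterKubeletKubeconfig now registers its output through BuildIssuedKubeconfig instead of issuing the certificate inline and templating the file itself. As a rough, standard-library-only sketch of the kubeconfig assembly this replaces (the server URL, user name, and function name below are illustrative assumptions, not values taken from kops):

package main

import (
	"encoding/base64"
	"fmt"
)

// buildKubeconfig embeds base64-encoded CA, client certificate, and key data
// into a kubeconfig document, the way the removed BuildKubeConfig path did.
func buildKubeconfig(server string, caPEM, certPEM, keyPEM []byte) string {
	b64 := base64.StdEncoding.EncodeToString
	return fmt.Sprintf(`apiVersion: v1
kind: Config
clusters:
- name: local
  cluster:
    server: %s
    certificate-authority-data: %s
users:
- name: kubelet
  user:
    client-certificate-data: %s
    client-key-data: %s
contexts:
- name: local
  context:
    cluster: local
    user: kubelet
current-context: local
`, server, b64(caPEM), b64(certPEM), b64(keyPEM))
}

func main() {
	// Placeholder PEM data; in nodeup these come from the issued certificate task.
	fmt.Println(buildKubeconfig("https://127.0.0.1", []byte("ca"), []byte("cert"), []byte("key")))
}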
@@ -0,0 +1,68 @@
apiVersion: kops.k8s.io/v1alpha2
kind: Cluster
metadata:
  name: minimal.example.com
spec:
  kubernetesApiAccess:
  - 0.0.0.0/0
  authentication:
    aws: {}
  channel: stable
  cloudProvider: aws
  configBase: memfs://clusters.example.com/minimal.example.com
  etcdClusters:
  - cpuRequest: 200m
    etcdMembers:
    - instanceGroup: master-us-test-1a
      name: us-test-1a
    memoryRequest: 100Mi
    name: main
    provider: Manager
    backups:
      backupStore: memfs://clusters.example.com/minimal.example.com/backups/etcd-main
  - cpuRequest: 100m
    etcdMembers:
    - instanceGroup: master-us-test-1a
      name: us-test-1a
    memoryRequest: 100Mi
    name: events
    provider: Manager
    backups:
      backupStore: memfs://clusters.example.com/minimal.example.com/backups/etcd-events
  kubelet:
    anonymousAuth: false
  kubernetesVersion: v1.18.0
  masterInternalName: api.internal.minimal.example.com
  masterPublicName: api.minimal.example.com
  networkCIDR: 172.20.0.0/16
  networking:
    kubenet: {}
  nonMasqueradeCIDR: 100.64.0.0/10
  sshAccess:
  - 0.0.0.0/0
  topology:
    masters: public
    nodes: public
  subnets:
  - cidr: 172.20.32.0/19
    name: us-test-1a
    type: Public
    zone: us-test-1a

---

apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
  name: master-us-test-1a
  labels:
    kops.k8s.io/cluster: minimal.example.com
spec:
  associatePublicIp: true
  image: ami-1234
  machineType: m3.medium
  maxSize: 1
  minSize: 1
  role: Master
  subnets:
  - us-test-1a
@@ -0,0 +1,268 @@
contents: |
  apiVersion: ""
  clusters:
  - cluster:
      certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUMyRENDQWNDZ0F3SUJBZ0lSQUxKWEFrVmo5NjR0cTY3d01TSThvSlF3RFFZSktvWklodmNOQVFFTEJRQXcKRlRFVE1CRUdBMVVFQXhNS2EzVmlaWEp1WlhSbGN6QWVGdzB4TnpFeU1qY3lNelV5TkRCYUZ3MHlOekV5TWpjeQpNelV5TkRCYU1CVXhFekFSQmdOVkJBTVRDbXQxWW1WeWJtVjBaWE13Z2dFaU1BMEdDU3FHU0liM0RRRUJBUVVBCkE0SUJEd0F3Z2dFS0FvSUJBUURnbkNrU210bm1meEVnUzNxTlBhVUNINVFPQkdESC9pbkhiV0NPRExCQ0s5Z2QKWEVjQmw3RlZ2OFQya0ZyMURZYjBIVkR0TUk3dGl4UlZGRExna3dObFczNHh3V2RaWEI3R2VvRmdVMXhXT1FTWQpPQUNDOEpnWVRRLzEzOUhCRXZncTRzZWo2N3ArL3MvU05jdzM0S2s3SEl1RmhsazFyUms1a01leEtJbEpCS1AxCllZVVlldHNKL1FwVU9rcUo1SFc0R29ldEU3Nll0SG5PUmZZdm55YnZpU01yaDJ3R0dhTjZyL3M0Q2hPYUliWkMKQW44L1lpUEtHSURhWkdwajZHWG5tWEFSUlgvVElkZ1NRa0x3dDBhVERCblBaNFh2dHBJOGFhTDhEWUpJcUF6QQpOUEgyYjQvdU55bGF0NWpEbzBiMEc1NGFnTWk5NysyQVVyQzlVVVhwQWdNQkFBR2pJekFoTUE0R0ExVWREd0VCCi93UUVBd0lCQmpBUEJnTlZIUk1CQWY4RUJUQURBUUgvTUEwR0NTcUdTSWIzRFFFQkN3VUFBNElCQVFCVkdSMnIKaHpYelJNVTV3cmlQUUFKU2Nzek5PUnZvQnBYZlpvWjA5Rkl1cHVkRnhCVlUzZDRoVjlTdEtuUWdQU0dBNVhRTwpIRTk3K0J4SkR1QS9yQjVvQlVzTUJqYzd5MWNkZS9UNmhtaTNyTG9FWUJTblN1ZENPWEpFNEc5LzBmOGJ5QUplCnJOOCtObzFyMlZnWnZaaDZwNzRURWtYdi9sM0hCUFdNN0lkVVYwSE85SkRoU2dPVkYxZnlRS0p4UnVMSlI4anQKTzZtUEgyVVgwdk13VmE0anZ3dGtkZHFrMk9BZFlRdkg5cmJEampiemFpVzBLbm1kdWVSbzkyS0hBTjdCc0RaeQpWcFhIcHFvMUt6ZzdEM2ZwYVhDZjVzaTdscXFyZEpWWEg0SkM3Mnp4c1BlaHFnaThlSXVxT0JraURXbVJ4QXhoCjh5R2VSeDlBYmtuSGg0SWEKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
      server: https://127.0.0.1:21362/authenticate
    name: aws-iam-authenticator
  contexts:
  - context:
      cluster: aws-iam-authenticator
      user: kube-apiserver
    name: webhook
  current-context: webhook
  kind: ""
  users:
  - name: kube-apiserver
    user: {}
mode: "600"
path: /etc/kubernetes/authn.config
type: file
---
contents: |
  apiVersion: v1
  kind: Pod
  metadata:
    annotations:
      dns.alpha.kubernetes.io/external: api.minimal.example.com
      dns.alpha.kubernetes.io/internal: api.internal.minimal.example.com
      scheduler.alpha.kubernetes.io/critical-pod: ""
    creationTimestamp: null
    labels:
      k8s-app: kube-apiserver
    name: kube-apiserver
    namespace: kube-system
  spec:
    containers:
    - args:
      - --allow-privileged=true
      - --anonymous-auth=false
      - --apiserver-count=1
      - --authentication-token-webhook-config-file=/etc/kubernetes/authn.config
      - --authorization-mode=AlwaysAllow
      - --bind-address=0.0.0.0
      - --client-ca-file=/srv/kubernetes/ca.crt
      - --cloud-provider=aws
      - --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,NodeRestriction,ResourceQuota
      - --etcd-cafile=/etc/kubernetes/pki/kube-apiserver/etcd-ca.crt
      - --etcd-certfile=/etc/kubernetes/pki/kube-apiserver/etcd-client.crt
      - --etcd-keyfile=/etc/kubernetes/pki/kube-apiserver/etcd-client.key
      - --etcd-servers-overrides=/events#https://127.0.0.1:4002
      - --etcd-servers=https://127.0.0.1:4001
      - --insecure-bind-address=127.0.0.1
      - --insecure-port=0
      - --kubelet-client-certificate=/srv/kubernetes/kubelet-api.crt
      - --kubelet-client-key=/srv/kubernetes/kubelet-api.key
      - --kubelet-preferred-address-types=InternalIP,Hostname,ExternalIP
      - --proxy-client-cert-file=/srv/kubernetes/apiserver-aggregator.cert
      - --proxy-client-key-file=/srv/kubernetes/apiserver-aggregator.key
      - --requestheader-allowed-names=aggregator
      - --requestheader-client-ca-file=/srv/kubernetes/apiserver-aggregator-ca.cert
      - --requestheader-extra-headers-prefix=X-Remote-Extra-
      - --requestheader-group-headers=X-Remote-Group
      - --requestheader-username-headers=X-Remote-User
      - --secure-port=443
      - --service-cluster-ip-range=100.64.0.0/13
      - --storage-backend=etcd3
      - --tls-cert-file=/srv/kubernetes/server.cert
      - --tls-private-key-file=/srv/kubernetes/server.key
      - --v=2
      - --logtostderr=false
      - --alsologtostderr
      - --log-file=/var/log/kube-apiserver.log
      command:
      - /usr/local/bin/kube-apiserver
      image: k8s.gcr.io/kube-apiserver:v1.18.0
      livenessProbe:
        httpGet:
          host: 127.0.0.1
          path: /healthz
          port: 443
          scheme: HTTPS
        initialDelaySeconds: 45
        timeoutSeconds: 15
      name: kube-apiserver
      ports:
      - containerPort: 443
        hostPort: 443
        name: https
      resources:
        requests:
          cpu: 150m
      volumeMounts:
      - mountPath: /var/log/kube-apiserver.log
        name: logfile
      - mountPath: /etc/ssl
        name: etcssl
        readOnly: true
      - mountPath: /etc/pki/tls
        name: etcpkitls
        readOnly: true
      - mountPath: /etc/pki/ca-trust
        name: etcpkica-trust
        readOnly: true
      - mountPath: /usr/share/ssl
        name: usrsharessl
        readOnly: true
      - mountPath: /usr/ssl
        name: usrssl
        readOnly: true
      - mountPath: /usr/lib/ssl
        name: usrlibssl
        readOnly: true
      - mountPath: /usr/local/openssl
        name: usrlocalopenssl
        readOnly: true
      - mountPath: /var/ssl
        name: varssl
        readOnly: true
      - mountPath: /etc/openssl
        name: etcopenssl
        readOnly: true
      - mountPath: /etc/kubernetes/pki/kube-apiserver
        name: pki
      - mountPath: /srv/kubernetes
        name: srvkube
        readOnly: true
      - mountPath: /srv/sshproxy
        name: srvsshproxy
        readOnly: true
      - mountPath: /etc/kubernetes/authn.config
        name: authn-config
        readOnly: true
    hostNetwork: true
    priorityClassName: system-cluster-critical
    tolerations:
    - key: CriticalAddonsOnly
      operator: Exists
    volumes:
    - hostPath:
        path: /var/log/kube-apiserver.log
      name: logfile
    - hostPath:
        path: /etc/ssl
      name: etcssl
    - hostPath:
        path: /etc/pki/tls
      name: etcpkitls
    - hostPath:
        path: /etc/pki/ca-trust
      name: etcpkica-trust
    - hostPath:
        path: /usr/share/ssl
      name: usrsharessl
    - hostPath:
        path: /usr/ssl
      name: usrssl
    - hostPath:
        path: /usr/lib/ssl
      name: usrlibssl
    - hostPath:
        path: /usr/local/openssl
      name: usrlocalopenssl
    - hostPath:
        path: /var/ssl
      name: varssl
    - hostPath:
        path: /etc/openssl
      name: etcopenssl
    - hostPath:
        path: /etc/kubernetes/pki/kube-apiserver
        type: DirectoryOrCreate
      name: pki
    - hostPath:
        path: /srv/kubernetes
      name: srvkube
    - hostPath:
        path: /srv/sshproxy
      name: srvsshproxy
    - hostPath:
        path: /etc/kubernetes/authn.config
      name: authn-config
  status: {}
path: /etc/kubernetes/manifests/kube-apiserver.manifest
type: file
---
mode: "0755"
path: /srv/kubernetes
type: directory
---
contents:
  task:
    Name: aws-iam-authenticator
    alternateNames:
    - localhost
    - 127.0.0.1
    signer: ca
    subject:
      CommonName: aws-iam-authenticator
    type: server
group: aws-iam-authenticator
mode: "600"
owner: aws-iam-authenticator
path: /srv/kubernetes/aws-iam-authenticator/cert.pem
type: file
---
contents:
  task:
    Name: aws-iam-authenticator
    alternateNames:
    - localhost
    - 127.0.0.1
    signer: ca
    subject:
      CommonName: aws-iam-authenticator
    type: server
group: aws-iam-authenticator
mode: "600"
owner: aws-iam-authenticator
path: /srv/kubernetes/aws-iam-authenticator/key.pem
type: file
---
contents:
  task:
    Name: kubelet-api
    signer: ca
    subject:
      CommonName: kubelet-api
    type: client
mode: "0644"
path: /srv/kubernetes/kubelet-api.crt
type: file
---
contents:
  task:
    Name: kubelet-api
    signer: ca
    subject:
      CommonName: kubelet-api
    type: client
mode: "0600"
path: /srv/kubernetes/kubelet-api.key
type: file
---
contents: ""
ifNotExists: true
mode: "0400"
path: /var/log/kube-apiserver.log
type: file
---
Name: aws-iam-authenticator
alternateNames:
- localhost
- 127.0.0.1
signer: ca
subject:
  CommonName: aws-iam-authenticator
type: server
---
Name: kubelet-api
signer: ca
subject:
  CommonName: kubelet-api
type: client
---
Name: aws-iam-authenticator
home: /srv/kubernetes/aws-iam-authenticator
shell: /sbin/nologin
uid: 10000
@@ -218,24 +218,6 @@ func (b *PKIModelBuilder) Build(c *fi.ModelBuilderContext) error {
		c.AddTask(t)
	}

	if b.Cluster.Spec.Authentication != nil {
		if b.KopsModelContext.Cluster.Spec.Authentication.Aws != nil {
			alternateNames := []string{
				"localhost",
				"127.0.0.1",
			}

			t := &fitasks.Keypair{
				Name:           fi.String("aws-iam-authenticator"),
				Subject:        "cn=aws-iam-authenticator",
				Type:           "server",
				AlternateNames: alternateNames,
				Signer:         defaultCA,
			}
			c.AddTask(t)
		}
	}

	// @TODO this is VERY presumptuous, i'm going on the basis we can make it configurable in the future.
	// But I'm conscious not to do too much work on bootstrap tokens as it might overlay further down the
	// line with the machines api
@@ -20,14 +20,10 @@ import (
	"crypto/x509"
	"crypto/x509/pkix"
	"fmt"
	"hash/fnv"
	"math/big"
	"net"
	"sort"
	"strings"
	"time"

	"k8s.io/klog"
)

var wellKnownCertificateTypes = map[string]string{
@@ -49,10 +45,8 @@ type IssueCertRequest struct {

	// PrivateKey is the private key for this certificate. If nil, a new private key will be generated.
	PrivateKey *PrivateKey
	// MinValidDays is the lower bound on the certificate validity, in days. If specified, up to 30 days
	// will be added so that certificate generated at the same time on different hosts will be unlikely to
	// expire at the same time. The default is 10 years (without the up to 30 day skew).
	MinValidDays int
	// Validity is the certificate validity. The default is 10 years.
	Validity time.Duration

	// Serial is the certificate serial number. If nil, a random number will be generated.
	Serial *big.Int
@@ -144,23 +138,8 @@ func IssueCert(request *IssueCertRequest, keystore Keystore) (issuedCertificate
		}
	}

	// Skew the certificate lifetime by up to 30 days based on information about the generating node.
	// This is so that different nodes created at the same time have the certificates they generated
	// expire at different times, but all certificates on a given node expire around the same time.
	if request.MinValidDays != 0 {
		hash := fnv.New32()
		addrs, err := net.InterfaceAddrs()
		sort.Slice(addrs, func(i, j int) bool {
			return addrs[i].String() < addrs[j].String()
		})
		if err == nil {
			for _, addr := range addrs {
				_, _ = hash.Write([]byte(addr.String()))
			}
		} else {
			klog.Warningf("cannot skew certificate lifetime: failed to get interface addresses: %v", err)
		}
		template.NotAfter = time.Now().Add(time.Hour * 24 * time.Duration(request.MinValidDays)).Add(time.Hour * time.Duration(hash.Sum32()%(30*24))).UTC()
	if request.Validity != 0 {
		template.NotAfter = time.Now().Add(request.Validity).UTC()
	}

	certificate, err := signNewCertificate(privateKey, template, signer, caPrivateKey)
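With this change the request carries a single Validity duration instead of MinValidDays, and the ten-year default applies when it is zero. A minimal sketch of that fallback, with illustrative names only:

package main

import (
	"fmt"
	"time"
)

// notAfter picks the certificate expiry from a Validity duration, falling back
// to ten years when no validity was requested.
func notAfter(validity time.Duration) time.Time {
	if validity == 0 {
		validity = 10 * 365 * 24 * time.Hour // the default documented on IssueCertRequest
	}
	return time.Now().Add(validity).UTC()
}

func main() {
	fmt.Println(notAfter(0))                    // roughly ten years out
	fmt.Println(notAfter(455 * 24 * time.Hour)) // the nodeup IssueCert base validity before skew
}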
@@ -93,7 +93,7 @@ func TestIssueCert(t *testing.T) {
				Subject: pkix.Name{
					CommonName: "Test client",
				},
				MinValidDays: 365,
				Validity:     time.Hour * 24 * 365,
			},
			expectedKeyUsage:    x509.KeyUsageDigitalSignature,
			expectedExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
@@ -133,10 +133,10 @@ func TestIssueCert(t *testing.T) {
	} {
		t.Run(tc.name, func(t *testing.T) {
			var minExpectedValidity int64
			if tc.req.MinValidDays == 0 {
			if tc.req.Validity == 0 {
				minExpectedValidity = time.Now().Add(time.Hour * 10 * 365 * 24).Unix()
			} else {
				minExpectedValidity = time.Now().Add(time.Hour * 24 * time.Duration(tc.req.MinValidDays)).Unix()
				minExpectedValidity = time.Now().Add(tc.req.Validity).Unix()
			}

			var keystore Keystore
@@ -202,10 +202,10 @@ func TestIssueCert(t *testing.T) {

			// validity
			var maxExpectedValidity int64
			if tc.req.MinValidDays == 0 {
			if tc.req.Validity == 0 {
				maxExpectedValidity = time.Now().Add(time.Hour * 10 * 365 * 24).Unix()
			} else {
				maxExpectedValidity = time.Now().Add(time.Hour * 24 * time.Duration(tc.req.MinValidDays+30)).Unix()
				maxExpectedValidity = time.Now().Add(tc.req.Validity).Unix()
			}
			assert.Less(t, cert.NotBefore.Unix(), time.Now().Add(time.Hour*-47).Unix(), "NotBefore")
			assert.GreaterOrEqual(t, cert.NotAfter.Unix(), minExpectedValidity, "NotAfter")
@@ -20,8 +20,12 @@ import (
	"bytes"
	"crypto/x509/pkix"
	"fmt"
	"hash/fnv"
	"io"
	"net"
	"path/filepath"
	"sort"
	"time"

	"k8s.io/klog"
	"k8s.io/kops/pkg/pki"
@@ -120,11 +124,28 @@ func (i *IssueCert) AddFileTasks(c *fi.ModelBuilderContext, dir string, name str
}

func (e *IssueCert) Run(c *fi.Context) error {
	// Skew the certificate lifetime by up to 30 days based on information about the generating node.
	// This is so that different nodes created at the same time have the certificates they generated
	// expire at different times, but all certificates on a given node expire around the same time.
	hash := fnv.New32()
	addrs, err := net.InterfaceAddrs()
	sort.Slice(addrs, func(i, j int) bool {
		return addrs[i].String() < addrs[j].String()
	})
	if err == nil {
		for _, addr := range addrs {
			_, _ = hash.Write([]byte(addr.String()))
		}
	} else {
		klog.Warningf("cannot skew certificate lifetime: failed to get interface addresses: %v", err)
	}
	validHours := (455 * 24) + (hash.Sum32() % (30 * 24))

	req := &pki.IssueCertRequest{
		Signer:       e.Signer,
		Type:         e.Type,
		Subject:      e.Subject.toPKIXName(),
		MinValidDays: 455,
		Signer:   e.Signer,
		Type:     e.Type,
		Subject:  e.Subject.toPKIXName(),
		Validity: time.Hour * time.Duration(validHours),
	}

	klog.Infof("signing certificate for %q", e.Name)
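The 0–30 day lifetime skew moves out of pkg/pki and into IssueCert.Run, where it is folded into the Validity passed to IssueCertRequest. A self-contained sketch of the same computation (the error check is reordered slightly for simplicity; otherwise it mirrors the hunk above):

package main

import (
	"fmt"
	"hash/fnv"
	"net"
	"sort"
	"time"
)

// skewedValidity hashes the host's sorted interface addresses with FNV-32 and
// spreads certificate expiry across an extra 0-30 days on top of the 455-day
// base, so certificates minted at the same moment on different hosts do not
// all expire together, while certificates on one host expire around the same time.
func skewedValidity() time.Duration {
	addrs, err := net.InterfaceAddrs()
	if err != nil {
		// Without addresses there is nothing to skew on; fall back to the base validity.
		return 455 * 24 * time.Hour
	}
	sort.Slice(addrs, func(i, j int) bool { return addrs[i].String() < addrs[j].String() })

	hash := fnv.New32()
	for _, addr := range addrs {
		_, _ = hash.Write([]byte(addr.String()))
	}
	validHours := (455 * 24) + (hash.Sum32() % (30 * 24))
	return time.Hour * time.Duration(validHours)
}

func main() {
	fmt.Printf("certificate validity on this host: %v\n", skewedValidity())
}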