Merge pull request #9422 from johngmyers/trim-loader

Remove dead cloudup code
This commit is contained in:
Kubernetes Prow Robot 2020-06-28 13:42:14 -07:00 committed by GitHub
commit 679b9db9a1
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
12 changed files with 126 additions and 549 deletions

View File

@ -66,7 +66,6 @@ type CreateClusterOptions struct {
ClusterName string
Yes bool
Target string
Models string
Cloud string
Zones []string
MasterZones []string
@ -167,7 +166,6 @@ type CreateClusterOptions struct {
func (o *CreateClusterOptions) InitDefaults() {
o.Yes = false
o.Target = cloudup.TargetDirect
o.Models = strings.Join(cloudup.CloudupModels, ",")
o.Networking = "kubenet"
o.Channel = api.DefaultChannel
o.Topology = api.TopologyPublic
@ -282,7 +280,6 @@ func NewCmdCreateCluster(f *util.Factory, out io.Writer) *cobra.Command {
cmd.Flags().BoolVarP(&options.Yes, "yes", "y", options.Yes, "Specify --yes to immediately create the cluster")
cmd.Flags().StringVar(&options.Target, "target", options.Target, fmt.Sprintf("Valid targets: %s, %s, %s. Set this flag to %s if you want kops to generate terraform", cloudup.TargetDirect, cloudup.TargetTerraform, cloudup.TargetCloudformation, cloudup.TargetTerraform))
cmd.Flags().StringVar(&options.Models, "model", options.Models, "Models to apply (separate multiple models with commas)")
// Configuration / state location
if featureflag.EnableSeparateConfigBase.Enabled() {
@ -1358,7 +1355,6 @@ func RunCreateCluster(ctx context.Context, f *util.Factory, out io.Writer, c *Cr
updateClusterOptions.Yes = c.Yes
updateClusterOptions.Target = c.Target
updateClusterOptions.Models = c.Models
updateClusterOptions.OutDir = c.OutDir
// SSHPublicKey has already been mapped

View File

@ -61,7 +61,6 @@ var (
type UpdateClusterOptions struct {
Yes bool
Target string
Models string
OutDir string
SSHPublicKey string
RunTasksOptions fi.RunTasksOptions
@ -77,7 +76,6 @@ type UpdateClusterOptions struct {
func (o *UpdateClusterOptions) InitDefaults() {
o.Yes = false
o.Target = "direct"
o.Models = strings.Join(cloudup.CloudupModels, ",")
o.SSHPublicKey = ""
o.OutDir = ""
o.CreateKubecfg = true
@ -111,7 +109,6 @@ func NewCmdUpdateCluster(f *util.Factory, out io.Writer) *cobra.Command {
cmd.Flags().BoolVarP(&options.Yes, "yes", "y", options.Yes, "Create cloud resources, without --yes update is in dry run mode")
cmd.Flags().StringVar(&options.Target, "target", options.Target, "Target - direct, terraform, cloudformation")
cmd.Flags().StringVar(&options.Models, "model", options.Models, "Models to apply (separate multiple models with commas)")
cmd.Flags().StringVar(&options.SSHPublicKey, "ssh-public-key", options.SSHPublicKey, "SSH public key to use (deprecated: use kops create secret instead)")
cmd.Flags().StringVar(&options.OutDir, "out", options.OutDir, "Path to write any local output")
cmd.Flags().BoolVar(&options.CreateKubecfg, "create-kube-config", options.CreateKubecfg, "Will control automatically creating the kube config file on your local filesystem")
@ -240,7 +237,6 @@ func RunUpdateCluster(ctx context.Context, f *util.Factory, clusterName string,
Cluster: cluster,
DryRun: isDryrun,
RunTasksOptions: &c.RunTasksOptions,
Models: strings.Split(c.Models, ","),
OutDir: c.OutDir,
Phase: phase,
TargetName: targetName,

View File

@ -93,7 +93,6 @@ kops create cluster [flags]
--master-tenancy string The tenancy of the master group on AWS. Can either be default or dedicated.
--master-volume-size int32 Set instance volume size (in GB) for masters
--master-zones strings Zones in which to run masters (must be an odd number)
--model string Models to apply (separate multiple models with commas) (default "proto,cloudup")
--network-cidr string Set to override the default network CIDR
--networking string Networking mode to use. kubenet, external, weave, flannel-vxlan (or flannel), flannel-udp, calico, canal, kube-router, amazon-vpc-routed-eni, cilium, cni, lyftvpc. (default "kubenet")
--node-count int32 Set number of nodes

View File

@ -28,7 +28,6 @@ kops update cluster [flags]
--create-kube-config Will control automatically creating the kube config file on your local filesystem (default true)
-h, --help help for cluster
--lifecycle-overrides strings comma separated list of phase overrides, example: SecurityGroups=Ignore,InternetGateway=ExistsAndWarnIfChanges
--model string Models to apply (separate multiple models with commas) (default "proto,cloudup")
--out string Path to write any local output
--phase string Subset of tasks to run: assets, cluster, network, security
--ssh-public-key string SSH public key to use (deprecated: use kops create secret instead)

View File

@ -65,7 +65,6 @@ A user can build a `cloudup.ApplyClusterCmd` defined [here](https://github.com/k
```go
applyCmd := &cloudup.ApplyClusterCmd{
Cluster: cluster,
Models: []string{"config", "proto", "cloudup"}, // ${GOPATH}/src/k8s.io/kops/upup/pkg/fi/cloudup/apply_cluster.go:52
Clientset: clientset,
TargetName: "target", // ${GOPATH}/src/k8s.io/kops/upup/pkg/fi/cloudup/target.go:19
OutDir: c.OutDir,
@ -94,31 +93,18 @@ This is where we enter the **core** of `kops` logic. The starting point can be f
**Note** As it stands the `FindVPCInfo()` function is a defined member of the interface. This is AWS only, and will eventually be pulled out of the interface. For now please implement the function as a no-op.
#### d) Models
#### d) The model
A model is an evolution from the static YAML models in `kops v1.4`. There are a lot of improvements planned for these in the next major kops release. The models are indexed by a string, with the 3 primary models being
The model is what maps an ambiguous Cluster Spec (defined earlier) to **tasks**. Each **task** is a representation of an API request against a cloud.
If you plan on implementing a new cloud, one option would be to define a new model context type, and build custom model builders for your cloud's objects.
```
config
proto
cloudup
```
Models are what map an ambiguous Cluster Spec (defined earlier) to **tasks**. Each **task** is a representation of an API request against a cloud. If you plan on implementing a new cloud, one option would be to define a new model, and build custom model code for it.
The `cloudup` model is what is used to map a cluster spec with `cluster.Spec.CloudProvider` = `aws`.
**Note** this name is probably a misnomer, and is a reflection of the evolution of the `kops` core.
The existing `cloudup` model code can be found [here](https://github.com/kubernetes/kops/tree/master/pkg/model).
**Note** that there is room here to redefine the directory structure based on models. EG: Moving these models into a new package, and renaming the model key.
The existing model code can be found [here](https://github.com/kubernetes/kops/tree/master/pkg/model).
Once a model builder has been defined as in [here](https://github.com/kubernetes/kops/blob/master/upup/pkg/fi/cloudup/apply_cluster.go#L373) the code will automatically be called.
From within the builder, we notice there is concrete logic for each builder. The logic will dictate which tasks need to be called in order to apply a resource to a cloud. The tasks are added by calling the `AddTask()` function as in [here](https://github.com/kubernetes/kops/blob/master/pkg/model/network.go#L69).
Once the models have been parsed, all the tasks should have been set.
Once the model builders have been called, all the tasks should have been set.
#### e) Tasks

View File

@ -60,18 +60,12 @@ go_library(
"//upup/models:go_default_library",
"//upup/pkg/fi:go_default_library",
"//upup/pkg/fi/assettasks:go_default_library",
"//upup/pkg/fi/cloudup/alitasks:go_default_library",
"//upup/pkg/fi/cloudup/aliup:go_default_library",
"//upup/pkg/fi/cloudup/awstasks:go_default_library",
"//upup/pkg/fi/cloudup/awsup:go_default_library",
"//upup/pkg/fi/cloudup/cloudformation:go_default_library",
"//upup/pkg/fi/cloudup/do:go_default_library",
"//upup/pkg/fi/cloudup/dotasks:go_default_library",
"//upup/pkg/fi/cloudup/gce:go_default_library",
"//upup/pkg/fi/cloudup/gcetasks:go_default_library",
"//upup/pkg/fi/cloudup/openstack:go_default_library",
"//upup/pkg/fi/cloudup/openstacktasks:go_default_library",
"//upup/pkg/fi/cloudup/spotinsttasks:go_default_library",
"//upup/pkg/fi/cloudup/terraform:go_default_library",
"//upup/pkg/fi/fitasks:go_default_library",
"//upup/pkg/fi/loader:go_default_library",

View File

@ -52,20 +52,13 @@ import (
"k8s.io/kops/pkg/templates"
"k8s.io/kops/upup/models"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/cloudup/alitasks"
"k8s.io/kops/upup/pkg/fi/cloudup/aliup"
"k8s.io/kops/upup/pkg/fi/cloudup/awstasks"
"k8s.io/kops/upup/pkg/fi/cloudup/awsup"
"k8s.io/kops/upup/pkg/fi/cloudup/cloudformation"
"k8s.io/kops/upup/pkg/fi/cloudup/do"
"k8s.io/kops/upup/pkg/fi/cloudup/dotasks"
"k8s.io/kops/upup/pkg/fi/cloudup/gce"
"k8s.io/kops/upup/pkg/fi/cloudup/gcetasks"
"k8s.io/kops/upup/pkg/fi/cloudup/openstack"
"k8s.io/kops/upup/pkg/fi/cloudup/openstacktasks"
"k8s.io/kops/upup/pkg/fi/cloudup/spotinsttasks"
"k8s.io/kops/upup/pkg/fi/cloudup/terraform"
"k8s.io/kops/upup/pkg/fi/fitasks"
"k8s.io/kops/util/pkg/architectures"
"k8s.io/kops/util/pkg/hashing"
"k8s.io/kops/util/pkg/vfs"
@ -82,8 +75,6 @@ var (
AlphaAllowGCE = featureflag.New("AlphaAllowGCE", featureflag.Bool(false))
// AlphaAllowALI is a feature flag that gates aliyun support while it is alpha
AlphaAllowALI = featureflag.New("AlphaAllowALI", featureflag.Bool(false))
// CloudupModels a list of supported models
CloudupModels = []string{"proto", "cloudup"}
// OldestSupportedKubernetesVersion is the oldest kubernetes version that is supported in Kops
OldestSupportedKubernetesVersion = "1.11.0"
// OldestRecommendedKubernetesVersion is the oldest kubernetes version that is not deprecated in Kops
@ -101,9 +92,6 @@ type ApplyClusterCmd struct {
// NodeUpHash is the sha hash
NodeUpHash map[architectures.Architecture]string
// Models is a list of cloudup models to apply
Models []string
// TargetName specifies how we are operating e.g. direct to GCE, or AWS, or dry-run, or terraform
TargetName string
@ -168,10 +156,6 @@ func (c *ApplyClusterCmd) Run(ctx context.Context) error {
}
}
if c.Models == nil {
c.Models = CloudupModels
}
modelStore, err := findModelStore()
if err != nil {
return err
@ -331,14 +315,6 @@ func (c *ApplyClusterCmd) Run(ctx context.Context) error {
checkExisting := true
l.AddTypes(map[string]interface{}{
"keypair": &fitasks.Keypair{},
"secret": &fitasks.Secret{},
"managedFile": &fitasks.ManagedFile{},
"mirrorKeystore": &fitasks.MirrorKeystore{},
"mirrorSecrets": &fitasks.MirrorSecrets{},
})
region := ""
project := ""
@ -371,16 +347,6 @@ func (c *ApplyClusterCmd) Run(ctx context.Context) error {
}
modelContext.SSHPublicKeys = sshPublicKeys
l.AddTypes(map[string]interface{}{
"Disk": &gcetasks.Disk{},
"Instance": &gcetasks.Instance{},
"InstanceTemplate": &gcetasks.InstanceTemplate{},
"Network": &gcetasks.Network{},
"InstanceGroupManager": &gcetasks.InstanceGroupManager{},
"FirewallRule": &gcetasks.FirewallRule{},
"Address": &gcetasks.Address{},
})
}
case kops.CloudProviderDO:
@ -394,60 +360,12 @@ func (c *ApplyClusterCmd) Run(ctx context.Context) error {
}
modelContext.SSHPublicKeys = sshPublicKeys
l.AddTypes(map[string]interface{}{
"volume": &dotasks.Volume{},
"droplet": &dotasks.Droplet{},
"loadbalancer": &dotasks.LoadBalancer{},
})
}
case kops.CloudProviderAWS:
{
awsCloud := cloud.(awsup.AWSCloud)
region = awsCloud.Region()
l.AddTypes(map[string]interface{}{
// EC2
"elasticIP": &awstasks.ElasticIP{},
"instance": &awstasks.Instance{},
"instanceElasticIPAttachment": &awstasks.InstanceElasticIPAttachment{},
"instanceVolumeAttachment": &awstasks.InstanceVolumeAttachment{},
"ebsVolume": &awstasks.EBSVolume{},
"sshKey": &awstasks.SSHKey{},
// IAM
"iamInstanceProfile": &awstasks.IAMInstanceProfile{},
"iamInstanceProfileRole": &awstasks.IAMInstanceProfileRole{},
"iamRole": &awstasks.IAMRole{},
"iamRolePolicy": &awstasks.IAMRolePolicy{},
// VPC / Networking
"dhcpOptions": &awstasks.DHCPOptions{},
"internetGateway": &awstasks.InternetGateway{},
"route": &awstasks.Route{},
"routeTable": &awstasks.RouteTable{},
"routeTableAssociation": &awstasks.RouteTableAssociation{},
"securityGroup": &awstasks.SecurityGroup{},
"securityGroupRule": &awstasks.SecurityGroupRule{},
"subnet": &awstasks.Subnet{},
"vpc": &awstasks.VPC{},
"ngw": &awstasks.NatGateway{},
"vpcDHDCPOptionsAssociation": &awstasks.VPCDHCPOptionsAssociation{},
// ELB
"loadBalancer": &awstasks.LoadBalancer{},
"loadBalancerAttachment": &awstasks.LoadBalancerAttachment{},
// Autoscaling
"autoscalingGroup": &awstasks.AutoscalingGroup{},
"launchConfiguration": &awstasks.LaunchConfiguration{},
// Spotinst
"spotinstElastigroup": &spotinsttasks.Elastigroup{},
"spotinstOcean": &spotinsttasks.Ocean{},
"spotinstLaunchSpec": &spotinsttasks.LaunchSpec{},
})
if len(sshPublicKeys) == 0 && c.Cluster.Spec.SSHKeyName == nil {
return fmt.Errorf("SSH public key must be specified when running with AWS (create with `kops create secret --name %s sshpublickey admin -i ~/.ssh/id_rsa.pub`)", cluster.ObjectMeta.Name)
}
@ -467,21 +385,6 @@ func (c *ApplyClusterCmd) Run(ctx context.Context) error {
aliCloud := cloud.(aliup.ALICloud)
region = aliCloud.Region()
l.AddTypes(map[string]interface{}{
"Vpc": &alitasks.VPC{},
"VSwitch": &alitasks.VSwitch{},
"Disk": &alitasks.Disk{},
"SecurityGroup": &alitasks.SecurityGroup{},
"SecurityGroupRule": &alitasks.SecurityGroupRule{},
"LoadBalancer": &alitasks.LoadBalancer{},
"LoadBalancerListener": &alitasks.LoadBalancerListener{},
"LoadBalancerACL": &alitasks.LoadBalancerACL{},
"AutoscalingGroup": &alitasks.ScalingGroup{},
"LaunchConfiguration": &alitasks.LaunchConfiguration{},
"RAMPolicy": &alitasks.RAMPolicy{},
"RAMRole": &alitasks.RAMRole{},
"SSHKey": &alitasks.SSHKey{},
})
if len(sshPublicKeys) == 0 {
return fmt.Errorf("SSH public key must be specified when running with ALICloud (create with `kops create secret --name %s sshpublickey admin -i ~/.ssh/id_rsa.pub`)", cluster.ObjectMeta.Name)
@ -500,23 +403,6 @@ func (c *ApplyClusterCmd) Run(ctx context.Context) error {
osCloud := cloud.(openstack.OpenstackCloud)
region = osCloud.Region()
l.AddTypes(map[string]interface{}{
// Compute
"sshKey": &openstacktasks.SSHKey{},
"serverGroup": &openstacktasks.ServerGroup{},
"instance": &openstacktasks.Instance{},
// Networking
"network": &openstacktasks.Network{},
"subnet": &openstacktasks.Subnet{},
"router": &openstacktasks.Router{},
"securityGroup": &openstacktasks.SecurityGroup{},
"securityGroupRule": &openstacktasks.SecurityGroupRule{},
// BlockStorage
"volume": &openstacktasks.Volume{},
// LB
"lb": &openstacktasks.LB{},
})
if len(sshPublicKeys) == 0 {
return fmt.Errorf("SSH public key must be specified when running with Openstack (create with `kops create secret --name %s sshpublickey admin -i ~/.ssh/id_rsa.pub`)", cluster.ObjectMeta.Name)
}
@ -553,140 +439,126 @@ func (c *ApplyClusterCmd) Run(ctx context.Context) error {
}
l.Tags = clusterTags
l.WorkDir = c.OutDir
l.ModelStore = modelStore
var fileModels []string
for _, m := range c.Models {
switch m {
case "proto":
// No proto code options; no file model
{
templates, err := templates.LoadTemplates(cluster, models.NewAssetPath("cloudup/resources"))
if err != nil {
return fmt.Errorf("error loading templates: %v", err)
}
case "cloudup":
templates, err := templates.LoadTemplates(cluster, models.NewAssetPath("cloudup/resources"))
if err != nil {
return fmt.Errorf("error loading templates: %v", err)
}
err = tf.AddTo(templates.TemplateFunctions, secretStore)
if err != nil {
return err
}
err = tf.AddTo(templates.TemplateFunctions, secretStore)
if err != nil {
return err
l.Builders = append(l.Builders,
&BootstrapChannelBuilder{
KopsModelContext: modelContext,
Lifecycle: &clusterLifecycle,
assetBuilder: assetBuilder,
templates: templates,
},
&model.PKIModelBuilder{
KopsModelContext: modelContext,
Lifecycle: &clusterLifecycle,
},
&kubeapiserver.KubeApiserverBuilder{
AssetBuilder: assetBuilder,
KopsModelContext: modelContext,
Lifecycle: &clusterLifecycle,
},
&etcdmanager.EtcdManagerBuilder{
AssetBuilder: assetBuilder,
KopsModelContext: modelContext,
Lifecycle: &clusterLifecycle,
},
)
switch kops.CloudProviderID(cluster.Spec.CloudProvider) {
case kops.CloudProviderAWS:
awsModelContext := &awsmodel.AWSModelContext{
KopsModelContext: modelContext,
}
l.Builders = append(l.Builders,
&BootstrapChannelBuilder{
KopsModelContext: modelContext,
Lifecycle: &clusterLifecycle,
assetBuilder: assetBuilder,
templates: templates,
},
&model.PKIModelBuilder{
KopsModelContext: modelContext,
Lifecycle: &clusterLifecycle,
},
&kubeapiserver.KubeApiserverBuilder{
AssetBuilder: assetBuilder,
KopsModelContext: modelContext,
Lifecycle: &clusterLifecycle,
},
&etcdmanager.EtcdManagerBuilder{
AssetBuilder: assetBuilder,
KopsModelContext: modelContext,
Lifecycle: &clusterLifecycle,
},
&model.MasterVolumeBuilder{KopsModelContext: modelContext, Lifecycle: &clusterLifecycle},
&awsmodel.APILoadBalancerBuilder{AWSModelContext: awsModelContext, Lifecycle: &clusterLifecycle, SecurityLifecycle: &securityLifecycle},
&model.BastionModelBuilder{KopsModelContext: modelContext, Lifecycle: &clusterLifecycle, SecurityLifecycle: &securityLifecycle},
&model.DNSModelBuilder{KopsModelContext: modelContext, Lifecycle: &clusterLifecycle},
&model.ExternalAccessModelBuilder{KopsModelContext: modelContext, Lifecycle: &securityLifecycle},
&model.FirewallModelBuilder{KopsModelContext: modelContext, Lifecycle: &securityLifecycle},
&model.SSHKeyModelBuilder{KopsModelContext: modelContext, Lifecycle: &securityLifecycle},
)
switch kops.CloudProviderID(cluster.Spec.CloudProvider) {
case kops.CloudProviderAWS:
awsModelContext := &awsmodel.AWSModelContext{
KopsModelContext: modelContext,
}
l.Builders = append(l.Builders,
&model.NetworkModelBuilder{KopsModelContext: modelContext, Lifecycle: &networkLifecycle},
)
l.Builders = append(l.Builders,
&model.MasterVolumeBuilder{KopsModelContext: modelContext, Lifecycle: &clusterLifecycle},
&awsmodel.APILoadBalancerBuilder{AWSModelContext: awsModelContext, Lifecycle: &clusterLifecycle, SecurityLifecycle: &securityLifecycle},
&model.BastionModelBuilder{KopsModelContext: modelContext, Lifecycle: &clusterLifecycle, SecurityLifecycle: &securityLifecycle},
&model.DNSModelBuilder{KopsModelContext: modelContext, Lifecycle: &clusterLifecycle},
&model.ExternalAccessModelBuilder{KopsModelContext: modelContext, Lifecycle: &securityLifecycle},
&model.FirewallModelBuilder{KopsModelContext: modelContext, Lifecycle: &securityLifecycle},
&model.SSHKeyModelBuilder{KopsModelContext: modelContext, Lifecycle: &securityLifecycle},
)
l.Builders = append(l.Builders,
&model.IAMModelBuilder{KopsModelContext: modelContext, Lifecycle: &securityLifecycle},
)
case kops.CloudProviderDO:
doModelContext := &domodel.DOModelContext{
KopsModelContext: modelContext,
}
l.Builders = append(l.Builders,
&model.MasterVolumeBuilder{KopsModelContext: modelContext, Lifecycle: &clusterLifecycle},
&domodel.APILoadBalancerModelBuilder{DOModelContext: doModelContext, Lifecycle: &securityLifecycle},
)
l.Builders = append(l.Builders,
&model.NetworkModelBuilder{KopsModelContext: modelContext, Lifecycle: &networkLifecycle},
)
l.Builders = append(l.Builders,
&model.IAMModelBuilder{KopsModelContext: modelContext, Lifecycle: &securityLifecycle},
)
case kops.CloudProviderDO:
doModelContext := &domodel.DOModelContext{
KopsModelContext: modelContext,
}
l.Builders = append(l.Builders,
&model.MasterVolumeBuilder{KopsModelContext: modelContext, Lifecycle: &clusterLifecycle},
&domodel.APILoadBalancerModelBuilder{DOModelContext: doModelContext, Lifecycle: &securityLifecycle},
)
case kops.CloudProviderGCE:
gceModelContext := &gcemodel.GCEModelContext{
KopsModelContext: modelContext,
}
storageACLLifecycle := securityLifecycle
if storageACLLifecycle != fi.LifecycleIgnore {
// This is a best-effort permissions fix
storageACLLifecycle = fi.LifecycleWarnIfInsufficientAccess
}
l.Builders = append(l.Builders,
&model.MasterVolumeBuilder{KopsModelContext: modelContext, Lifecycle: &clusterLifecycle},
&gcemodel.APILoadBalancerBuilder{GCEModelContext: gceModelContext, Lifecycle: &securityLifecycle},
&gcemodel.ExternalAccessModelBuilder{GCEModelContext: gceModelContext, Lifecycle: &securityLifecycle},
&gcemodel.FirewallModelBuilder{GCEModelContext: gceModelContext, Lifecycle: &securityLifecycle},
&gcemodel.NetworkModelBuilder{GCEModelContext: gceModelContext, Lifecycle: &networkLifecycle},
)
l.Builders = append(l.Builders,
&gcemodel.StorageAclBuilder{GCEModelContext: gceModelContext, Cloud: cloud.(gce.GCECloud), Lifecycle: &storageACLLifecycle},
)
case kops.CloudProviderALI:
aliModelContext := &alimodel.ALIModelContext{
KopsModelContext: modelContext,
}
l.Builders = append(l.Builders,
&model.MasterVolumeBuilder{KopsModelContext: modelContext, Lifecycle: &clusterLifecycle},
&alimodel.APILoadBalancerModelBuilder{ALIModelContext: aliModelContext, Lifecycle: &clusterLifecycle},
&alimodel.NetworkModelBuilder{ALIModelContext: aliModelContext, Lifecycle: &clusterLifecycle},
&alimodel.RAMModelBuilder{ALIModelContext: aliModelContext, Lifecycle: &clusterLifecycle},
&alimodel.SSHKeyModelBuilder{ALIModelContext: aliModelContext, Lifecycle: &clusterLifecycle},
&alimodel.FirewallModelBuilder{ALIModelContext: aliModelContext, Lifecycle: &clusterLifecycle},
&alimodel.ExternalAccessModelBuilder{ALIModelContext: aliModelContext, Lifecycle: &clusterLifecycle},
)
case kops.CloudProviderOpenstack:
openstackModelContext := &openstackmodel.OpenstackModelContext{
KopsModelContext: modelContext,
}
l.Builders = append(l.Builders,
&model.MasterVolumeBuilder{KopsModelContext: modelContext, Lifecycle: &clusterLifecycle},
// &openstackmodel.APILBModelBuilder{OpenstackModelContext: openstackModelContext, Lifecycle: &clusterLifecycle},
&openstackmodel.NetworkModelBuilder{OpenstackModelContext: openstackModelContext, Lifecycle: &networkLifecycle},
&openstackmodel.SSHKeyModelBuilder{OpenstackModelContext: openstackModelContext, Lifecycle: &securityLifecycle},
&openstackmodel.FirewallModelBuilder{OpenstackModelContext: openstackModelContext, Lifecycle: &securityLifecycle},
)
default:
return fmt.Errorf("unknown cloudprovider %q", cluster.Spec.CloudProvider)
case kops.CloudProviderGCE:
gceModelContext := &gcemodel.GCEModelContext{
KopsModelContext: modelContext,
}
fileModels = append(fileModels, m)
storageACLLifecycle := securityLifecycle
if storageACLLifecycle != fi.LifecycleIgnore {
// This is a best-effort permissions fix
storageACLLifecycle = fi.LifecycleWarnIfInsufficientAccess
}
l.Builders = append(l.Builders,
&model.MasterVolumeBuilder{KopsModelContext: modelContext, Lifecycle: &clusterLifecycle},
&gcemodel.APILoadBalancerBuilder{GCEModelContext: gceModelContext, Lifecycle: &securityLifecycle},
&gcemodel.ExternalAccessModelBuilder{GCEModelContext: gceModelContext, Lifecycle: &securityLifecycle},
&gcemodel.FirewallModelBuilder{GCEModelContext: gceModelContext, Lifecycle: &securityLifecycle},
&gcemodel.NetworkModelBuilder{GCEModelContext: gceModelContext, Lifecycle: &networkLifecycle},
)
l.Builders = append(l.Builders,
&gcemodel.StorageAclBuilder{GCEModelContext: gceModelContext, Cloud: cloud.(gce.GCECloud), Lifecycle: &storageACLLifecycle},
)
case kops.CloudProviderALI:
aliModelContext := &alimodel.ALIModelContext{
KopsModelContext: modelContext,
}
l.Builders = append(l.Builders,
&model.MasterVolumeBuilder{KopsModelContext: modelContext, Lifecycle: &clusterLifecycle},
&alimodel.APILoadBalancerModelBuilder{ALIModelContext: aliModelContext, Lifecycle: &clusterLifecycle},
&alimodel.NetworkModelBuilder{ALIModelContext: aliModelContext, Lifecycle: &clusterLifecycle},
&alimodel.RAMModelBuilder{ALIModelContext: aliModelContext, Lifecycle: &clusterLifecycle},
&alimodel.SSHKeyModelBuilder{ALIModelContext: aliModelContext, Lifecycle: &clusterLifecycle},
&alimodel.FirewallModelBuilder{ALIModelContext: aliModelContext, Lifecycle: &clusterLifecycle},
&alimodel.ExternalAccessModelBuilder{ALIModelContext: aliModelContext, Lifecycle: &clusterLifecycle},
)
case kops.CloudProviderOpenstack:
openstackModelContext := &openstackmodel.OpenstackModelContext{
KopsModelContext: modelContext,
}
l.Builders = append(l.Builders,
&model.MasterVolumeBuilder{KopsModelContext: modelContext, Lifecycle: &clusterLifecycle},
// &openstackmodel.APILBModelBuilder{OpenstackModelContext: openstackModelContext, Lifecycle: &clusterLifecycle},
&openstackmodel.NetworkModelBuilder{OpenstackModelContext: openstackModelContext, Lifecycle: &networkLifecycle},
&openstackmodel.SSHKeyModelBuilder{OpenstackModelContext: openstackModelContext, Lifecycle: &securityLifecycle},
&openstackmodel.FirewallModelBuilder{OpenstackModelContext: openstackModelContext, Lifecycle: &securityLifecycle},
)
default:
fileModels = append(fileModels, m)
return fmt.Errorf("unknown cloudprovider %q", cluster.Spec.CloudProvider)
}
}
@ -793,7 +665,7 @@ func (c *ApplyClusterCmd) Run(ctx context.Context) error {
return err
}
taskMap, err := l.BuildTasks(modelStore, fileModels, assetBuilder, &stageAssetsLifecycle, c.LifecycleOverrides)
taskMap, err := l.BuildTasks(modelStore, assetBuilder, &stageAssetsLifecycle, c.LifecycleOverrides)
if err != nil {
return fmt.Errorf("error building tasks: %v", err)
}

View File

@ -18,10 +18,8 @@ package cloudup
import (
"bytes"
"encoding/json"
"fmt"
"io"
"os"
"reflect"
"strings"
"text/template"
@ -33,28 +31,16 @@ import (
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/assettasks"
"k8s.io/kops/upup/pkg/fi/loader"
"k8s.io/kops/upup/pkg/fi/utils"
"k8s.io/kops/util/pkg/reflectutils"
"k8s.io/kops/util/pkg/vfs"
)
const (
KEY_NAME = "name"
KEY_TYPE = "_type"
)
type Loader struct {
Cluster *kopsapi.Cluster
WorkDir string
ModelStore vfs.Path
Tags sets.String
TemplateFunctions template.FuncMap
typeMap map[string]reflect.Type
Resources map[string]fi.Resource
Builders []fi.ModelBuilder
@ -91,26 +77,10 @@ func (a *templateResource) Curry(args []string) fi.TemplateResource {
func (l *Loader) Init() {
l.tasks = make(map[string]fi.Task)
l.typeMap = make(map[string]reflect.Type)
l.Resources = make(map[string]fi.Resource)
l.TemplateFunctions = make(template.FuncMap)
}
func (l *Loader) AddTypes(types map[string]interface{}) {
for key, proto := range types {
_, exists := l.typeMap[key]
if exists {
klog.Fatalf("duplicate type key: %q", key)
}
t := reflect.TypeOf(proto)
if t.Kind() == reflect.Ptr {
t = t.Elem()
}
l.typeMap[key] = t
}
}
func (l *Loader) executeTemplate(key string, d string, args []string) (string, error) {
t := template.New(key)
@ -145,10 +115,12 @@ func (l *Loader) executeTemplate(key string, d string, args []string) (string, e
}
func ignoreHandler(i *loader.TreeWalkItem) error {
return nil
// TODO remove after proving it's dead code
klog.Fatalf("ignoreHandler called on %s", i.Path)
return fmt.Errorf("ignoreHandler not implemented")
}
func (l *Loader) BuildTasks(modelStore vfs.Path, models []string, assetBuilder *assets.AssetBuilder, lifecycle *fi.Lifecycle, lifecycleOverrides map[string]fi.Lifecycle) (map[string]fi.Task, error) {
func (l *Loader) BuildTasks(modelStore vfs.Path, assetBuilder *assets.AssetBuilder, lifecycle *fi.Lifecycle, lifecycleOverrides map[string]fi.Lifecycle) (map[string]fi.Task, error) {
// Second pass: load everything else
tw := &loader.TreeWalker{
DefaultHandler: l.objectHandler,
@ -161,12 +133,10 @@ func (l *Loader) BuildTasks(modelStore vfs.Path, models []string, assetBuilder *
Tags: l.Tags,
}
for _, model := range models {
modelDir := modelStore.Join(model)
err := tw.Walk(modelDir)
if err != nil {
return nil, err
}
modelDir := modelStore.Join("cloudup")
err := tw.Walk(modelDir)
if err != nil {
return nil, err
}
for _, builder := range l.Builders {
@ -188,7 +158,7 @@ func (l *Loader) BuildTasks(modelStore vfs.Path, models []string, assetBuilder *
if err := l.addAssetFileCopyTasks(assetBuilder.FileAssets, lifecycle); err != nil {
return nil, err
}
err := l.processDeferrals()
err = l.processDeferrals()
if err != nil {
return nil, err
}
@ -367,113 +337,9 @@ func (l *Loader) resourceHandler(i *loader.TreeWalkItem) error {
}
func (l *Loader) objectHandler(i *loader.TreeWalkItem) error {
klog.V(8).Infof("Reading %s", i.Path)
contents, err := i.ReadString()
if err != nil {
return err
}
data, err := l.executeTemplate(i.RelativePath, contents, nil)
if err != nil {
return err
}
objects, err := l.loadYamlObjects(i.RelativePath, data)
if err != nil {
return err
}
for k, v := range objects {
_, found := l.tasks[k]
if found {
return fmt.Errorf("found duplicate object: %q", k)
}
l.tasks[k] = v.(fi.Task)
}
return nil
}
func (l *Loader) loadYamlObjects(key string, data string) (map[string]interface{}, error) {
var o map[string]interface{}
if strings.TrimSpace(data) != "" {
err := utils.YamlUnmarshal([]byte(data), &o)
if err != nil {
// TODO: It would be nice if yaml returned us the line number here
klog.Infof("error parsing yaml. yaml follows:")
for i, line := range strings.Split(string(data), "\n") {
fmt.Fprintf(os.Stderr, "%3d: %s\n", i, line)
}
return nil, fmt.Errorf("error parsing yaml %q: %v", key, err)
}
}
return l.loadObjectMap(key, o)
}
func (l *Loader) loadObjectMap(key string, data map[string]interface{}) (map[string]interface{}, error) {
loaded := make(map[string]interface{})
for k, v := range data {
typeID := ""
name := ""
// If the name & type are not specified in the values,
// we infer them from the key (first component -> typeid, last component -> name)
if vMap, ok := v.(map[string]interface{}); ok {
if s, ok := vMap[KEY_TYPE]; ok {
typeID = s.(string)
}
if s, ok := vMap[KEY_NAME]; ok {
name = s.(string)
}
}
inferredName := false
if name == "" {
firstSlash := strings.Index(k, "/")
name = k[firstSlash+1:]
inferredName = true
}
if typeID == "" {
firstSlash := strings.Index(k, "/")
if firstSlash != -1 {
typeID = k[:firstSlash]
}
if typeID == "" {
return nil, fmt.Errorf("cannot determine type for %q", k)
}
}
t, found := l.typeMap[typeID]
if !found {
return nil, fmt.Errorf("unknown type %q (in %q)", typeID, key)
}
o := reflect.New(t)
// TODO replace with partial unmarshal...
jsonValue, err := json.Marshal(v)
if err != nil {
return nil, fmt.Errorf("error marshaling to json: %v", err)
}
err = json.Unmarshal(jsonValue, o.Interface())
if err != nil {
klog.V(2).Infof("JSON was %q", string(jsonValue))
return nil, fmt.Errorf("error parsing %q: %v", key, err)
}
klog.V(4).Infof("Built %s:%s => %v", key, k, o.Interface())
if inferredName {
hn, ok := o.Interface().(fi.HasSetName)
if ok {
hn.SetName(name)
}
}
loaded[k] = o.Interface()
}
return loaded, nil
// TODO remove after proving it's dead code
klog.Fatalf("objectHandler called on %s", i.Path)
return fmt.Errorf("objectHandler not implemented")
}
func (l *Loader) populateResource(rh *fi.ResourceHolder, resource fi.Resource, args []string) error {

View File

@ -21,7 +21,6 @@ import (
"fmt"
"net"
"strings"
"text/template"
"k8s.io/klog"
@ -31,7 +30,6 @@ import (
"k8s.io/kops/pkg/assets"
"k8s.io/kops/pkg/client/simple"
"k8s.io/kops/pkg/dns"
"k8s.io/kops/pkg/model"
"k8s.io/kops/pkg/model/components"
"k8s.io/kops/pkg/model/components/etcdmanager"
nodeauthorizer "k8s.io/kops/pkg/model/components/node-authorizer"
@ -252,26 +250,6 @@ func (c *populateClusterSpec) run(clientset simple.Clientset) error {
cluster.Spec.DNSZone = dnsZone
}
tags, err := buildCloudupTags(cluster)
if err != nil {
return err
}
tf := &TemplateFunctions{
KopsModelContext: model.KopsModelContext{
Cluster: cluster,
},
tags: tags,
}
templateFunctions := make(template.FuncMap)
err = tf.AddTo(templateFunctions, secretStore)
if err != nil {
return err
}
if cluster.Spec.KubernetesVersion == "" {
return fmt.Errorf("KubernetesVersion is required")
}
@ -308,8 +286,7 @@ func (c *populateClusterSpec) run(clientset simple.Clientset) error {
}
specBuilder := &SpecBuilder{
OptionsLoader: loader.NewOptionsLoader(templateFunctions, codeModels),
Tags: tags,
OptionsLoader: loader.NewOptionsLoader(codeModels),
}
completed, err := specBuilder.BuildCompleteSpec(&cluster.Spec)
@ -324,7 +301,6 @@ func (c *populateClusterSpec) run(clientset simple.Clientset) error {
fullCluster := &kopsapi.Cluster{}
*fullCluster = *cluster
fullCluster.Spec = *completed
tf.Cluster = fullCluster
if errs := validation.ValidateCluster(fullCluster, true); len(errs) != 0 {
return fmt.Errorf("Completed cluster failed validation: %v", errs.ToAggregate())

View File

@ -17,7 +17,6 @@ limitations under the License.
package cloudup
import (
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/klog"
kopsapi "k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/upup/pkg/fi"
@ -27,8 +26,6 @@ import (
type SpecBuilder struct {
OptionsLoader *loader.OptionsLoader
Tags sets.String
}
func (l *SpecBuilder) BuildCompleteSpec(clusterSpec *kopsapi.ClusterSpec) (*kopsapi.ClusterSpec, error) {

View File

@ -9,7 +9,6 @@ go_library(
importpath = "k8s.io/kops/upup/pkg/fi/loader",
visibility = ["//visibility:public"],
deps = [
"//upup/pkg/fi/utils:go_default_library",
"//util/pkg/reflectutils:go_default_library",
"//util/pkg/vfs:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",

View File

@ -17,33 +17,16 @@ limitations under the License.
package loader
import (
"bytes"
"encoding/json"
"fmt"
"os"
"reflect"
"sort"
"strings"
"text/template"
"k8s.io/klog"
"k8s.io/kops/upup/pkg/fi/utils"
"k8s.io/kops/util/pkg/reflectutils"
)
const maxIterations = 10
type OptionsTemplate struct {
Name string
Tags []string
Template *template.Template
}
type OptionsLoader struct {
templates OptionsTemplateList
TemplateFunctions template.FuncMap
Builders []OptionsBuilder
}
@ -51,47 +34,14 @@ type OptionsBuilder interface {
BuildOptions(options interface{}) error
}
type OptionsTemplateList []*OptionsTemplate
// Len reports how many templates the list holds (part of sort.Interface).
func (list OptionsTemplateList) Len() int {
	return len(list)
}
// Swap exchanges the templates at positions i and j (part of sort.Interface).
func (list OptionsTemplateList) Swap(i, j int) {
	list[i], list[j] = list[j], list[i]
}
// Less defines the execution order of templates (part of sort.Interface):
// templates carrying fewer tags sort first, so more generic templates are
// applied before more specific ones; ties are broken by name.
func (list OptionsTemplateList) Less(i, j int) bool {
	left, right := list[i], list[j]

	// First ordering criterion: fewer tags means more generic, so run earlier.
	if len(left.Tags) != len(right.Tags) {
		return len(left.Tags) < len(right.Tags)
	}

	// TODO: lexicographic sort on tags, for full determinism?

	// Final ordering criterion: name, for a stable order.
	return left.Name < right.Name
}
func NewOptionsLoader(templateFunctions template.FuncMap, builders []OptionsBuilder) *OptionsLoader {
func NewOptionsLoader(builders []OptionsBuilder) *OptionsLoader {
l := &OptionsLoader{}
l.TemplateFunctions = make(template.FuncMap)
for k, v := range templateFunctions {
l.TemplateFunctions[k] = v
}
l.Builders = builders
return l
}
// AddTemplate registers tmpl in the loader's list of options templates.
func (l *OptionsLoader) AddTemplate(tmpl *OptionsTemplate) {
	l.templates = append(l.templates, tmpl)
}
// iterate performs a single iteration of all the templates, executing each template in order
func (l *OptionsLoader) iterate(userConfig interface{}, current interface{}) (interface{}, error) {
sort.Sort(l.templates)
t := reflect.TypeOf(current).Elem()
next := reflect.New(t).Interface()
@ -99,33 +49,6 @@ func (l *OptionsLoader) iterate(userConfig interface{}, current interface{}) (in
// Copy the current state before applying rules; they act as defaults
reflectutils.JSONMergeStruct(next, current)
for _, t := range l.templates {
klog.V(2).Infof("executing template %s (tags=%s)", t.Name, t.Tags)
var buffer bytes.Buffer
err := t.Template.ExecuteTemplate(&buffer, t.Name, current)
if err != nil {
return nil, fmt.Errorf("error executing template %q: %v", t.Name, err)
}
yamlBytes := buffer.Bytes()
jsonBytes, err := utils.YAMLToJSON(yamlBytes)
if err != nil {
// TODO: It would be nice if yaml returned us the line number here
klog.Infof("error parsing yaml. yaml follows:")
for i, line := range strings.Split(string(yamlBytes), "\n") {
fmt.Fprintf(os.Stderr, "%3d: %s\n", i, line)
}
return nil, fmt.Errorf("error parsing yaml %q: %v", t.Name, err)
}
err = json.Unmarshal(jsonBytes, next)
if err != nil {
return nil, fmt.Errorf("error parsing yaml (converted to JSON) %q: %v", t.Name, err)
}
}
for _, t := range l.Builders {
klog.V(2).Infof("executing builder %T", t)
@ -164,29 +87,3 @@ func (l *OptionsLoader) Build(userConfig interface{}) (interface{}, error) {
options = nextOptions
}
}
// HandleOptions is the file handler for options files.
// It reads the file, parses it as a text/template with the loader's
// template functions, and registers the result in the list of options
// templates via AddTemplate.
func (l *OptionsLoader) HandleOptions(i *TreeWalkItem) error {
	contents, err := i.ReadString()
	if err != nil {
		return err
	}

	tmpl := template.New(i.RelativePath)
	// Funcs must be installed before Parse so the template body can reference them.
	tmpl.Funcs(l.TemplateFunctions)
	if _, err := tmpl.Parse(contents); err != nil {
		return fmt.Errorf("error parsing options template %q: %v", i.Path, err)
	}
	// Missing map keys render as the zero value instead of "<no value>".
	tmpl.Option("missingkey=zero")

	l.AddTemplate(&OptionsTemplate{
		Name:     i.RelativePath,
		Tags:     i.Tags,
		Template: tmpl,
	})
	return nil
}