Merge pull request #17573 from rifelpet/golangcilint-fixes

Golangci-lint v2 fixes
Kubernetes Prow Robot 2025-08-23 00:07:05 -07:00 committed by GitHub
commit d3a7607637
76 changed files with 168 additions and 210 deletions

View File

@@ -24,7 +24,6 @@ import (
 	"os"
 	"github.com/blang/semver/v4"
-	"github.com/cert-manager/cert-manager/pkg/client/clientset/versioned"
 	certmanager "github.com/cert-manager/cert-manager/pkg/client/clientset/versioned"
 	"github.com/spf13/cobra"
 	"go.uber.org/multierr"
@@ -120,7 +119,7 @@ func RunApplyChannel(ctx context.Context, f *ChannelsFactory, out io.Writer, opt
 	return applyMenu(ctx, menu, f.VFSContext(), k8sClient, cmClient, dynamicClient, restMapper, options.Yes)
 }
-func applyMenu(ctx context.Context, menu *channels.AddonMenu, vfsContext *vfs.VFSContext, k8sClient kubernetes.Interface, cmClient versioned.Interface, dynamicClient dynamic.Interface, restMapper *restmapper.DeferredDiscoveryRESTMapper, apply bool) error {
+func applyMenu(ctx context.Context, menu *channels.AddonMenu, vfsContext *vfs.VFSContext, k8sClient kubernetes.Interface, cmClient certmanager.Interface, dynamicClient dynamic.Interface, restMapper *restmapper.DeferredDiscoveryRESTMapper, apply bool) error {
 	// channelVersions is the list of installed addons in the cluster.
 	// It is keyed by <namespace>:<addon name>.
 	channelVersions, err := getChannelVersions(ctx, k8sClient)
@@ -198,7 +197,7 @@ func applyMenu(ctx context.Context, menu *channels.AddonMenu, vfsContext *vfs.VF
 	return merr
 }
-func getUpdates(ctx context.Context, menu *channels.AddonMenu, k8sClient kubernetes.Interface, cmClient versioned.Interface, channelVersions map[string]*channels.ChannelVersion) ([]*channels.AddonUpdate, []*channels.Addon, error) {
+func getUpdates(ctx context.Context, menu *channels.AddonMenu, k8sClient kubernetes.Interface, cmClient certmanager.Interface, channelVersions map[string]*channels.ChannelVersion) ([]*channels.AddonUpdate, []*channels.Addon, error) {
 	var updates []*channels.AddonUpdate
 	var needUpdates []*channels.Addon
 	for _, addon := range menu.Addons {

View File

@@ -181,7 +181,8 @@ func (m *MockClient) routerInterface(w http.ResponseWriter, r *http.Request) {
 	if err != nil {
 		panic("error decoding create router interface request")
 	}
-	if parts[2] == "add_router_interface" {
+	switch parts[2] {
+	case "add_router_interface":
 		subnet := m.subnets[createInterface.SubnetID]
 		interfaces := m.routerInterfaces[routerID]
 		interfaces = append(interfaces, routers.InterfaceInfo{
@@ -201,7 +202,7 @@ func (m *MockClient) routerInterface(w http.ResponseWriter, r *http.Request) {
 			},
 		}
 		m.ports[port.ID] = port
-	} else if parts[2] == "remove_router_interface" {
+	case "remove_router_interface":
 		interfaces := make([]routers.InterfaceInfo, 0)
 		for _, i := range m.routerInterfaces[routerID] {
 			if i.SubnetID != createInterface.SubnetID {
@@ -209,7 +210,7 @@ func (m *MockClient) routerInterface(w http.ResponseWriter, r *http.Request) {
 			}
 		}
 		m.routerInterfaces[routerID] = interfaces
-	} else {
+	default:
 		w.WriteHeader(http.StatusBadRequest)
 		return
 	}
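Many of the hunks in this PR rewrite an if/else-if chain that repeatedly tests the same expression into a switch, the form linters such as gocritic (its ifElseChain check) suggest. A minimal standalone sketch of the same rewrite, with illustrative names:

package main

import "fmt"

// classify uses the if/else-if chain form that the linter flags.
func classify(action string) string {
	if action == "add_router_interface" {
		return "adding interface"
	} else if action == "remove_router_interface" {
		return "removing interface"
	} else {
		return "bad request"
	}
}

// classifySwitch is the equivalent switch form; behavior is identical,
// the dispatch value is just stated once.
func classifySwitch(action string) string {
	switch action {
	case "add_router_interface":
		return "adding interface"
	case "remove_router_interface":
		return "removing interface"
	default:
		return "bad request"
	}
}

func main() {
	fmt.Println(classify("add_router_interface"), classifySwitch("something_else"))
}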

View File

@@ -107,7 +107,7 @@ func (f *FakeDomainAPI) UpdateDNSZoneRecords(req *domain.UpdateDNSZoneRecordsReq
 			break
 		}
 	}
-	if found == false {
+	if !found {
 		return nil, fmt.Errorf("could not find record %s to delete", *change.Delete.ID)
 	}
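Comparing a bool against a literal (found == false here, shared == true in a later hunk) is reduced to the bare boolean expression, the simplification staticcheck documents as S1002. A small sketch:

package main

import "fmt"

// lookup is an illustrative helper.
func lookup(key string) bool {
	return key == "known-record"
}

func main() {
	found := lookup("missing-record")

	// Before: explicit comparison with a boolean constant.
	if found == false {
		fmt.Println("not found (verbose form)")
	}

	// After: use the boolean directly.
	if !found {
		fmt.Println("not found (simplified form)")
	}
}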

View File

@@ -161,7 +161,7 @@ func (s *Server) bootstrap(w http.ResponseWriter, r *http.Request) {
 	if err != nil {
 		klog.Infof("bootstrap %s read err: %v", r.RemoteAddr, err)
 		w.WriteHeader(http.StatusBadRequest)
-		_, _ = w.Write([]byte(fmt.Sprintf("bootstrap %s failed to read body: %v", r.RemoteAddr, err)))
+		_, _ = fmt.Fprintf(w, "bootstrap %s failed to read body: %v", r.RemoteAddr, err)
 		return
 	}
@@ -208,7 +208,7 @@ func (s *Server) bootstrap(w http.ResponseWriter, r *http.Request) {
 	if err := json.Unmarshal(body, req); err != nil {
 		klog.Infof("bootstrap %s decode err: %v", r.RemoteAddr, err)
 		w.WriteHeader(http.StatusBadRequest)
-		_, _ = w.Write([]byte(fmt.Sprintf("failed to decode: %v", err)))
+		_, _ = fmt.Fprintf(w, "failed to decode: %v", err)
 		return
 	}
@@ -264,7 +264,7 @@ func (s *Server) bootstrap(w http.ResponseWriter, r *http.Request) {
 	if err != nil {
 		klog.Infof("bootstrap %s cert %q issue err: %v", r.RemoteAddr, name, err)
 		w.WriteHeader(http.StatusBadRequest)
-		_, _ = w.Write([]byte(fmt.Sprintf("failed to issue %q: %v", name, err)))
+		_, _ = fmt.Fprintf(w, "failed to issue %q: %v", name, err)
 		return
 	}
 	resp.Certs[name] = cert
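Building a message with fmt.Sprintf only to wrap it in []byte for Write allocates an intermediate string and slice; fmt.Fprintf formats straight into the writer, and http.ResponseWriter is an io.Writer, so the handler code above can use it directly. A minimal sketch with an illustrative helper:

package main

import (
	"fmt"
	"io"
	"os"
)

// writeError reports a bootstrap failure to any io.Writer.
func writeError(w io.Writer, addr string, err error) {
	// Before: _, _ = w.Write([]byte(fmt.Sprintf("bootstrap %s failed: %v", addr, err)))
	// After: format directly into the writer, with no intermediate string or []byte.
	_, _ = fmt.Fprintf(w, "bootstrap %s failed: %v\n", addr, err)
}

func main() {
	writeError(os.Stdout, "10.0.0.5:4711", fmt.Errorf("example error"))
}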

View File

@@ -858,7 +858,7 @@ func parseCloudLabels(s string) (map[string]string, error) {
 	// Replace commas with newlines to allow a single pass with csv.Reader.
 	// We can't use csv.Reader for the initial split because it would see each key=value record as a single field
 	// and significantly complicates using quoted fields as keys or values.
-	records := strings.Replace(s, ",", "\n", -1)
+	records := strings.ReplaceAll(s, ",", "\n")
 	// Let the CSV library do the heavy-lifting in handling nested ='s
 	r := csv.NewReader(strings.NewReader(records))
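The most common change in this PR: strings.Replace(s, old, new, -1) is exactly strings.ReplaceAll(s, old, new), which is the wrapper the linter suggests (gocritic's wrapperFunc check covers this family of rewrites). A quick sketch:

package main

import (
	"fmt"
	"strings"
)

func main() {
	clusterName := "dev5.k8s.local"

	// Before: the -1 count means "replace every occurrence".
	withCount := strings.Replace(clusterName, ".", "-", -1)

	// After: ReplaceAll states the intent without the magic -1.
	withAll := strings.ReplaceAll(clusterName, ".", "-")

	fmt.Println(withCount == withAll, withAll) // true dev5-k8s-local
}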

View File

@@ -1605,7 +1605,7 @@ func (i *integrationTest) runTestTerraformAWS(t *testing.T) {
 			"aws_cloudwatch_event_rule_" + awsup.GetClusterName40(i.clusterName) + "-SpotInterruption_event_pattern",
 			"aws_cloudwatch_event_rule_" + awsup.GetClusterName40(i.clusterName) + "-InstanceStateChange_event_pattern",
 			"aws_cloudwatch_event_rule_" + awsup.GetClusterName40(i.clusterName) + "-InstanceScheduledChange_event_pattern",
-			"aws_sqs_queue_" + strings.Replace(i.clusterName, ".", "-", -1) + "-nth_policy",
+			"aws_sqs_queue_" + strings.ReplaceAll(i.clusterName, ".", "-") + "-nth_policy",
 		}...)
 	}
 	if i.nthRebalance {
@@ -1633,7 +1633,8 @@ func (i *integrationTest) runTestPhase(t *testing.T, phase cloudup.Phase) {
 	expectedFilenames := i.expectTerraformFilenames
-	if phase == cloudup.PhaseSecurity {
+	switch phase {
+	case cloudup.PhaseSecurity:
 		expectedFilenames = []string{
 			"aws_iam_role_masters." + i.clusterName + "_policy",
 			"aws_iam_role_nodes." + i.clusterName + "_policy",
@@ -1648,7 +1649,7 @@ func (i *integrationTest) runTestPhase(t *testing.T, phase cloudup.Phase) {
 				"aws_launch_template_bastion." + i.clusterName + "_user_data",
 			}...)
 		}
-	} else if phase == cloudup.PhaseCluster {
+	case cloudup.PhaseCluster:
 		expectedFilenames = []string{
 			"aws_launch_template_nodes." + i.clusterName + "_user_data",
 		}

View File

@@ -56,7 +56,7 @@ func (o *LifecycleTestOptions) AddDefaults() {
 		o.Version = "v1alpha2"
 	}
 	if o.ClusterName == "" {
-		o.ClusterName = strings.Replace(o.SrcDir, "_", "", -1) + ".example.com"
+		o.ClusterName = strings.ReplaceAll(o.SrcDir, "_", "") + ".example.com"
 	}
 	o.SrcDir = "../../tests/integration/update_cluster/" + o.SrcDir

View File

@@ -520,7 +520,8 @@ func decorateWithMixedInstancesPolicy(instanceGroup *kops.InstanceGroup, usageCl
 	ig := instanceGroup
 	ig.Spec.MachineType = instanceSelections[0]
-	if usageClass == ec2types.UsageClassTypeSpot {
+	switch usageClass {
+	case ec2types.UsageClassTypeSpot:
 		ondemandBase := int64(0)
 		ondemandAboveBase := int64(0)
 		spotAllocationStrategy := "capacity-optimized"
@@ -530,11 +531,11 @@ func decorateWithMixedInstancesPolicy(instanceGroup *kops.InstanceGroup, usageCl
 			OnDemandAboveBase: &ondemandAboveBase,
 			SpotAllocationStrategy: &spotAllocationStrategy,
 		}
-	} else if usageClass == ec2types.UsageClassTypeOnDemand {
+	case ec2types.UsageClassTypeOnDemand:
 		ig.Spec.MixedInstancesPolicy = &kops.MixedInstancesPolicySpec{
 			Instances: instanceSelections,
 		}
-	} else {
+	default:
 		return nil, fmt.Errorf("error node usage class not supported")
 	}

View File

@@ -36,7 +36,6 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/klog/v2"
 	"k8s.io/kops/cmd/kops/util"
-	"k8s.io/kops/pkg/apis/kops"
 	kopsapi "k8s.io/kops/pkg/apis/kops"
 	"k8s.io/kops/pkg/validation"
 	"k8s.io/kops/util/pkg/tables"
@@ -63,7 +62,7 @@ var (
 type ValidateClusterOptions struct {
 	ClusterName string
-	InstanceGroupRoles []kops.InstanceGroupRole
+	InstanceGroupRoles []kopsapi.InstanceGroupRole
 	output string
 	wait time.Duration
 	count int
@@ -71,7 +70,7 @@ type ValidateClusterOptions struct {
 	kubeconfig string
 	// filterInstanceGroups is a function that returns true if the instance group should be validated
-	filterInstanceGroups func(ig *kops.InstanceGroup) bool
+	filterInstanceGroups func(ig *kopsapi.InstanceGroup) bool
 	// filterPodsForValidation is a function that returns true if the pod should be validated
 	filterPodsForValidation func(pod *v1.Pod) bool
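This file, like the channels command at the top of the diff, imported the same package twice under two names; the fix keeps the aliased import and uses that alias everywhere (stylecheck flags duplicate imports as ST1019). An illustrative sketch, which assumes it is compiled inside the kops module:

package main

import (
	"fmt"

	// Before the fix the file also had the unaliased import
	// "k8s.io/kops/pkg/apis/kops"; only the aliased one remains,
	// and every reference now uses kopsapi.
	kopsapi "k8s.io/kops/pkg/apis/kops"
)

func main() {
	ig := &kopsapi.InstanceGroup{}
	fmt.Printf("%T\n", ig)
}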

View File

@@ -201,9 +201,10 @@ func (c *NodeController) updateNodeRecords(node *v1.Node) string {
 	for _, a := range node.Status.Addresses {
 		var roleType string
-		if a.Type == v1.NodeInternalIP {
+		switch a.Type {
+		case v1.NodeInternalIP:
 			roleType = dns.RoleTypeInternal
-		} else if a.Type == v1.NodeExternalIP {
+		case v1.NodeExternalIP:
 			roleType = dns.RoleTypeExternal
 		}
 		var recordType dns.RecordType = dns.RecordTypeA

View File

@@ -152,7 +152,8 @@ func (c *ServiceController) updateServiceRecords(service *v1.Service) string {
 	if len(specExternal) != 0 || len(specInternal) != 0 {
 		var ingresses []dns.Record
-		if service.Spec.Type == v1.ServiceTypeLoadBalancer {
+		switch service.Spec.Type {
+		case v1.ServiceTypeLoadBalancer:
 			for i := range service.Status.LoadBalancer.Ingress {
 				ingress := &service.Status.LoadBalancer.Ingress[i]
 				if ingress.Hostname != "" {
@@ -175,7 +176,7 @@ func (c *ServiceController) updateServiceRecords(service *v1.Service) string {
 					klog.V(4).Infof("Found A record for service %s/%s: %q", service.Namespace, service.Name, ingress.IP)
 				}
 			}
-		} else if service.Spec.Type == v1.ServiceTypeNodePort {
+		case v1.ServiceTypeNodePort:
 			var roleType string
 			if len(specExternal) != 0 && len(specInternal) != 0 {
 				klog.Warningln("DNS Records not possible for both Internal and Externals IPs.")
@@ -190,7 +191,7 @@ func (c *ServiceController) updateServiceRecords(service *v1.Service) string {
 				Value: dns.AliasForNodesInRole("node", roleType),
 			})
 			klog.V(4).Infof("Setting internal alias for NodePort service %s/%s", service.Namespace, service.Name)
-		} else {
+		default:
 			// TODO: Emit event so that users are informed of this
 			klog.V(2).Infof("Cannot expose service %s/%s of type %q", service.Namespace, service.Name, service.Spec.Type)
 		}

View File

@@ -46,6 +46,6 @@ func (m *ManagedZonesService) List(project string) interfaces.ManagedZonesListCa
 }
 func (m *ManagedZonesService) NewManagedZone(dnsName string) interfaces.ManagedZone {
-	name := "x" + strings.Replace(string(uuid.NewUUID()), "-", "", -1)[0:30] // Unique name, strip out the "-" chars to shorten it, start with a lower case alpha, and truncate to Cloud DNS 32 character limit
+	name := "x" + strings.ReplaceAll(string(uuid.NewUUID()), "-", "")[0:30] // Unique name, strip out the "-" chars to shorten it, start with a lower case alpha, and truncate to Cloud DNS 32 character limit
 	return &ManagedZone{impl: &dns.ManagedZone{Name: name, Description: "Kubernetes Federated Service", DnsName: dnsName}}
 }

View File

@@ -795,7 +795,7 @@ func (b *KubeAPIServerBuilder) buildPod(ctx context.Context, kubeAPIServer *kops
 	}
 	for _, path := range b.SSLHostPaths() {
-		name := strings.Replace(path, "/", "", -1)
+		name := strings.ReplaceAll(path, "/", "")
 		kubemanifest.AddHostPathMapping(pod, container, name, path)
 	}

View File

@@ -261,7 +261,7 @@ func (b *KubeControllerManagerBuilder) buildPod(kcm *kops.KubeControllerManagerC
 		container.Args = append(container.Args, sortedStrings(flags)...)
 	}
 	for _, path := range b.SSLHostPaths() {
-		name := strings.Replace(path, "/", "", -1)
+		name := strings.ReplaceAll(path, "/", "")
 		kubemanifest.AddHostPathMapping(pod, container, name, path)
 	}

View File

@@ -101,8 +101,8 @@ func TestTaintsApplied(t *testing.T) {
 }
 func stringSlicesEqual(exp, other []string) bool {
-	sort.Sort(sort.StringSlice(exp))
-	sort.Sort(sort.StringSlice(other))
+	sort.Strings(exp)
+	sort.Strings(other)
 	if exp == nil && other != nil {
 		return false
 	}
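sort.Sort(sort.StringSlice(x)) and sort.Strings(x) perform the same in-place sort; the helper is the form the simplification linters prefer (staticcheck's S1032 family). A quick sketch:

package main

import (
	"fmt"
	"sort"
)

func main() {
	a := []string{"node", "bastion", "control-plane"}
	b := []string{"node", "bastion", "control-plane"}

	// Before: wrap the slice so it satisfies sort.Interface.
	sort.Sort(sort.StringSlice(a))

	// After: the convenience helper does the same sort.
	sort.Strings(b)

	fmt.Println(a)
	fmt.Println(b) // identical output
}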

View File

@@ -1629,13 +1629,14 @@ func validateCalicoAutoDetectionMethod(fldPath *field.Path, runtime string, vers
 	case "can-reach":
 		destStr := method[1]
 		ip := netutils.ParseIPSloppy(destStr)
-		if version == ipv4.Version {
+		switch version {
+		case ipv4.Version:
 			if ip == nil || ip.To4() == nil {
 				return field.ErrorList{field.Invalid(fldPath, runtime, "must be a valid IPv4 address")}
 			} else {
 				return nil
 			}
-		} else if version == ipv6.Version {
+		case ipv6.Version:
 			if ip == nil || ip.To4() != nil {
 				return field.ErrorList{field.Invalid(fldPath, runtime, "must be a valid IPv6 address")}
 			} else {

View File

@@ -62,9 +62,10 @@ func (req *putResource) Run(s *MockKubeAPIServer) error {
 	var updated *unstructured.Unstructured
-	if req.SubResource == "" {
+	switch req.SubResource {
+	case "":
 		updated = body
-	} else if req.SubResource == "status" {
+	case "status":
 		updated = existing.DeepCopyObject().(*unstructured.Unstructured)
 		newStatus := body.Object["status"]
 		if newStatus == nil {
@@ -72,7 +73,7 @@ func (req *putResource) Run(s *MockKubeAPIServer) error {
 			return fmt.Errorf("status not specified on status subresource update")
 		}
 		updated.Object["status"] = newStatus
-	} else {
+	default:
 		// TODO: We need to implement put properly
 		return fmt.Errorf("unknown subresource %q", req.SubResource)
 	}

View File

@@ -374,7 +374,7 @@ func NormalizeImage(a *AssetBuilder, image string) string {
 		if !strings.HasPrefix(normalized, registryMirror+"/") {
 			// We can't nest arbitrarily
 			// Some risk of collisions, but also -- and __ in the names appear to be blocked by docker hub
-			normalized = strings.Replace(normalized, "/", "-", -1)
+			normalized = strings.ReplaceAll(normalized, "/", "-")
 			normalized = registryMirror + "/" + normalized
 		}
 		image = normalized

View File

@@ -196,6 +196,6 @@ func restNamespaceForClusterName(clusterName string) string {
 	// We are not allowed dots, so we map them to dashes
 	// This can conflict, but this will simply be a limitation that we pass on to the user
 	// i.e. it will not be possible to create a.b.example.com and a-b.example.com
-	namespace := strings.Replace(clusterName, ".", "-", -1)
+	namespace := strings.ReplaceAll(clusterName, ".", "-")
 	return namespace
 }

View File

@@ -118,10 +118,7 @@ func RunToolboxEnroll(ctx context.Context, f commandutils.Factory, out io.Writer
 		return err
 	}
-	sudo := true
-	if options.SSHUser == "root" {
-		sudo = false
-	}
+	sudo := options.SSHUser != "root"
 	sshTarget, err := NewSSHHost(ctx, options.Host, options.SSHPort, options.SSHUser, sudo)
 	if err != nil {
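Several fixes collapse a "set a default, then conditionally flip it" boolean into a single expression, which removes a mutation and reads as the condition it really is. A minimal sketch with illustrative names:

package main

import "fmt"

func main() {
	sshUser := "ubuntu"

	// Before: declare a default, then conditionally overwrite it.
	sudo := true
	if sshUser == "root" {
		sudo = false
	}

	// After: the condition itself is the value.
	sudoSimplified := sshUser != "root"

	fmt.Println(sudo, sudoSimplified) // true true
}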

View File

@@ -607,7 +607,7 @@ func (a ByScoreDescending) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
 func (a ByScoreDescending) Less(i, j int) bool {
 	if a[i].score != a[j].score {
 		// ! to sort highest score first
-		return !(a[i].score < a[j].score)
+		return a[i].score >= a[j].score
 	}
 	// Use name to break ties consistently
 	return a[i].subnet.Name < a[j].subnet.Name
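Negated comparisons such as !(x < y) become the direct operator x >= y; for integer operands the two forms always agree, so the sort order is unchanged. A small check:

package main

import "fmt"

func main() {
	pairs := [][2]int{{1, 2}, {2, 2}, {3, 2}}
	for _, p := range pairs {
		negated := !(p[0] < p[1])
		direct := p[0] >= p[1]
		fmt.Println(p, negated == direct) // always true for ints
	}
}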

View File

@@ -311,11 +311,12 @@ func (b *AutoscalingGroupModelBuilder) buildLaunchTemplateTask(c *fi.CloudupMode
 		lt.HTTPTokens = fi.PtrTo(ec2types.LaunchTemplateHttpTokensStateOptional)
 	}
-	if rootVolumeType == ec2types.VolumeTypeIo1 || rootVolumeType == ec2types.VolumeTypeIo2 {
+	switch rootVolumeType {
+	case ec2types.VolumeTypeIo1, ec2types.VolumeTypeIo2:
 		if ig.Spec.RootVolume == nil || fi.ValueOf(ig.Spec.RootVolume.IOPS) < 100 {
 			lt.RootVolumeIops = fi.PtrTo(int32(DefaultVolumeIonIops))
 		}
-	} else if rootVolumeType == ec2types.VolumeTypeGp3 {
+	case ec2types.VolumeTypeGp3:
 		if ig.Spec.RootVolume == nil || fi.ValueOf(ig.Spec.RootVolume.IOPS) < 3000 {
 			lt.RootVolumeIops = fi.PtrTo(int32(DefaultVolumeGp3Iops))
 		}
@@ -324,7 +325,7 @@ func (b *AutoscalingGroupModelBuilder) buildLaunchTemplateTask(c *fi.CloudupMode
 		} else {
 			lt.RootVolumeThroughput = fi.PtrTo(int32(fi.ValueOf(ig.Spec.RootVolume.Throughput)))
 		}
-	} else {
+	default:
 		lt.RootVolumeIops = nil
 	}

View File

@@ -265,7 +265,7 @@ func (b *KopsModelContext) CloudTags(name string, shared bool) map[string]string
 		}
 	case kops.CloudProviderScaleway:
 		for k, v := range b.Cluster.Spec.CloudLabels {
-			if k == scaleway.TagClusterName && shared == true {
+			if k == scaleway.TagClusterName && shared {
 				klog.V(4).Infof("Skipping %q tag for shared resource", scaleway.TagClusterName)
 				continue
 			}

View File

@@ -39,7 +39,7 @@ func (b *NetworkModelBuilder) Build(c *fi.CloudupModelBuilderContext) error {
 		return nil
 	}
-	clusterName := strings.Replace(b.ClusterName(), ".", "-", -1)
+	clusterName := strings.ReplaceAll(b.ClusterName(), ".", "-")
 	vpcName := "vpc-" + clusterName
 	// Create a separate vpc for this cluster.

View File

@@ -283,10 +283,7 @@ func (b *AutoscalingGroupModelBuilder) splitToZones(ig *kops.InstanceGroup) (map
 		totalSize += targetSizes[i]
 	}
 	i := 0
-	for {
-		if totalSize >= minSize {
-			break
-		}
+	for totalSize < minSize {
 		targetSizes[i]++
 		totalSize++
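An infinite for loop whose body begins with "if done { break }" is the same as a for loop with the negated condition in its header; this hunk and the hosts-file loop later in the diff apply that rewrite. A minimal sketch:

package main

import "fmt"

func main() {
	targetSizes := []int{1, 1, 1}
	minSize := 5

	total := 0
	for _, s := range targetSizes {
		total += s
	}

	// Before:
	//	for {
	//		if total >= minSize {
	//			break
	//		}
	//		targetSizes[0]++
	//		total++
	//	}
	//
	// After: the termination test moves into the loop header.
	for total < minSize {
		targetSizes[0]++
		total++
	}

	fmt.Println(targetSizes, total) // [3 1 1] 5
}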

View File

@@ -287,7 +287,7 @@ func (b *MasterVolumeBuilder) addGCEVolume(c *fi.CloudupModelBuilderContext, pre
 	tags[gce.GceLabelNameEtcdClusterPrefix+etcd.Name] = gce.EncodeGCELabel(clusterSpec)
 	// GCE disk names must match the following regular expression: '[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?'
-	prefix = strings.Replace(prefix, ".", "-", -1)
+	prefix = strings.ReplaceAll(prefix, ".", "-")
 	if strings.IndexByte("0123456789-", prefix[0]) != -1 {
 		prefix = "d" + prefix
 	}
@@ -320,8 +320,6 @@ func (b *MasterVolumeBuilder) addHetznerVolume(c *fi.CloudupModelBuilderContext,
 		Labels: tags,
 	}
 	c.AddTask(t)
-
-	return
 }
 func (b *MasterVolumeBuilder) addOpenstackVolume(c *fi.CloudupModelBuilderContext, name string, volumeSize int32, zone string, etcd kops.EtcdClusterSpec, m kops.EtcdMemberSpec, allMembers []string) error {
@@ -430,6 +428,4 @@ func (b *MasterVolumeBuilder) addScalewayVolume(c *fi.CloudupModelBuilderContext
 		Type: fi.PtrTo(string(instance.VolumeVolumeTypeBSSD)),
 	}
 	c.AddTask(t)
-
-	return
 }

View File

@@ -706,10 +706,7 @@ func (b *FirewallModelBuilder) Build(c *fi.CloudupModelBuilderContext) error {
 	sgMap := make(map[string]*openstacktasks.SecurityGroup)
-	useVIPACL := false
-	if b.UseLoadBalancerForAPI() && b.UseVIPACL() {
-		useVIPACL = true
-	}
+	useVIPACL := b.UseLoadBalancerForAPI() && b.UseVIPACL()
 	sg := &openstacktasks.SecurityGroup{
 		Name: s(b.APIResourceName()),
 		Lifecycle: b.Lifecycle,
@@ -729,11 +726,12 @@ func (b *FirewallModelBuilder) Build(c *fi.CloudupModelBuilderContext) error {
 			Lifecycle: b.Lifecycle,
 			RemoveGroup: false,
 		}
-		if role == kops.InstanceGroupRoleBastion {
+		switch role {
+		case kops.InstanceGroupRoleBastion:
 			sg.RemoveExtraRules = []string{"port=22"}
-		} else if role == kops.InstanceGroupRoleNode {
+		case kops.InstanceGroupRoleNode:
 			sg.RemoveExtraRules = []string{"port=22", "port=10250"}
-		} else if role == kops.InstanceGroupRoleControlPlane {
+		case kops.InstanceGroupRoleControlPlane:
 			sg.RemoveExtraRules = []string{"port=22", "port=443", "port=10250"}
 		}
 		c.AddTask(sg)

View File

@@ -53,12 +53,9 @@ func (b *NetworkModelBuilder) Build(c *fi.CloudupModelBuilderContext) error {
 		c.AddTask(t)
 	}
-	needRouter := true
 	// Do not need router if there is no external network
-	if osSpec.Router == nil || osSpec.Router.ExternalNetwork == nil {
-		needRouter = false
-	}
-	routerName := strings.Replace(clusterName, ".", "-", -1)
+	needRouter := osSpec.Router != nil && osSpec.Router.ExternalNetwork != nil
+	routerName := strings.ReplaceAll(clusterName, ".", "-")
 	for _, sp := range b.Cluster.Spec.Networking.Subnets {
 		// assumes that we do not need to create routers if we use existing subnets
 		if sp.ID != "" {

View File

@@ -88,7 +88,7 @@ func (b *ServerGroupModelBuilder) buildInstances(c *fi.CloudupModelBuilderContex
 		return err
 	}
-	sshKeyName := strings.Replace(sshKeyNameFull, ":", "_", -1)
+	sshKeyName := strings.ReplaceAll(sshKeyNameFull, ":", "_")
 	igMeta := make(map[string]string)
 	cloudTags, err := b.KopsModelContext.CloudTagsForInstanceGroup(ig)
@@ -145,8 +145,8 @@ func (b *ServerGroupModelBuilder) buildInstances(c *fi.CloudupModelBuilderContex
 	for i := int32(0); i < *ig.Spec.MinSize; i++ {
 		// FIXME: Must ensure 63 or less characters
 		// replace all dots and _ with -, this is needed to get external cloudprovider working
-		iName := strings.Replace(strings.ToLower(fmt.Sprintf("%s-%d.%s", ig.Name, i+1, b.ClusterName())), "_", "-", -1)
-		instanceName := fi.PtrTo(strings.Replace(iName, ".", "-", -1))
+		iName := strings.ReplaceAll(strings.ToLower(fmt.Sprintf("%s-%d.%s", ig.Name, i+1, b.ClusterName())), "_", "-")
+		instanceName := fi.PtrTo(strings.ReplaceAll(iName, ".", "-"))
 		var az *string
 		var subnets []*openstacktasks.Subnet
@@ -175,13 +175,13 @@ func (b *ServerGroupModelBuilder) buildInstances(c *fi.CloudupModelBuilderContex
 		}
 		// Create instance port task
 		portName := fmt.Sprintf("%s-%s", "port", *instanceName)
-		portTagKopsName := strings.Replace(
-			strings.Replace(
+		portTagKopsName := strings.ReplaceAll(
+			strings.ReplaceAll(
 				strings.ToLower(
 					fmt.Sprintf("port-%s-%d", ig.Name, i+1),
 				),
-				"_", "-", -1,
-			), ".", "-", -1,
+				"_", "-",
+			), ".", "-",
 		)
 		portTask := &openstacktasks.Port{
 			Name: fi.PtrTo(portName),

View File

@@ -299,7 +299,7 @@ func AWSMultipartMIME(bootScript string, ig *kops.InstanceGroup) (string, error)
 		return "", err
 	}
-	writer.Write([]byte(fmt.Sprintf("Content-Type: multipart/mixed; boundary=\"%s\"\r\n", boundary)))
+	fmt.Fprintf(writer, "Content-Type: multipart/mixed; boundary=\"%s\"\r\n", boundary)
 	writer.Write([]byte("MIME-Version: 1.0\r\n\r\n"))
 	var err error
@@ -317,7 +317,7 @@ func AWSMultipartMIME(bootScript string, ig *kops.InstanceGroup) (string, error)
 		}
 	}
-	writer.Write([]byte(fmt.Sprintf("\r\n--%s--\r\n", boundary)))
+	fmt.Fprintf(writer, "\r\n--%s--\r\n", boundary)
 	writer.Flush()
 	mimeWriter.Close()

View File

@@ -369,9 +369,7 @@ func (n *nodeUpConfigBuilder) BuildConfig(ig *kops.InstanceGroup, wellKnownAddre
 	case kops.CloudProviderDO, kops.CloudProviderScaleway, kops.CloudProviderAzure:
 		// Use any IP address that is found (including public ones)
-		for _, additionalIP := range wellKnownAddresses[wellknownservices.KubeAPIServer] {
-			controlPlaneIPs = append(controlPlaneIPs, additionalIP)
-		}
+		controlPlaneIPs = append(controlPlaneIPs, wellKnownAddresses[wellknownservices.KubeAPIServer]...)
 	}
 	if cluster.UsesNoneDNS() {
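Appending the elements of one slice to another element-by-element can be a single variadic append (staticcheck's S1011 simplification). A quick sketch:

package main

import "fmt"

func main() {
	known := []string{"10.0.0.1", "10.0.0.2"}

	// Before: element-by-element loop.
	var controlPlaneIPs []string
	for _, ip := range known {
		controlPlaneIPs = append(controlPlaneIPs, ip)
	}

	// After: one append with the ... spread.
	var simplified []string
	simplified = append(simplified, known...)

	fmt.Println(controlPlaneIPs)
	fmt.Println(simplified) // identical output
}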

View File

@@ -190,20 +190,21 @@ func parsePEMPrivateKey(pemData []byte) (crypto.Signer, error) {
 		return nil, fmt.Errorf("could not parse private key (unable to decode PEM)")
 	}
-	if block.Type == "RSA PRIVATE KEY" {
+	switch block.Type {
+	case "RSA PRIVATE KEY":
 		klog.V(10).Infof("Parsing pem block: %q", block.Type)
 		return x509.ParsePKCS1PrivateKey(block.Bytes)
-	} else if block.Type == "EC PRIVATE KEY" {
+	case "EC PRIVATE KEY":
 		klog.V(10).Infof("Parsing pem block: %q", block.Type)
 		return x509.ParseECPrivateKey(block.Bytes)
-	} else if block.Type == "PRIVATE KEY" {
+	case "PRIVATE KEY":
 		klog.V(10).Infof("Parsing pem block: %q", block.Type)
 		k, err := x509.ParsePKCS8PrivateKey(block.Bytes)
 		if err != nil {
 			return nil, err
 		}
 		return k.(crypto.Signer), nil
-	} else {
+	default:
 		klog.Infof("Ignoring unexpected PEM block: %q", block.Type)
 	}

View File

@@ -47,19 +47,20 @@ func parsePEMPublicKey(pemData []byte) (crypto.PublicKey, error) {
 		return nil, fmt.Errorf("could not parse private key")
 	}
-	if block.Type == "RSA PUBLIC KEY" {
+	switch block.Type {
+	case "RSA PUBLIC KEY":
 		k, err := x509.ParsePKCS1PublicKey(block.Bytes)
 		if err != nil {
 			return nil, err
 		}
 		return k, nil
-	} else if block.Type == "PUBLIC KEY" {
+	case "PUBLIC KEY":
 		k, err := x509.ParsePKIXPublicKey(block.Bytes)
 		if err != nil {
 			return nil, err
 		}
 		return k.(crypto.PublicKey), nil
-	} else {
+	default:
 		klog.Infof("Ignoring unexpected PEM block: %q", block.Type)
 	}

View File

@@ -75,7 +75,7 @@ func listDroplets(cloud fi.Cloud, clusterName string) ([]*resources.Resource, er
 	c := cloud.(do.DOCloud)
 	var resourceTrackers []*resources.Resource
-	clusterTag := "KubernetesCluster:" + strings.Replace(clusterName, ".", "-", -1)
+	clusterTag := "KubernetesCluster:" + strings.ReplaceAll(clusterName, ".", "-")
 	droplets, err := c.GetAllDropletsByTag(clusterTag)
 	if err != nil {
@@ -102,7 +102,7 @@ func listVolumes(cloud fi.Cloud, clusterName string) ([]*resources.Resource, err
 	c := cloud.(do.DOCloud)
 	var resourceTrackers []*resources.Resource
-	volumeMatch := strings.Replace(clusterName, ".", "-", -1)
+	volumeMatch := strings.ReplaceAll(clusterName, ".", "-")
 	volumes, err := c.GetAllVolumesByRegion()
 	if err != nil {
@@ -220,7 +220,7 @@ func listLoadBalancers(cloud fi.Cloud, clusterName string) ([]*resources.Resourc
 	c := cloud.(do.DOCloud)
 	var resourceTrackers []*resources.Resource
-	clusterTag := "KubernetesCluster-Master:" + strings.Replace(clusterName, ".", "-", -1)
+	clusterTag := "KubernetesCluster-Master:" + strings.ReplaceAll(clusterName, ".", "-")
 	lbs, err := c.GetAllLoadBalancers()
 	if err != nil {

View File

@@ -39,10 +39,7 @@ const (
 func (os *clusterDiscoveryOS) DeleteSubnetLBs(subnet subnets.Subnet) ([]*resources.Resource, error) {
 	var resourceTrackers []*resources.Resource
-	preExistingSubnet := false
-	if !strings.HasSuffix(subnet.Name, os.clusterName) {
-		preExistingSubnet = true
-	}
+	preExistingSubnet := !strings.HasSuffix(subnet.Name, os.clusterName)
 	opts := loadbalancers.ListOpts{
 		VipSubnetID: subnet.ID,

View File

@@ -39,7 +39,7 @@ const (
 func (os *clusterDiscoveryOS) ListNetwork() ([]*resources.Resource, error) {
 	var resourceTrackers []*resources.Resource
-	routerName := strings.Replace(os.clusterName, ".", "-", -1)
+	routerName := strings.ReplaceAll(os.clusterName, ".", "-")
 	projectNetworks, err := os.osCloud.ListNetworks(networks.ListOpts{})
 	if err != nil {

View File

@@ -39,10 +39,7 @@ func (os *clusterDiscoveryOS) ListPorts(network networks.Network) ([]*resources.
 		return nil, err
 	}
-	preExistingNet := true
-	if os.clusterName == network.Name {
-		preExistingNet = false
-	}
+	preExistingNet := os.clusterName != network.Name
 	filteredPorts := []ports.Port{}
 	if preExistingNet {

View File

@@ -29,8 +29,8 @@ const (
 )
 func openstackKeyPairName(org string) string {
-	name := strings.Replace(org, ".", "-", -1)
-	name = strings.Replace(name, ":", "_", -1)
+	name := strings.ReplaceAll(org, ".", "-")
+	name = strings.ReplaceAll(name, ":", "_")
 	return name
 }

View File

@@ -53,8 +53,8 @@ func AssertMatchesFile(t *testing.T, actual string, p string) {
 	expected := strings.TrimSpace(string(expectedBytes))
 	// on windows, with git set to autocrlf, the reference files on disk have windows line endings
-	expected = strings.Replace(expected, "\r\n", "\n", -1)
-	actual = strings.Replace(actual, "\r\n", "\n", -1)
+	expected = strings.ReplaceAll(expected, "\r\n", "\n")
+	actual = strings.ReplaceAll(actual, "\r\n", "\n")
 	if actual == expected && err == nil {
 		return

View File

@@ -151,11 +151,7 @@ func UpdateHostsFileWithRecords(p string, mutator func(guarded []string) (*HostM
 	}
 	// Ensure a single blank line
-	for {
-		if len(out) == 0 {
-			break
-		}
+	for len(out) > 0 {
 		if out[len(out)-1] != "" {
 			break
 		}

View File

@@ -44,7 +44,7 @@ func (p *SeedProvider) GetSeeds() ([]string, error) {
 	for _, droplet := range droplets {
 		for _, dropTag := range droplet.Tags {
 			klog.V(4).Infof("Get Seeds - droplet found=%s,SeedProvider Tag=%s", dropTag, p.tag)
-			if strings.Contains(dropTag, strings.Replace(p.tag, ".", "-", -1)) {
+			if strings.Contains(dropTag, strings.ReplaceAll(p.tag, ".", "-")) {
 				klog.V(4).Infof("Tag matched for droplet tag =%s. Getting private IP", p.tag)
 				ip, err := droplet.PrivateIPv4()
 				if err == nil {

View File

@@ -69,7 +69,7 @@ func GetClusterID() (string, error) {
 	for _, dropletTag := range dropletTags {
 		if strings.Contains(dropletTag, "KubernetesCluster:") {
-			clusterID = strings.Replace(dropletTag, ".", "-", -1)
+			clusterID = strings.ReplaceAll(dropletTag, ".", "-")
 			tokens := strings.Split(clusterID, ":")
 			if len(tokens) != 2 {
@@ -181,7 +181,7 @@ func (d *DOCloudProvider) getEtcdClusterSpec(vol godo.Volume) (*etcd.EtcdCluster
 func (d *DOCloudProvider) GossipSeeds() (gossip.SeedProvider, error) {
 	for _, dropletTag := range d.dropletTags {
-		if strings.Contains(dropletTag, strings.Replace(d.ClusterID, ".", "-", -1)) {
+		if strings.Contains(dropletTag, strings.ReplaceAll(d.ClusterID, ".", "-")) {
 			return gossipdo.NewSeedProvider(d.godoClient, dropletTag)
 		}
 	}

View File

@@ -132,8 +132,8 @@ func runTest(t *testing.T, srcDir string, fromVersion string, toVersion string)
 	actualString := strings.TrimSpace(strings.Join(actual, "\n---\n\n"))
 	expectedString := strings.TrimSpace(string(expectedBytes))
-	actualString = strings.Replace(actualString, "\r", "", -1)
-	expectedString = strings.Replace(expectedString, "\r", "", -1)
+	actualString = strings.ReplaceAll(actualString, "\r", "")
+	expectedString = strings.ReplaceAll(expectedString, "\r", "")
 	if actualString != expectedString {
 		diffString := diff.FormatDiff(expectedString, actualString)

View File

@@ -47,10 +47,7 @@ func BuildChanges(a, e, changes interface{}) bool {
 	}
 	va := reflect.ValueOf(a)
-	aIsNil := false
-	if va.IsNil() {
-		aIsNil = true
-	}
+	aIsNil := va.IsNil()
 	if !aIsNil {
 		va = va.Elem()

View File

@@ -689,7 +689,7 @@ func (_ *NetworkLoadBalancer) RenderTerraform(t *terraform.TerraformTarget, a, e
 }
 func (e *NetworkLoadBalancer) TerraformName() string {
-	tfName := strings.Replace(fi.ValueOf(e.Name), ".", "-", -1)
+	tfName := strings.ReplaceAll(fi.ValueOf(e.Name), ".", "-")
 	return tfName
 }

View File

@@ -197,7 +197,7 @@ func (_ *SSHKey) RenderTerraform(t *terraform.TerraformTarget, a, e, changes *SS
 	if e.IsExistingKey() {
 		return nil
 	}
-	tfName := strings.Replace(*e.Name, ":", "", -1)
+	tfName := strings.ReplaceAll(*e.Name, ":", "")
 	publicKey, err := t.AddFileResource("aws_key_pair", tfName, "public_key", e.PublicKey, false)
 	if err != nil {
 		return fmt.Errorf("error rendering PublicKey: %v", err)
@@ -225,7 +225,7 @@ func (e *SSHKey) TerraformLink() *terraformWriter.Literal {
 	if e.IsExistingKey() {
 		return terraformWriter.LiteralFromStringValue(*e.Name)
 	}
-	tfName := strings.Replace(*e.Name, ":", "", -1)
+	tfName := strings.ReplaceAll(*e.Name, ":", "")
 	return terraformWriter.LiteralProperty("aws_key_pair", tfName, "id")
 }
} }

View File

@@ -2184,13 +2184,14 @@ func findDNSName(cloud AWSCloud, cluster *kops.Cluster) (string, error) {
 	if cluster.Spec.API.LoadBalancer == nil {
 		return "", nil
 	}
-	if cluster.Spec.API.LoadBalancer.Class == kops.LoadBalancerClassClassic {
+	switch cluster.Spec.API.LoadBalancer.Class {
+	case kops.LoadBalancerClassClassic:
 		if lb, err := cloud.FindELBByNameTag(name); err != nil {
 			return "", fmt.Errorf("error looking for AWS ELB: %v", err)
 		} else if lb != nil {
 			return aws.ToString(lb.DNSName), nil
 		}
-	} else if cluster.Spec.API.LoadBalancer.Class == kops.LoadBalancerClassNetwork {
+	case kops.LoadBalancerClassNetwork:
 		allLoadBalancers, err := ListELBV2LoadBalancers(ctx, cloud)
 		if err != nil {
 			return "", fmt.Errorf("looking for AWS NLB: %w", err)
@@ -2406,7 +2407,7 @@ func GetInstanceCertificateNames(instances *ec2.DescribeInstancesOutput) (addrs
 			if iface.PrivateIpAddress != nil {
 				addrs = append(addrs, *iface.PrivateIpAddress)
 			}
-			if iface.Ipv6Addresses != nil && len(iface.Ipv6Addresses) > 0 {
+			if len(iface.Ipv6Addresses) > 0 {
 				addrs = append(addrs, *iface.Ipv6Addresses[0].Ipv6Address)
 			}
 			if iface.Association != nil && iface.Association.PublicIp != nil {

View File

@@ -25,7 +25,6 @@ import (
 	"sync"
 	"github.com/aws/aws-sdk-go-v2/aws"
-	awsv2 "github.com/aws/aws-sdk-go-v2/aws"
 	"github.com/aws/aws-sdk-go-v2/aws/arn"
 	autoscalingtypes "github.com/aws/aws-sdk-go-v2/service/autoscaling/types"
 	ec2 "github.com/aws/aws-sdk-go-v2/service/ec2"
@@ -75,7 +74,7 @@ func ValidateRegion(ctx context.Context, region string) error {
 	}
 	for _, r := range allRegions {
-		name := awsv2.ToString(r.RegionName)
+		name := aws.ToString(r.RegionName)
 		if name == region {
 			return nil
 		}
@@ -215,7 +214,7 @@ func GetClusterName40(cluster string) string {
 // GetResourceName32 will attempt to calculate a meaningful name for a resource given a prefix
 // Will never return a string longer than 32 chars
 func GetResourceName32(cluster string, prefix string) string {
-	s := prefix + "-" + strings.Replace(cluster, ".", "-", -1)
+	s := prefix + "-" + strings.ReplaceAll(cluster, ".", "-")
 	// We always compute the hash and add it, lest we trick users into assuming that we never do this
 	opt := truncate.TruncateStringOptions{

View File

@@ -89,10 +89,10 @@ func (nsg *NetworkSecurityGroup) Find(c *fi.CloudupContext) (*NetworkSecurityGro
 			DestinationAddressPrefix: rule.Properties.DestinationAddressPrefix,
 			DestinationPortRange: rule.Properties.DestinationPortRange,
 		}
-		if rule.Properties.SourceAddressPrefixes != nil && len(rule.Properties.SourceAddressPrefixes) > 0 {
+		if len(rule.Properties.SourceAddressPrefixes) > 0 {
 			nsr.SourceAddressPrefixes = rule.Properties.SourceAddressPrefixes
 		}
-		if rule.Properties.SourceApplicationSecurityGroups != nil && len(rule.Properties.SourceApplicationSecurityGroups) > 0 {
+		if len(rule.Properties.SourceApplicationSecurityGroups) > 0 {
 			var sasgs []*string
 			for _, sasg := range rule.Properties.SourceApplicationSecurityGroups {
 				asg, err := azure.ParseApplicationSecurityGroupID(*sasg.ID)
@@ -105,10 +105,10 @@ func (nsg *NetworkSecurityGroup) Find(c *fi.CloudupContext) (*NetworkSecurityGro
 			}
 			nsr.SourceApplicationSecurityGroupNames = sasgs
 		}
-		if rule.Properties.DestinationAddressPrefixes != nil && len(rule.Properties.DestinationAddressPrefixes) > 0 {
+		if len(rule.Properties.DestinationAddressPrefixes) > 0 {
 			nsr.DestinationAddressPrefixes = rule.Properties.DestinationAddressPrefixes
 		}
-		if rule.Properties.DestinationApplicationSecurityGroups != nil && len(rule.Properties.DestinationApplicationSecurityGroups) > 0 {
+		if len(rule.Properties.DestinationApplicationSecurityGroups) > 0 {
 			var dasgs []*string
 			for _, dasg := range rule.Properties.DestinationApplicationSecurityGroups {
 				asg, err := azure.ParseApplicationSecurityGroupID(*dasg.ID)
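A nil check guarding len(x) > 0 on a slice is redundant, because len of a nil slice is 0; staticcheck reports this as S1009, and the fix keeps only the length check (the EC2 IPv6 and OpenStack monitor hunks are the same pattern). A small sketch:

package main

import "fmt"

func main() {
	var prefixes []string // nil slice

	// Before: the nil check adds nothing.
	if prefixes != nil && len(prefixes) > 0 {
		fmt.Println("has prefixes (verbose check)")
	}

	// After: len of a nil slice is 0, so this is enough.
	if len(prefixes) > 0 {
		fmt.Println("has prefixes (simplified check)")
	}

	fmt.Println(len(prefixes)) // 0
}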

View File

@@ -295,7 +295,7 @@ func (c *doCloudImplementation) GetApiIngressStatus(cluster *kops.Cluster) ([]fi
 			return false, fmt.Errorf("LoadBalancers.List returned error: %v", err)
 		}
-		lbName := "api-" + strings.Replace(cluster.Name, ".", "-", -1)
+		lbName := "api-" + strings.ReplaceAll(cluster.Name, ".", "-")
 		for _, lb := range loadBalancers {
 			if lb.Name == lbName {
@@ -352,7 +352,7 @@ func findEtcdStatus(c *doCloudImplementation, cluster *kops.Cluster) ([]kops.Etc
 		klog.V(8).Infof("findEtcdStatus status (from cloud): checking if volume with tag %q belongs to cluster", myTag)
 		// check if volume belongs to this cluster.
 		// tag will be in the format "KubernetesCluster:dev5-k8s-local" (where clusterName is dev5.k8s.local)
-		clusterName := strings.Replace(cluster.Name, ".", "-", -1)
+		clusterName := strings.ReplaceAll(cluster.Name, ".", "-")
 		if strings.Contains(myTag, fmt.Sprintf("%s:%s", TagKubernetesClusterNamePrefix, clusterName)) {
 			klog.V(10).Infof("findEtcdStatus cluster comparison matched for tag: %v", myTag)
 			// this volume belongs to our cluster, add this to our etcdClusterSpec.
@@ -453,7 +453,7 @@ func findInstanceGroups(c *doCloudImplementation, clusterName string) ([]DOInsta
 	var result []DOInstanceGroup
 	instanceGroupMap := make(map[string][]string) // map of instance group name with droplet ids
-	clusterTag := "KubernetesCluster:" + strings.Replace(clusterName, ".", "-", -1)
+	clusterTag := "KubernetesCluster:" + strings.ReplaceAll(clusterName, ".", "-")
 	droplets, err := c.GetAllDropletsByTag(clusterTag)
 	if err != nil {
 		return nil, fmt.Errorf("get all droplets for tag %s returned error. Error=%v", clusterTag, err)

View File

@ -20,6 +20,6 @@ import "strings"
func SafeClusterName(clusterName string) string { func SafeClusterName(clusterName string) string {
// DO does not support . in tags / names // DO does not support . in tags / names
safeClusterName := strings.Replace(clusterName, ".", "-", -1) safeClusterName := strings.ReplaceAll(clusterName, ".", "-")
return safeClusterName return safeClusterName
} }

View File

@@ -26,7 +26,6 @@ import (
 	"k8s.io/kops/upup/pkg/fi"
 	"k8s.io/kops/upup/pkg/fi/cloudup/do"
 	"k8s.io/kops/upup/pkg/fi/cloudup/terraform"
-	_ "k8s.io/kops/upup/pkg/fi/cloudup/terraform"
 	"k8s.io/kops/upup/pkg/fi/cloudup/terraformWriter"
 )

View File

@@ -27,7 +27,6 @@ import (
 	"k8s.io/kops/upup/pkg/fi"
 	"k8s.io/kops/upup/pkg/fi/cloudup/do"
 	"k8s.io/kops/upup/pkg/fi/cloudup/terraform"
-	_ "k8s.io/kops/upup/pkg/fi/cloudup/terraform"
 	"k8s.io/kops/upup/pkg/fi/cloudup/terraformWriter"
 )
@@ -172,7 +171,7 @@ func (_ *SSHKey) RenderTerraform(t *terraform.TerraformTarget, a, e, changes *SS
 	if e.IsExistingKey() {
 		return nil
 	}
-	tfName := strings.Replace(*e.Name, ":", "", -1)
+	tfName := strings.ReplaceAll(*e.Name, ":", "")
 	publicKey, err := t.AddFileResource("digitalocean_ssh_key", tfName, "public_key", e.PublicKey, false)
 	if err != nil {
 		return fmt.Errorf("error rendering PublicKey: %v", err)
@@ -199,7 +198,7 @@ func (e *SSHKey) TerraformLink() *terraformWriter.Literal {
 	if e.IsExistingKey() {
 		return terraformWriter.LiteralFromStringValue(*e.Name)
 	}
-	tfName := strings.Replace(*e.Name, ":", "", -1)
+	tfName := strings.ReplaceAll(*e.Name, ":", "")
 	return terraformWriter.LiteralProperty("digitalocean_ssh_key", tfName, "id")
 }
} }

View File

@@ -58,7 +58,7 @@ func EncodeGCELabel(s string) string {
 // DecodeGCELabel reverse EncodeGCELabel, taking the encoded RFC1035 compatible value back to a string
 func DecodeGCELabel(s string) (string, error) {
-	uriForm := strings.Replace(s, "-", "%", -1)
+	uriForm := strings.ReplaceAll(s, "-", "%")
 	v, err := url.QueryUnescape(uriForm)
 	if err != nil {
 		return "", fmt.Errorf("cannot decode GCE label: %q", s)

View File

@@ -58,7 +58,7 @@ func ClusterPrefixedName(objectName string, clusterName string, maxLength int) s
 	}
 	// GCE does not support . in tags / names
-	safeClusterName := strings.Replace(clusterName, ".", "-", -1)
+	safeClusterName := strings.ReplaceAll(clusterName, ".", "-")
 	opt := truncate.TruncateStringOptions{
 		MaxLength: prefixLength,
@@ -79,7 +79,7 @@ func ClusterSuffixedName(objectName string, clusterName string, maxLength int) s
 	}
 	// GCE does not support . in tags / names
-	safeClusterName := strings.Replace(clusterName, ".", "-", -1)
+	safeClusterName := strings.ReplaceAll(clusterName, ".", "-")
 	opt := truncate.TruncateStringOptions{
 		MaxLength: suffixLength,
@@ -95,7 +95,7 @@ func ClusterSuffixedName(objectName string, clusterName string, maxLength int) s
 // deprecated: prefer ClusterSuffixedName
 func SafeClusterName(clusterName string) string {
 	// GCE does not support . in tags / names
-	safeClusterName := strings.Replace(clusterName, ".", "-", -1)
+	safeClusterName := strings.ReplaceAll(clusterName, ".", "-")
 	return safeClusterName
 }
@@ -113,7 +113,7 @@ func LabelForCluster(clusterName string) Label {
 // SafeTruncatedClusterName returns a safe and truncated cluster name
 func SafeTruncatedClusterName(clusterName string, maxLength int) string {
 	// GCE does not support . in tags / names
-	safeClusterName := strings.Replace(clusterName, ".", "-", -1)
+	safeClusterName := strings.ReplaceAll(clusterName, ".", "-")
 	opt := truncate.TruncateStringOptions{
 		MaxLength: maxLength,

View File

@@ -195,7 +195,8 @@ func updateSecondaryRanges(cloud gce.GCECloud, op string, e *Subnet) error {
 	}
 	// Cannot add and remove ranges in the same call
-	if op == "add" {
+	switch op {
+	case "add":
 		patch := false
 		for k, v := range expectedRanges {
 			if actualRanges[k] != v {
@@ -211,7 +212,7 @@ func updateSecondaryRanges(cloud gce.GCECloud, op string, e *Subnet) error {
 		if !patch {
 			return nil
 		}
-	} else if op == "remove" {
+	case "remove":
 		patch := false
 		if len(actualRanges) != len(expectedRanges) {
 			patch = true

View File

@@ -66,10 +66,7 @@ func (c *openstackCloud) CreateInstance(opt servers.CreateOptsBuilder, scheduler
 }
 func IsPortInUse(err error) bool {
-	if gophercloud.ResponseCodeIs(err, http.StatusConflict) {
-		return true
-	}
-	return false
+	return gophercloud.ResponseCodeIs(err, http.StatusConflict)
 }
 // waitForStatusActive uses gopherclouds WaitFor() func to determine when the server becomes "ACTIVE".
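An if statement that exists only to return true or false can return its condition directly (staticcheck S1008). A minimal sketch, with an illustrative string check standing in for the gophercloud response-code call:

package main

import (
	"errors"
	"fmt"
	"strings"
)

// isConflictVerbose mirrors the pre-fix shape.
func isConflictVerbose(err error) bool {
	if strings.Contains(err.Error(), "409") {
		return true
	}
	return false
}

// isConflict returns the condition itself.
func isConflict(err error) bool {
	return strings.Contains(err.Error(), "409")
}

func main() {
	err := errors.New("request failed: 409 Conflict")
	fmt.Println(isConflictVerbose(err), isConflict(err)) // true true
}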

View File

@ -72,11 +72,12 @@ func waitLoadbalancerActiveProvisioningStatus(client *gophercloud.ServiceClient,
return false, err return false, err
} }
provisioningStatus = loadbalancer.ProvisioningStatus provisioningStatus = loadbalancer.ProvisioningStatus
if loadbalancer.ProvisioningStatus == activeStatus { switch loadbalancer.ProvisioningStatus {
case activeStatus:
return true, nil return true, nil
} else if loadbalancer.ProvisioningStatus == errorStatus { case errorStatus:
return true, fmt.Errorf("loadbalancer has gone into ERROR state") return true, fmt.Errorf("loadbalancer has gone into ERROR state")
} else { default:
klog.Infof("Waiting for Loadbalancer to be ACTIVE...") klog.Infof("Waiting for Loadbalancer to be ACTIVE...")
return false, nil return false, nil
} }
@ -115,11 +116,7 @@ func NewLBTaskFromCloud(cloud openstack.OpenstackCloud, lifecycle fi.Lifecycle,
return nil, err return nil, err
} }
secGroup := true secGroup := find == nil || find.SecurityGroup != nil
if find != nil && find.SecurityGroup == nil {
secGroup = false
}
actual := &LB{ actual := &LB{
ID: fi.PtrTo(lb.ID), ID: fi.PtrTo(lb.ID),
Name: fi.PtrTo(lb.Name), Name: fi.PtrTo(lb.Name),
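The secGroup flag above is folded into one expression: initializing it to true and flipping it to false when `find != nil && find.SecurityGroup == nil` holds is the same as assigning `find == nil || find.SecurityGroup != nil` (De Morgan). A sketch with stand-in types, since the real LB task structs are not reproduced here:

    package main

    import "fmt"

    // stand-ins for the task structs referenced above
    type SecurityGroup struct{ Name string }
    type LB struct{ SecurityGroup *SecurityGroup }

    // wantsSecurityGroup is equivalent to:
    //   secGroup := true
    //   if find != nil && find.SecurityGroup == nil { secGroup = false }
    func wantsSecurityGroup(find *LB) bool {
        return find == nil || find.SecurityGroup != nil
    }

    func main() {
        fmt.Println(wantsSecurityGroup(nil))                                  // true
        fmt.Println(wantsSecurityGroup(&LB{}))                                // false
        fmt.Println(wantsSecurityGroup(&LB{SecurityGroup: &SecurityGroup{}})) // true
    }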

View File

@ -62,7 +62,7 @@ func (p *PoolMonitor) Find(context *fi.CloudupContext) (*PoolMonitor, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
if rs == nil || len(rs) == 0 { if len(rs) == 0 {
return nil, nil return nil, nil
} else if len(rs) != 1 { } else if len(rs) != 1 {
return nil, fmt.Errorf("found multiple monitors with name: %s", fi.ValueOf(p.Name)) return nil, fmt.Errorf("found multiple monitors with name: %s", fi.ValueOf(p.Name))
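`rs == nil || len(rs) == 0` collapses to `len(rs) == 0` because len of a nil slice (or map) is defined as zero in Go, so the nil check adds nothing; gosimple reports this as a redundant nil check. A quick sketch with an invented slice:

    package main

    import "fmt"

    func main() {
        var rs []string // nil slice

        fmt.Println(rs == nil)    // true
        fmt.Println(len(rs) == 0) // true: len of a nil slice is 0, so the nil check is redundant

        rs = append(rs, "monitor-a")
        fmt.Println(len(rs) == 0) // false
    }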

View File

@ -163,7 +163,7 @@ func (_ *ServerGroup) RenderOpenstack(t *openstack.OpenstackAPITarget, a, e, cha
for currentLastIndex > fi.ValueOf(maxSize) { for currentLastIndex > fi.ValueOf(maxSize) {
iName := strings.ToLower(fmt.Sprintf("%s-%d.%s", igName, currentLastIndex, fi.ValueOf(a.ClusterName))) iName := strings.ToLower(fmt.Sprintf("%s-%d.%s", igName, currentLastIndex, fi.ValueOf(a.ClusterName)))
instanceName := strings.Replace(iName, ".", "-", -1) instanceName := strings.ReplaceAll(iName, ".", "-")
opts := servers.ListOpts{ opts := servers.ListOpts{
Name: fmt.Sprintf("^%s", igName), Name: fmt.Sprintf("^%s", igName),
} }

View File

@ -108,8 +108,8 @@ func (s *SSHKey) CheckChanges(a, e, changes *SSHKey) error {
} }
func openstackKeyPairName(org string) string { func openstackKeyPairName(org string) string {
name := strings.Replace(org, ".", "-", -1) name := strings.ReplaceAll(org, ".", "-")
name = strings.Replace(name, ":", "_", -1) name = strings.ReplaceAll(name, ":", "_")
return name return name
} }

View File

@ -192,14 +192,8 @@ func PopulateInstanceGroupSpec(cluster *kops.Cluster, input *kops.InstanceGroup,
} }
hasGPU := false hasGPU := false
clusterNvidia := false clusterNvidia := cluster.Spec.Containerd != nil && cluster.Spec.Containerd.NvidiaGPU != nil && fi.ValueOf(cluster.Spec.Containerd.NvidiaGPU.Enabled)
if cluster.Spec.Containerd != nil && cluster.Spec.Containerd.NvidiaGPU != nil && fi.ValueOf(cluster.Spec.Containerd.NvidiaGPU.Enabled) { igNvidia := ig.Spec.Containerd != nil && ig.Spec.Containerd.NvidiaGPU != nil && fi.ValueOf(ig.Spec.Containerd.NvidiaGPU.Enabled)
clusterNvidia = true
}
igNvidia := false
if ig.Spec.Containerd != nil && ig.Spec.Containerd.NvidiaGPU != nil && fi.ValueOf(ig.Spec.Containerd.NvidiaGPU.Enabled) {
igNvidia = true
}
switch cluster.GetCloudProvider() { switch cluster.GetCloudProvider() {
case kops.CloudProviderAWS: case kops.CloudProviderAWS:

View File

@ -112,10 +112,8 @@ func (_ *DNSRecord) CheckChanges(actual, expected, changes *DNSRecord) error {
} }
func (d *DNSRecord) RenderScw(t *scaleway.ScwAPITarget, actual, expected, changes *DNSRecord) error { func (d *DNSRecord) RenderScw(t *scaleway.ScwAPITarget, actual, expected, changes *DNSRecord) error {
cloud := t.Cloud.(scaleway.ScwCloud)
if actual != nil { if actual != nil {
recordUpdated, err := cloud.DomainService().UpdateDNSZoneRecords(&domain.UpdateDNSZoneRecordsRequest{ recordUpdated, err := t.Cloud.DomainService().UpdateDNSZoneRecords(&domain.UpdateDNSZoneRecordsRequest{
DNSZone: fi.ValueOf(actual.DNSZone), DNSZone: fi.ValueOf(actual.DNSZone),
Changes: []*domain.RecordChange{ Changes: []*domain.RecordChange{
{ {
@ -138,7 +136,7 @@ func (d *DNSRecord) RenderScw(t *scaleway.ScwAPITarget, actual, expected, change
return nil return nil
} }
recordCreated, err := cloud.DomainService().UpdateDNSZoneRecords(&domain.UpdateDNSZoneRecordsRequest{ recordCreated, err := t.Cloud.DomainService().UpdateDNSZoneRecords(&domain.UpdateDNSZoneRecordsRequest{
DNSZone: fi.ValueOf(expected.DNSZone), DNSZone: fi.ValueOf(expected.DNSZone),
Changes: []*domain.RecordChange{ Changes: []*domain.RecordChange{
{ {

View File

@ -92,7 +92,7 @@ func (s *Instance) Find(c *fi.CloudupContext) (*Instance, error) {
alreadyTagged = true alreadyTagged = true
} }
} }
if alreadyTagged == true { if alreadyTagged {
continue continue
} }
@ -107,7 +107,7 @@ func (s *Instance) Find(c *fi.CloudupContext) (*Instance, error) {
if err != nil { if err != nil {
return nil, fmt.Errorf("checking image differences in server %s (%s): %w", server.Name, server.ID, err) return nil, fmt.Errorf("checking image differences in server %s (%s): %w", server.Name, server.ID, err)
} }
if diff == true { if diff {
needsUpdate = append(needsUpdate, server.ID) needsUpdate = append(needsUpdate, server.ID)
continue continue
} }
@ -117,7 +117,7 @@ func (s *Instance) Find(c *fi.CloudupContext) (*Instance, error) {
if err != nil { if err != nil {
return nil, fmt.Errorf("checking user-data differences in server %s (%s): %w", server.Name, server.ID, err) return nil, fmt.Errorf("checking user-data differences in server %s (%s): %w", server.Name, server.ID, err)
} }
if diff == true { if diff {
needsUpdate = append(needsUpdate, server.ID) needsUpdate = append(needsUpdate, server.ID)
} }
} }
@ -175,8 +175,7 @@ func (_ *Instance) CheckChanges(actual, expected, changes *Instance) error {
} }
func (_ *Instance) RenderScw(t *scaleway.ScwAPITarget, actual, expected, changes *Instance) error { func (_ *Instance) RenderScw(t *scaleway.ScwAPITarget, actual, expected, changes *Instance) error {
cloud := t.Cloud.(scaleway.ScwCloud) instanceService := t.Cloud.InstanceService()
instanceService := cloud.InstanceService()
zone := scw.Zone(fi.ValueOf(expected.Zone)) zone := scw.Zone(fi.ValueOf(expected.Zone))
userData, err := fi.ResourceAsBytes(*expected.UserData) userData, err := fi.ResourceAsBytes(*expected.UserData)
@ -216,7 +215,7 @@ func (_ *Instance) RenderScw(t *scaleway.ScwAPITarget, actual, expected, changes
// If newInstanceCount > 0, we need to create new instances for this group // If newInstanceCount > 0, we need to create new instances for this group
for i := 0; i < newInstanceCount; i++ { for i := 0; i < newInstanceCount; i++ {
// We create a unique name for each server // We create a unique name for each server
uniqueName, err := uniqueName(cloud, scaleway.ClusterNameFromTags(expected.Tags), fi.ValueOf(expected.Name)) uniqueName, err := uniqueName(t.Cloud, scaleway.ClusterNameFromTags(expected.Tags), fi.ValueOf(expected.Name))
if err != nil { if err != nil {
return fmt.Errorf("error rendering server group %s: computing unique name for server: %w", fi.ValueOf(expected.Name), err) return fmt.Errorf("error rendering server group %s: computing unique name for server: %w", fi.ValueOf(expected.Name), err)
} }
@ -288,14 +287,14 @@ func (_ *Instance) RenderScw(t *scaleway.ScwAPITarget, actual, expected, changes
// If newInstanceCount < 0, we need to delete instances of this group // If newInstanceCount < 0, we need to delete instances of this group
if newInstanceCount < 0 { if newInstanceCount < 0 {
igInstances, err := cloud.GetClusterServers(cloud.ClusterName(actual.Tags), actual.Name) igInstances, err := t.Cloud.GetClusterServers(t.Cloud.ClusterName(actual.Tags), actual.Name)
if err != nil { if err != nil {
return fmt.Errorf("error deleting instance: %w", err) return fmt.Errorf("error deleting instance: %w", err)
} }
for i := 0; i > newInstanceCount; i-- { for i := 0; i > newInstanceCount; i-- {
toDelete := igInstances[i*-1] toDelete := igInstances[i*-1]
err = cloud.DeleteServer(toDelete) err = t.Cloud.DeleteServer(toDelete)
if err != nil { if err != nil {
return fmt.Errorf("error deleting instance of group %s: %w", toDelete.Name, err) return fmt.Errorf("error deleting instance of group %s: %w", toDelete.Name, err)
} }
@ -457,7 +456,7 @@ func findFirstFreeIndex(existing []*instance.Server) int {
break break
} }
} }
if found == false { if !found {
return index return index
} }
} }
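The `alreadyTagged == true`, `diff == true` and `found == false` comparisons above are dropped in favor of using the booleans directly (`if diff`, `if !found`), the gosimple S1002-style fix. A tiny sketch with invented flags:

    package main

    import "fmt"

    func main() {
        diff := true
        found := false

        // Instead of: if diff == true { ... }
        if diff {
            fmt.Println("server needs update")
        }
        // Instead of: if found == false { ... }
        if !found {
            fmt.Println("index is free")
        }
    }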

View File

@ -114,8 +114,6 @@ func (*SSHKey) RenderScw(t *scaleway.ScwAPITarget, actual, expected, changes *SS
return nil return nil
} }
cloud := t.Cloud.(scaleway.ScwCloud)
name := fi.ValueOf(expected.Name) name := fi.ValueOf(expected.Name)
if name == "" { if name == "" {
return fi.RequiredField("Name") return fi.RequiredField("Name")
@ -133,7 +131,7 @@ func (*SSHKey) RenderScw(t *scaleway.ScwAPITarget, actual, expected, changes *SS
keyArgs.PublicKey = d keyArgs.PublicKey = d
} }
key, err := cloud.IamService().CreateSSHKey(keyArgs) key, err := t.Cloud.IamService().CreateSSHKey(keyArgs)
if err != nil { if err != nil {
return fmt.Errorf("error creating SSH keypair: %w", err) return fmt.Errorf("error creating SSH keypair: %w", err)
} }

View File

@ -271,7 +271,7 @@ func (e *Elastigroup) Find(c *fi.CloudupContext) (*Elastigroup, error) {
// Tags. // Tags.
{ {
if lc.Tags != nil && len(lc.Tags) > 0 { if len(lc.Tags) > 0 {
actual.Tags = make(map[string]string) actual.Tags = make(map[string]string)
for _, tag := range lc.Tags { for _, tag := range lc.Tags {
actual.Tags[fi.ValueOf(tag.Key)] = fi.ValueOf(tag.Value) actual.Tags[fi.ValueOf(tag.Key)] = fi.ValueOf(tag.Value)
@ -350,7 +350,7 @@ func (e *Elastigroup) Find(c *fi.CloudupContext) (*Elastigroup, error) {
{ {
associatePublicIP := false associatePublicIP := false
if lc.NetworkInterfaces != nil && len(lc.NetworkInterfaces) > 0 { if len(lc.NetworkInterfaces) > 0 {
for _, iface := range lc.NetworkInterfaces { for _, iface := range lc.NetworkInterfaces {
if fi.ValueOf(iface.AssociatePublicIPAddress) { if fi.ValueOf(iface.AssociatePublicIPAddress) {
associatePublicIP = true associatePublicIP = true

View File

@ -160,7 +160,7 @@ func (o *LaunchSpec) Find(c *fi.CloudupContext) (*LaunchSpec, error) {
// Image. // Image.
{ {
// convert spec from api that reply for multi arch data only in spec.images // convert spec from api that reply for multi arch data only in spec.images
if spec.Images != nil && len(spec.Images) > 1 { if len(spec.Images) > 1 {
spec.SetImageId(fi.PtrTo(fi.ValueOf(spec.Images[0].ImageId))) spec.SetImageId(fi.PtrTo(fi.ValueOf(spec.Images[0].ImageId)))
actual.OtherArchitectureImages = append(actual.OtherArchitectureImages, fi.ValueOf(spec.Images[1].ImageId)) actual.OtherArchitectureImages = append(actual.OtherArchitectureImages, fi.ValueOf(spec.Images[1].ImageId))
} }

View File

@ -230,7 +230,7 @@ func (o *Ocean) Find(c *fi.CloudupContext) (*Ocean, error) {
// Tags. // Tags.
{ {
if lc.Tags != nil && len(lc.Tags) > 0 { if len(lc.Tags) > 0 {
actual.Tags = make(map[string]string) actual.Tags = make(map[string]string)
for _, tag := range lc.Tags { for _, tag := range lc.Tags {
actual.Tags[fi.ValueOf(tag.Key)] = fi.ValueOf(tag.Value) actual.Tags[fi.ValueOf(tag.Key)] = fi.ValueOf(tag.Value)

View File

@ -95,7 +95,7 @@ func (tf *TemplateFunctions) AddTo(dest template.FuncMap, secretStore fi.SecretS
dest["SharedVPC"] = tf.SharedVPC dest["SharedVPC"] = tf.SharedVPC
// Remember that we may be on a different arch from the target. Hard-code for now. // Remember that we may be on a different arch from the target. Hard-code for now.
dest["replace"] = func(s, find, replace string) string { dest["replace"] = func(s, find, replace string) string {
return strings.Replace(s, find, replace, -1) return strings.ReplaceAll(s, find, replace)
} }
dest["joinHostPort"] = net.JoinHostPort dest["joinHostPort"] = net.JoinHostPort
@ -131,10 +131,7 @@ func (tf *TemplateFunctions) AddTo(dest template.FuncMap, secretStore fi.SecretS
} }
dest["GossipEnabled"] = func() bool { dest["GossipEnabled"] = func() bool {
if cluster.UsesLegacyGossip() { return cluster.UsesLegacyGossip()
return true
}
return false
} }
dest["PublishesDNSRecords"] = func() bool { dest["PublishesDNSRecords"] = func() bool {
return cluster.PublishesDNSRecords() return cluster.PublishesDNSRecords()
@ -383,7 +380,7 @@ func (tf *TemplateFunctions) AddTo(dest template.FuncMap, secretStore fi.SecretS
if cluster.Spec.CloudProvider.AWS != nil && cluster.Spec.CloudProvider.AWS.NodeTerminationHandler != nil { if cluster.Spec.CloudProvider.AWS != nil && cluster.Spec.CloudProvider.AWS.NodeTerminationHandler != nil {
dest["DefaultQueueName"] = func() string { dest["DefaultQueueName"] = func() string {
s := strings.Replace(tf.ClusterName(), ".", "-", -1) s := strings.ReplaceAll(tf.ClusterName(), ".", "-")
domain := ".amazonaws.com/" domain := ".amazonaws.com/"
if strings.Contains(tf.Region, "cn-") { if strings.Contains(tf.Region, "cn-") {
domain = ".amazonaws.com.cn/" domain = ".amazonaws.com.cn/"

View File

@ -108,7 +108,6 @@ func writeLocalsOutputs(buf *bytes.Buffer, outputs map[string]terraformWriter.Ou
toElement(&output{Value: locals[tfName]}).Write(buf, 0, fmt.Sprintf("output %q", tfName)) toElement(&output{Value: locals[tfName]}).Write(buf, 0, fmt.Sprintf("output %q", tfName))
buf.WriteString("\n") buf.WriteString("\n")
} }
return
} }
func (t *TerraformTarget) writeProviders(buf *bytes.Buffer) { func (t *TerraformTarget) writeProviders(buf *bytes.Buffer) {
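The bare `return` removed at the end of writeLocalsOutputs is redundant: in a function with no result values, falling off the end is enough (the staticcheck/gosimple S1023-style finding). A sketch with an invented helper:

    package main

    import (
        "bytes"
        "fmt"
    )

    // writeGreeting has no return values, so no trailing "return" is needed.
    func writeGreeting(buf *bytes.Buffer, name string) {
        fmt.Fprintf(buf, "hello %s\n", name)
        // a bare "return" here would be flagged as redundant
    }

    func main() {
        var buf bytes.Buffer
        writeGreeting(&buf, "kops")
        fmt.Print(buf.String())
    }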

View File

@ -241,7 +241,8 @@ func (c *NodeUpCommand) Run(out io.Writer) error {
return err return err
} }
if bootConfig.CloudProvider == api.CloudProviderAWS { switch bootConfig.CloudProvider {
case api.CloudProviderAWS:
instanceIDBytes, err := vfs.Context.ReadFile("metadata://aws/meta-data/instance-id") instanceIDBytes, err := vfs.Context.ReadFile("metadata://aws/meta-data/instance-id")
if err != nil { if err != nil {
return fmt.Errorf("error reading instance-id from AWS metadata: %v", err) return fmt.Errorf("error reading instance-id from AWS metadata: %v", err)
@ -276,7 +277,7 @@ func (c *NodeUpCommand) Run(out io.Writer) error {
modelContext.GPUVendor = architectures.GPUVendorNvidia modelContext.GPUVendor = architectures.GPUVendorNvidia
} }
} }
} else if bootConfig.CloudProvider == api.CloudProviderOpenstack { case api.CloudProviderOpenstack:
// NvidiaGPU possible to enable only in instance group level in OpenStack. When we assume that GPU is supported // NvidiaGPU possible to enable only in instance group level in OpenStack. When we assume that GPU is supported
if nodeupConfig.NvidiaGPU != nil && fi.ValueOf(nodeupConfig.NvidiaGPU.Enabled) { if nodeupConfig.NvidiaGPU != nil && fi.ValueOf(nodeupConfig.NvidiaGPU.Enabled) {
klog.Info("instance supports GPU acceleration") klog.Info("instance supports GPU acceleration")

View File

@ -243,7 +243,8 @@ func (_ *File) RenderLocal(_ *local.LocalTarget, a, e, changes *File) error {
} }
changed := false changed := false
if e.Type == FileType_Symlink { switch e.Type {
case FileType_Symlink:
if changes.Symlink != nil { if changes.Symlink != nil {
// This will currently fail if the target already exists. // This will currently fail if the target already exists.
// That's probably a good thing for now ... it is hard to know what to do here! // That's probably a good thing for now ... it is hard to know what to do here!
@ -254,7 +255,7 @@ func (_ *File) RenderLocal(_ *local.LocalTarget, a, e, changes *File) error {
} }
changed = true changed = true
} }
} else if e.Type == FileType_Directory { case FileType_Directory:
if a == nil { if a == nil {
parent := filepath.Dir(strings.TrimSuffix(e.Path, "/")) parent := filepath.Dir(strings.TrimSuffix(e.Path, "/"))
err := os.MkdirAll(parent, dirMode) err := os.MkdirAll(parent, dirMode)
@ -268,7 +269,7 @@ func (_ *File) RenderLocal(_ *local.LocalTarget, a, e, changes *File) error {
} }
changed = true changed = true
} }
} else if e.Type == FileType_File { case FileType_File:
if changes.Contents != nil { if changes.Contents != nil {
err = fi.WriteFile(e.Path, e.Contents, fileMode, dirMode, fi.ValueOf(e.Owner), fi.ValueOf(e.Group)) err = fi.WriteFile(e.Path, e.Contents, fileMode, dirMode, fi.ValueOf(e.Owner), fi.ValueOf(e.Group))
if err != nil { if err != nil {
@ -276,7 +277,7 @@ func (_ *File) RenderLocal(_ *local.LocalTarget, a, e, changes *File) error {
} }
changed = true changed = true
} }
} else { default:
return fmt.Errorf("File type=%q not valid/supported", e.Type) return fmt.Errorf("File type=%q not valid/supported", e.Type)
} }

View File

@ -142,8 +142,7 @@ func (_ *LoadImageTask) RenderLocal(t *local.LocalTarget, a, e, changes *LoadIma
} }
// Load the container image // Load the container image
var args []string args := []string{"ctr", "--namespace", "k8s.io", "images", "import", tarFile}
args = []string{"ctr", "--namespace", "k8s.io", "images", "import", tarFile}
human := strings.Join(args, " ") human := strings.Join(args, " ")
klog.Infof("running command %s", human) klog.Infof("running command %s", human)
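Declaring `var args []string` and assigning it on the next line is merged into a single short variable declaration, gosimple's merge-declaration-with-assignment fix. A sketch that only builds and prints the ctr argument slice (the tar path is hypothetical; nothing is executed):

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        tarFile := "/tmp/image.tar" // hypothetical path

        // Instead of: var args []string; args = []string{...}
        args := []string{"ctr", "--namespace", "k8s.io", "images", "import", tarFile}

        human := strings.Join(args, " ")
        fmt.Println("would run:", human)
    }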

View File

@ -302,7 +302,7 @@ func (c *VFSCAStore) AddSSHPublicKey(ctx context.Context, pubkey []byte) error {
func (c *VFSCAStore) buildSSHPublicKeyPath(id string) vfs.Path { func (c *VFSCAStore) buildSSHPublicKeyPath(id string) vfs.Path {
// id is fingerprint with colons, but we store without colons // id is fingerprint with colons, but we store without colons
id = strings.Replace(id, ":", "", -1) id = strings.ReplaceAll(id, ":", "")
return c.basedir.Join("ssh", "public", "admin", id) return c.basedir.Join("ssh", "public", "admin", id)
} }

View File

@ -23,7 +23,7 @@ import (
// SplitContentToSections splits content of a kops manifest into sections. // SplitContentToSections splits content of a kops manifest into sections.
func SplitContentToSections(content []byte) [][]byte { func SplitContentToSections(content []byte) [][]byte {
// replace windows line endings with unix ones // replace windows line endings with unix ones
normalized := bytes.Replace(content, []byte("\r\n"), []byte("\n"), -1) normalized := bytes.ReplaceAll(content, []byte("\r\n"), []byte("\n"))
return bytes.Split(normalized, []byte("\n---\n")) return bytes.Split(normalized, []byte("\n---\n"))
} }
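SplitContentToSections switches to bytes.ReplaceAll, the bytes-package counterpart of the strings change: normalize Windows line endings, then split on the YAML document separator. A runnable sketch of that helper with an inline, invented manifest:

    package main

    import (
        "bytes"
        "fmt"
    )

    // splitSections mirrors the helper above: normalize CRLF, then split on "\n---\n".
    func splitSections(content []byte) [][]byte {
        normalized := bytes.ReplaceAll(content, []byte("\r\n"), []byte("\n"))
        return bytes.Split(normalized, []byte("\n---\n"))
    }

    func main() {
        manifest := []byte("kind: ConfigMap\r\n---\r\nkind: Secret\r\n")
        for i, section := range splitSections(manifest) {
            fmt.Printf("section %d: %q\n", i, section)
        }
    }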

View File

@ -696,7 +696,8 @@ func (p *S3Path) RenderTerraform(w *terraformWriter.TerraformWriter, name string
} }
// render DO's terraform // render DO's terraform
if p.scheme == "do" { switch p.scheme {
case "do":
content, err := w.AddFileBytes("digitalocean_spaces_bucket_object", name, "content", bytes, false) content, err := w.AddFileBytes("digitalocean_spaces_bucket_object", name, "content", bytes, false)
if err != nil { if err != nil {
@ -719,7 +720,7 @@ func (p *S3Path) RenderTerraform(w *terraformWriter.TerraformWriter, name string
return w.RenderResource("digitalocean_spaces_bucket_object", name, tf) return w.RenderResource("digitalocean_spaces_bucket_object", name, tf)
// render Scaleway's Terraform objects // render Scaleway's Terraform objects
} else if p.scheme == "scw" { case "scw":
content, err := w.AddFileBytes("scaleway_object", name, "content", bytes, false) content, err := w.AddFileBytes("scaleway_object", name, "content", bytes, false)
if err != nil { if err != nil {
@ -733,7 +734,7 @@ func (p *S3Path) RenderTerraform(w *terraformWriter.TerraformWriter, name string
} }
return w.RenderResource("scaleway_object", name, tf) return w.RenderResource("scaleway_object", name, tf)
} else { default:
bucketDetails, err := p.getBucketDetails(ctx) bucketDetails, err := p.getBucketDetails(ctx)
if err != nil { if err != nil {
return err return err