mirror of https://github.com/kubernetes/kops.git
fix golint failures
This commit is contained in:
parent 69fe8e3689
commit 4b49412105
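Every hunk below applies the same golint fix: when an if block ends in a return (or continue), the trailing else is redundant (golint's indent-error-flow check reports along the lines of "if block ends with a return statement, so drop this else and outdent its block"), so each else branch is deleted and its body outdented one level. A minimal sketch of the before/after shape, modeled on the CloudInstanceGroup.Status hunk further down (hypothetical helper names, not code from this commit):

package main

import "fmt"

// Before: golint flags the else, because the if branch already returns.
func statusBefore(needUpdate int) string {
	if needUpdate == 0 {
		return "Ready"
	} else { // redundant else after return
		return "NeedsUpdate"
	}
}

// After: same behavior, with the else dropped and its body outdented.
func statusAfter(needUpdate int) string {
	if needUpdate == 0 {
		return "Ready"
	}
	return "NeedsUpdate"
}

func main() {
	fmt.Println(statusBefore(0), statusAfter(1)) // Ready NeedsUpdate
}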
@@ -131,9 +131,8 @@ func (c *ChannelVersion) replaces(existing *ChannelVersion) bool {
 		if c.ManifestHash == existing.ManifestHash {
 			klog.V(4).Infof("Manifest Match")
 			return false
-		} else {
-			klog.V(4).Infof("Channels had same version and ids %q, %q but different ManifestHash (%q vs %q); will replace", *c.Version, c.Id, c.ManifestHash, existing.ManifestHash)
 		}
+		klog.V(4).Infof("Channels had same version and ids %q, %q but different ManifestHash (%q vs %q); will replace", *c.Version, c.Id, c.ManifestHash, existing.ManifestHash)
 	} else {
 		klog.V(4).Infof("Channels had same version %q but different ids (%q vs %q); will replace", *c.Version, c.Id, existing.Id)
 	}
@@ -137,9 +137,8 @@ func (m *MockRoute53) ChangeResourceRecordSets(request *route53.ChangeResourceRe
 			if foundIndex == -1 {
 				// TODO: Use correct error
 				return nil, fmt.Errorf("record not found %s %q", changeType, changeName)
-			} else {
-				zone.records = append(zone.records[:foundIndex], zone.records[foundIndex+1:]...)
 			}
+			zone.records = append(zone.records[:foundIndex], zone.records[foundIndex+1:]...)
 
 		default:
 			// TODO: Use correct error
@@ -183,9 +183,8 @@ func RunCreate(f *util.Factory, out io.Writer, c *CreateOptions) error {
 				return fmt.Errorf("instanceGroup %q already exists", v.ObjectMeta.Name)
 			}
 			return fmt.Errorf("error creating instanceGroup: %v", err)
-		} else {
-			fmt.Fprintf(&sb, "Created instancegroup/%s\n", v.ObjectMeta.Name)
 		}
+		fmt.Fprintf(&sb, "Created instancegroup/%s\n", v.ObjectMeta.Name)
 
 	case *kopsapi.SSHCredential:
 		clusterName = v.ObjectMeta.Labels[kopsapi.LabelClusterName]
@@ -272,27 +272,26 @@ func (c *UpgradeClusterCmd) Run(args []string) error {
 	if !c.Yes {
 		fmt.Printf("\nMust specify --yes to perform upgrade\n")
 		return nil
-	} else {
-		for _, action := range actions {
-			action.apply()
-		}
-
-		if err := commands.UpdateCluster(clientset, cluster, instanceGroups); err != nil {
-			return err
-		}
-
-		for _, g := range instanceGroups {
-			_, err := clientset.InstanceGroupsFor(cluster).Update(g)
-			if err != nil {
-				return fmt.Errorf("error writing InstanceGroup %q: %v", g.ObjectMeta.Name, err)
-			}
-		}
-
-		fmt.Printf("\nUpdates applied to configuration.\n")
-
-		// TODO: automate this step
-		fmt.Printf("You can now apply these changes, using `kops update cluster %s`\n", cluster.ObjectMeta.Name)
 	}
+	for _, action := range actions {
+		action.apply()
+	}
+
+	if err := commands.UpdateCluster(clientset, cluster, instanceGroups); err != nil {
+		return err
+	}
+
+	for _, g := range instanceGroups {
+		_, err := clientset.InstanceGroupsFor(cluster).Update(g)
+		if err != nil {
+			return fmt.Errorf("error writing InstanceGroup %q: %v", g.ObjectMeta.Name, err)
+		}
+	}
+
+	fmt.Printf("\nUpdates applied to configuration.\n")
+
+	// TODO: automate this step
+	fmt.Printf("You can now apply these changes, using `kops update cluster %s`\n", cluster.ObjectMeta.Name)
 
 	return nil
 }
@@ -139,11 +139,10 @@ func RunValidateCluster(f *util.Factory, cmd *cobra.Command, args []string, out
 		if err != nil {
 			if time.Now().After(timeout) {
 				return nil, fmt.Errorf("unexpected error during validation: %v", err)
-			} else {
-				klog.Warningf("(will retry): unexpected error during validation: %v", err)
-				time.Sleep(pollInterval)
-				continue
 			}
+			klog.Warningf("(will retry): unexpected error during validation: %v", err)
+			time.Sleep(pollInterval)
+			continue
 		}
 
 		switch options.output {
@@ -79,9 +79,8 @@ func (s *s3PublicAclStrategy) GetACL(p vfs.Path, cluster *kops.Cluster) (vfs.ACL
 		return &vfs.S3Acl{
 			RequestACL: values.String("public-read"),
 		}, nil
-	} else {
-		klog.V(8).Infof("path %q is not inside the file registry %q, not setting public-read acl", u.Path, config.Path)
 	}
+	klog.V(8).Infof("path %q is not inside the file registry %q, not setting public-read acl", u.Path, config.Path)
 
 	return nil, nil
 }
@@ -140,9 +140,8 @@ func (v *KubernetesVersionSpec) FindRecommendedUpgrade(version semver.Version) (
 	if recommendedVersion.GT(version) {
 		klog.V(2).Infof("RecommendedVersion=%q, Have=%q. Recommending upgrade", recommendedVersion, version)
 		return recommendedVersion, nil
-	} else {
-		klog.V(4).Infof("RecommendedVersion=%q, Have=%q. No upgrade needed.", recommendedVersion, version)
 	}
+	klog.V(4).Infof("RecommendedVersion=%q, Have=%q. No upgrade needed.", recommendedVersion, version)
 	return nil, nil
 }
 
@@ -160,9 +159,8 @@ func (v *KopsVersionSpec) FindRecommendedUpgrade(version semver.Version) (*semve
 	if recommendedVersion.GT(version) {
 		klog.V(2).Infof("RecommendedVersion=%q, Have=%q. Recommending upgrade", recommendedVersion, version)
 		return &recommendedVersion, nil
-	} else {
-		klog.V(4).Infof("RecommendedVersion=%q, Have=%q. No upgrade needed.", recommendedVersion, version)
 	}
+	klog.V(4).Infof("RecommendedVersion=%q, Have=%q. No upgrade needed.", recommendedVersion, version)
 	return nil, nil
 }
 
@@ -180,9 +178,8 @@ func (v *KubernetesVersionSpec) IsUpgradeRequired(version semver.Version) (bool,
 	if requiredVersion.GT(version) {
 		klog.V(2).Infof("RequiredVersion=%q, Have=%q. Requiring upgrade", requiredVersion, version)
 		return true, nil
-	} else {
-		klog.V(4).Infof("RequiredVersion=%q, Have=%q. No upgrade needed.", requiredVersion, version)
 	}
+	klog.V(4).Infof("RequiredVersion=%q, Have=%q. No upgrade needed.", requiredVersion, version)
 	return false, nil
 }
 
@@ -200,9 +197,8 @@ func (v *KopsVersionSpec) IsUpgradeRequired(version semver.Version) (bool, error
 	if requiredVersion.GT(version) {
 		klog.V(2).Infof("RequiredVersion=%q, Have=%q. Requiring upgrade", requiredVersion, version)
 		return true, nil
-	} else {
-		klog.V(4).Infof("RequiredVersion=%q, Have=%q. No upgrade needed.", requiredVersion, version)
 	}
+	klog.V(4).Infof("RequiredVersion=%q, Have=%q. No upgrade needed.", requiredVersion, version)
 	return false, nil
 }
 
@@ -79,9 +79,8 @@ func (c *CloudInstanceGroup) NewCloudInstanceGroupMember(instanceId string, newG
 func (c *CloudInstanceGroup) Status() string {
 	if len(c.NeedUpdate) == 0 {
 		return "Ready"
-	} else {
-		return "NeedsUpdate"
 	}
+	return "NeedsUpdate"
 }
 
 // GetNodeMap returns a list of nodes keyed by their external id
@@ -136,10 +136,9 @@ func (r *RollingUpdateInstanceGroup) RollingUpdate(rollingUpdateData *RollingUpd
 	if err = r.ValidateCluster(rollingUpdateData, cluster, instanceGroupList); err != nil {
 		if rollingUpdateData.FailOnValidate {
 			return fmt.Errorf("error validating cluster: %v", err)
-		} else {
-			klog.V(2).Infof("Ignoring cluster validation error: %v", err)
-			klog.Info("Cluster validation failed, but proceeding since fail-on-validate-error is set to false")
 		}
+		klog.V(2).Infof("Ignoring cluster validation error: %v", err)
+		klog.Info("Cluster validation failed, but proceeding since fail-on-validate-error is set to false")
 	}
 }
 
@@ -165,9 +164,8 @@ func (r *RollingUpdateInstanceGroup) RollingUpdate(rollingUpdateData *RollingUpd
 			if err = r.DrainNode(u, rollingUpdateData); err != nil {
 				if rollingUpdateData.FailOnDrainError {
 					return fmt.Errorf("failed to drain node %q: %v", nodeName, err)
-				} else {
-					klog.Infof("Ignoring error draining node %q: %v", nodeName, err)
 				}
+				klog.Infof("Ignoring error draining node %q: %v", nodeName, err)
 			}
 		} else {
 			klog.Warningf("Skipping drain of instance %q, because it is not registered in kubernetes", instanceId)
@@ -127,9 +127,8 @@ func (b *ScalingGroupModelBuilder) Build(c *fi.ModelBuilderContext) error {
 
 		if err != nil {
 			return err
-		} else {
-			launchConfiguration.SSHKey = b.LinkToSSHKey()
 		}
+		launchConfiguration.SSHKey = b.LinkToSSHKey()
 		if launchConfiguration.UserData, err = b.BootstrapScript.ResourceNodeUp(ig, b.Cluster); err != nil {
 			return err
 		}
@@ -102,9 +102,8 @@ func parsePEMCertificate(pemData []byte) (*x509.Certificate, error) {
 		if block.Type == "CERTIFICATE" {
 			klog.V(10).Infof("Parsing pem block: %q", block.Type)
 			return x509.ParseCertificate(block.Bytes)
-		} else {
-			klog.Infof("Ignoring unexpected PEM block: %q", block.Type)
 		}
+		klog.Infof("Ignoring unexpected PEM block: %q", block.Type)
 
 		pemData = rest
 	}
@@ -453,11 +453,10 @@ func DeleteRoleRam(cloud fi.Cloud, r *resources.Resource) error {
 	response, err := c.RamClient().ListPoliciesForRole(roleQueryRequest)
 	if err != nil {
 		return fmt.Errorf("err listing Policies for role:%v", err)
-	} else {
-		if len(response.Policies.Policy) != 0 {
-			for _, policy := range response.Policies.Policy {
-				policies = append(policies, policy.PolicyName)
-			}
 	}
+	if len(response.Policies.Policy) != 0 {
+		for _, policy := range response.Policies.Policy {
+			policies = append(policies, policy.PolicyName)
+		}
 	}
 
@@ -54,15 +54,14 @@ func (s *StringOrSlice) UnmarshalJSON(value []byte) error {
 			return nil
 		}
 		return nil
-	} else {
-		s.forceEncodeAsArray = false
-		var stringValue string
-		if err := json.Unmarshal(value, &stringValue); err != nil {
-			return err
-		}
-		s.values = []string{stringValue}
-		return nil
 	}
+	s.forceEncodeAsArray = false
+	var stringValue string
+	if err := json.Unmarshal(value, &stringValue); err != nil {
+		return err
+	}
+	s.values = []string{stringValue}
+	return nil
 }
 
 // String returns the string value, or the Itoa of the int value.
@@ -114,10 +114,9 @@ func (p *peer) OnGossip(buf []byte) (delta mesh.GossipData, err error) {
 		// per OnGossip requirements
 		klog.V(4).Infof("OnGossip %v => delta empty", message)
 		return nil, nil
-	} else {
-		klog.V(4).Infof("OnGossip %v => delta %v", message, deltas)
-		return deltas, nil
 	}
+	klog.V(4).Infof("OnGossip %v => delta %v", message, deltas)
+	return deltas, nil
 }
 
 // Merge the gossiped data represented by buf into our state.
@@ -423,11 +423,10 @@ func (a *AWSVolumes) AttachVolume(volume *Volume) error {
 
 			volume.LocalDevice = device
 			return nil
-		} else {
-			a.releaseDevice(device, volumeID)
-
-			return fmt.Errorf("Unable to attach volume %q, was attached to %q", volumeID, v.AttachedTo)
 		}
+		a.releaseDevice(device, volumeID)
+
+		return fmt.Errorf("Unable to attach volume %q, was attached to %q", volumeID, v.AttachedTo)
 	}
 
 	switch v.Status {
@@ -178,9 +178,8 @@ func (k *VolumeMountController) safeFormatAndMount(volume *Volume, mountpoint st
 			}
 
 			return fmt.Errorf("found multiple existing mounts of %q at %q", device, mountpoint)
-		} else {
-			klog.Infof("Found existing mount of %q at %q", device, mountpoint)
 		}
+		klog.Infof("Found existing mount of %q at %q", device, mountpoint)
 	}
 
 	// If we're containerized we also want to mount the device (again) into our container