mirror of https://github.com/kubernetes/kops.git
commit e29a04e5af
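Every hunk in this commit applies the same golint-style cleanup: when an if branch ends in a terminal statement (return, continue), the trailing else is dropped and its body is outdented. As a minimal sketch of the before/after shape, modeled loosely on the CloudInstanceGroup.Status hunk below (the function names here are illustrative, not taken from the kops source):

package main

import "fmt"

// statusWithElse shows the old shape: the else branch is redundant
// because the if branch already returns. golint reports: "if block ends
// with a return statement, so drop this else and outdent its block".
func statusWithElse(needUpdate int) string {
    if needUpdate == 0 {
        return "Ready"
    } else {
        return "NeedsUpdate"
    }
}

// statusWithoutElse is the refactored shape used throughout the diff:
// drop the else and outdent its body.
func statusWithoutElse(needUpdate int) string {
    if needUpdate == 0 {
        return "Ready"
    }
    return "NeedsUpdate"
}

func main() {
    fmt.Println(statusWithElse(0), statusWithoutElse(1)) // Ready NeedsUpdate
}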
@@ -131,9 +131,8 @@ func (c *ChannelVersion) replaces(existing *ChannelVersion) bool {
         if c.ManifestHash == existing.ManifestHash {
             klog.V(4).Infof("Manifest Match")
             return false
-        } else {
-            klog.V(4).Infof("Channels had same version and ids %q, %q but different ManifestHash (%q vs %q); will replace", *c.Version, c.Id, c.ManifestHash, existing.ManifestHash)
         }
+        klog.V(4).Infof("Channels had same version and ids %q, %q but different ManifestHash (%q vs %q); will replace", *c.Version, c.Id, c.ManifestHash, existing.ManifestHash)
     } else {
         klog.V(4).Infof("Channels had same version %q but different ids (%q vs %q); will replace", *c.Version, c.Id, existing.Id)
     }

@@ -137,9 +137,8 @@ func (m *MockRoute53) ChangeResourceRecordSets(request *route53.ChangeResourceRe
             if foundIndex == -1 {
                 // TODO: Use correct error
                 return nil, fmt.Errorf("record not found %s %q", changeType, changeName)
-            } else {
-                zone.records = append(zone.records[:foundIndex], zone.records[foundIndex+1:]...)
             }
+            zone.records = append(zone.records[:foundIndex], zone.records[foundIndex+1:]...)

         default:
             // TODO: Use correct error

@@ -183,9 +183,8 @@ func RunCreate(f *util.Factory, out io.Writer, c *CreateOptions) error {
                     return fmt.Errorf("instanceGroup %q already exists", v.ObjectMeta.Name)
                 }
                 return fmt.Errorf("error creating instanceGroup: %v", err)
-            } else {
-                fmt.Fprintf(&sb, "Created instancegroup/%s\n", v.ObjectMeta.Name)
             }
+            fmt.Fprintf(&sb, "Created instancegroup/%s\n", v.ObjectMeta.Name)

         case *kopsapi.SSHCredential:
             clusterName = v.ObjectMeta.Labels[kopsapi.LabelClusterName]

@@ -272,7 +272,7 @@ func (c *UpgradeClusterCmd) Run(args []string) error {
     if !c.Yes {
         fmt.Printf("\nMust specify --yes to perform upgrade\n")
         return nil
-    } else {
+    }
     for _, action := range actions {
         action.apply()
     }

@@ -292,7 +292,6 @@ func (c *UpgradeClusterCmd) Run(args []string) error {

     // TODO: automate this step
     fmt.Printf("You can now apply these changes, using `kops update cluster %s`\n", cluster.ObjectMeta.Name)
-    }

     return nil
 }

@@ -139,12 +139,11 @@ func RunValidateCluster(f *util.Factory, cmd *cobra.Command, args []string, out
         if err != nil {
             if time.Now().After(timeout) {
                 return nil, fmt.Errorf("unexpected error during validation: %v", err)
-            } else {
+            }
             klog.Warningf("(will retry): unexpected error during validation: %v", err)
             time.Sleep(pollInterval)
             continue
         }
-        }

     switch options.output {
     case OutputTable:

@@ -79,9 +79,8 @@ func (s *s3PublicAclStrategy) GetACL(p vfs.Path, cluster *kops.Cluster) (vfs.ACL
         return &vfs.S3Acl{
             RequestACL: values.String("public-read"),
         }, nil
-    } else {
-        klog.V(8).Infof("path %q is not inside the file registry %q, not setting public-read acl", u.Path, config.Path)
     }
+    klog.V(8).Infof("path %q is not inside the file registry %q, not setting public-read acl", u.Path, config.Path)

     return nil, nil
 }

@@ -140,9 +140,8 @@ func (v *KubernetesVersionSpec) FindRecommendedUpgrade(version semver.Version) (
     if recommendedVersion.GT(version) {
         klog.V(2).Infof("RecommendedVersion=%q, Have=%q. Recommending upgrade", recommendedVersion, version)
         return recommendedVersion, nil
-    } else {
-        klog.V(4).Infof("RecommendedVersion=%q, Have=%q. No upgrade needed.", recommendedVersion, version)
     }
+    klog.V(4).Infof("RecommendedVersion=%q, Have=%q. No upgrade needed.", recommendedVersion, version)
     return nil, nil
 }

@@ -160,9 +159,8 @@ func (v *KopsVersionSpec) FindRecommendedUpgrade(version semver.Version) (*semve
     if recommendedVersion.GT(version) {
         klog.V(2).Infof("RecommendedVersion=%q, Have=%q. Recommending upgrade", recommendedVersion, version)
         return &recommendedVersion, nil
-    } else {
-        klog.V(4).Infof("RecommendedVersion=%q, Have=%q. No upgrade needed.", recommendedVersion, version)
     }
+    klog.V(4).Infof("RecommendedVersion=%q, Have=%q. No upgrade needed.", recommendedVersion, version)
     return nil, nil
 }

@@ -180,9 +178,8 @@ func (v *KubernetesVersionSpec) IsUpgradeRequired(version semver.Version) (bool,
     if requiredVersion.GT(version) {
         klog.V(2).Infof("RequiredVersion=%q, Have=%q. Requiring upgrade", requiredVersion, version)
         return true, nil
-    } else {
-        klog.V(4).Infof("RequiredVersion=%q, Have=%q. No upgrade needed.", requiredVersion, version)
     }
+    klog.V(4).Infof("RequiredVersion=%q, Have=%q. No upgrade needed.", requiredVersion, version)
     return false, nil
 }

@@ -200,9 +197,8 @@ func (v *KopsVersionSpec) IsUpgradeRequired(version semver.Version) (bool, error
     if requiredVersion.GT(version) {
         klog.V(2).Infof("RequiredVersion=%q, Have=%q. Requiring upgrade", requiredVersion, version)
         return true, nil
-    } else {
-        klog.V(4).Infof("RequiredVersion=%q, Have=%q. No upgrade needed.", requiredVersion, version)
     }
+    klog.V(4).Infof("RequiredVersion=%q, Have=%q. No upgrade needed.", requiredVersion, version)
     return false, nil
 }

@@ -79,9 +79,8 @@ func (c *CloudInstanceGroup) NewCloudInstanceGroupMember(instanceId string, newG
 func (c *CloudInstanceGroup) Status() string {
     if len(c.NeedUpdate) == 0 {
         return "Ready"
-    } else {
-        return "NeedsUpdate"
     }
+    return "NeedsUpdate"
 }

 // GetNodeMap returns a list of nodes keyed by their external id

@@ -136,12 +136,11 @@ func (r *RollingUpdateInstanceGroup) RollingUpdate(rollingUpdateData *RollingUpd
         if err = r.ValidateCluster(rollingUpdateData, cluster, instanceGroupList); err != nil {
             if rollingUpdateData.FailOnValidate {
                 return fmt.Errorf("error validating cluster: %v", err)
-            } else {
+            }
             klog.V(2).Infof("Ignoring cluster validation error: %v", err)
             klog.Info("Cluster validation failed, but proceeding since fail-on-validate-error is set to false")
         }
     }
-    }

     for _, u := range update {
         instanceId := u.ID

@@ -165,9 +164,8 @@ func (r *RollingUpdateInstanceGroup) RollingUpdate(rollingUpdateData *RollingUpd
         if err = r.DrainNode(u, rollingUpdateData); err != nil {
             if rollingUpdateData.FailOnDrainError {
                 return fmt.Errorf("failed to drain node %q: %v", nodeName, err)
-            } else {
-                klog.Infof("Ignoring error draining node %q: %v", nodeName, err)
             }
+            klog.Infof("Ignoring error draining node %q: %v", nodeName, err)
         }
     } else {
         klog.Warningf("Skipping drain of instance %q, because it is not registered in kubernetes", instanceId)

@@ -127,9 +127,8 @@ func (b *ScalingGroupModelBuilder) Build(c *fi.ModelBuilderContext) error {

         if err != nil {
             return err
-        } else {
-            launchConfiguration.SSHKey = b.LinkToSSHKey()
         }
+        launchConfiguration.SSHKey = b.LinkToSSHKey()
         if launchConfiguration.UserData, err = b.BootstrapScript.ResourceNodeUp(ig, b.Cluster); err != nil {
             return err
         }

@@ -102,9 +102,8 @@ func parsePEMCertificate(pemData []byte) (*x509.Certificate, error) {
         if block.Type == "CERTIFICATE" {
             klog.V(10).Infof("Parsing pem block: %q", block.Type)
             return x509.ParseCertificate(block.Bytes)
-        } else {
-            klog.Infof("Ignoring unexpected PEM block: %q", block.Type)
         }
+        klog.Infof("Ignoring unexpected PEM block: %q", block.Type)

         pemData = rest
     }

@@ -453,13 +453,12 @@ func DeleteRoleRam(cloud fi.Cloud, r *resources.Resource) error {
     response, err := c.RamClient().ListPoliciesForRole(roleQueryRequest)
     if err != nil {
         return fmt.Errorf("err listing Policies for role:%v", err)
-    } else {
+    }
     if len(response.Policies.Policy) != 0 {
         for _, policy := range response.Policies.Policy {
             policies = append(policies, policy.PolicyName)
         }
     }
-    }

     for _, policy := range policies {
         klog.V(2).Infof("Removing RolePolicy %s of RamRole %s", policy, r.Name)

@@ -54,7 +54,7 @@ func (s *StringOrSlice) UnmarshalJSON(value []byte) error {
             return nil
         }
         return nil
-    } else {
+    }
     s.forceEncodeAsArray = false
     var stringValue string
     if err := json.Unmarshal(value, &stringValue); err != nil {

@@ -63,7 +63,6 @@ func (s *StringOrSlice) UnmarshalJSON(value []byte) error {
     s.values = []string{stringValue}
     return nil
 }
-}

 // String returns the string value, or the Itoa of the int value.
 func (s StringOrSlice) String() string {

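The two UnmarshalJSON hunks above come from a string-or-slice type: a value that may be given in JSON either as a single string or as an array of strings, where the array form is tried first and the single string is the fallback. A compact sketch of that pattern with the redundant else already removed; the type and variable names here are an approximation for illustration, not the exact kops implementation:

package main

import (
    "encoding/json"
    "fmt"
)

// stringOrSlice accepts either a JSON string or a JSON array of strings.
type stringOrSlice struct {
    values             []string
    forceEncodeAsArray bool
}

// UnmarshalJSON tries the array form first; if that fails it falls back
// to a single string, without an else branch after the early return.
func (s *stringOrSlice) UnmarshalJSON(value []byte) error {
    var slice []string
    if err := json.Unmarshal(value, &slice); err == nil {
        s.forceEncodeAsArray = true
        s.values = slice
        return nil
    }
    s.forceEncodeAsArray = false
    var stringValue string
    if err := json.Unmarshal(value, &stringValue); err != nil {
        return err
    }
    s.values = []string{stringValue}
    return nil
}

func main() {
    var a, b stringOrSlice
    _ = json.Unmarshal([]byte(`["x","y"]`), &a)
    _ = json.Unmarshal([]byte(`"z"`), &b)
    fmt.Println(a.values, b.values) // prints: [x y] [z]
}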
@@ -114,11 +114,10 @@ func (p *peer) OnGossip(buf []byte) (delta mesh.GossipData, err error) {
         // per OnGossip requirements
         klog.V(4).Infof("OnGossip %v => delta empty", message)
         return nil, nil
-    } else {
+    }
     klog.V(4).Infof("OnGossip %v => delta %v", message, deltas)
     return deltas, nil
 }
-}

 // Merge the gossiped data represented by buf into our state.
 // Return the state information that was modified.

@@ -423,12 +423,11 @@ func (a *AWSVolumes) AttachVolume(volume *Volume) error {

             volume.LocalDevice = device
             return nil
-        } else {
+        }
         a.releaseDevice(device, volumeID)

         return fmt.Errorf("Unable to attach volume %q, was attached to %q", volumeID, v.AttachedTo)
     }
-    }

     switch v.Status {
     case "attaching":

@@ -178,9 +178,8 @@ func (k *VolumeMountController) safeFormatAndMount(volume *Volume, mountpoint st
             }

             return fmt.Errorf("found multiple existing mounts of %q at %q", device, mountpoint)
-        } else {
-            klog.Infof("Found existing mount of %q at %q", device, mountpoint)
         }
+        klog.Infof("Found existing mount of %q at %q", device, mountpoint)
     }

     // If we're containerized we also want to mount the device (again) into our container