mirror of https://github.com/kubernetes/kops.git
Fix field names in api validation
parent 0e4db376df
commit 8844b97fe3
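This commit renames the field paths used in kops API validation errors from Go struct field names (for example Spec.Manifest) to the lowerCamelCase names that appear in the cluster YAML/JSON (spec.manifest). A minimal sketch, not part of the commit, of how these paths are built with the k8s.io/apimachinery/pkg/util/validation/field package that the changed code uses:

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/util/validation/field"
    )

    func main() {
        // Old style: Go struct field names leak into the error path.
        before := field.Required(field.NewPath("Spec", "Manifest"), "")
        // New style: the lowerCamelCase names users see in their manifests.
        after := field.Required(field.NewPath("spec", "manifest"), "")

        fmt.Println(before.Field) // Spec.Manifest
        fmt.Println(after.Field)  // spec.manifest
    }

The hunks below apply the same renaming mechanically across the validation code.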
@@ -110,13 +110,13 @@ func (a *Addon) GetRequiredUpdates(k8sClient kubernetes.Interface) (*AddonUpdate

func (a *Addon) GetManifestFullUrl() (*url.URL, error) {
	if a.Spec.Manifest == nil || *a.Spec.Manifest == "" {
-		return nil, field.Required(field.NewPath("Spec", "Manifest"), "")
+		return nil, field.Required(field.NewPath("spec", "manifest"), "")
	}

	manifest := *a.Spec.Manifest
	manifestURL, err := url.Parse(manifest)
	if err != nil {
-		return nil, field.Invalid(field.NewPath("Spec", "Manifest"), manifest, "Not a valid URL")
+		return nil, field.Invalid(field.NewPath("spec", "manifest"), manifest, "Not a valid URL")
	}
	if !manifestURL.IsAbs() {
		manifestURL = a.ChannelLocation.ResolveReference(manifestURL)
@@ -1033,7 +1033,7 @@ func RunCreateCluster(f *util.Factory, out io.Writer, c *CreateClusterOptions) e
			MajorVersion: "v3",
		}
		// Validate to check if etcd clusters have an acceptable version
-		if errList := validation.ValidateEtcdVersionForCalicoV3(cluster.Spec.EtcdClusters[0], cluster.Spec.Networking.Calico.MajorVersion, field.NewPath("Calico")); len(errList) != 0 {
+		if errList := validation.ValidateEtcdVersionForCalicoV3(cluster.Spec.EtcdClusters[0], cluster.Spec.Networking.Calico.MajorVersion, field.NewPath("spec", "networking", "calico")); len(errList) != 0 {

			// This is not a special version but simply of the 3 series
			for _, etcd := range cluster.Spec.EtcdClusters {
@@ -285,7 +285,7 @@ func (c *RootCmd) Cluster() (*kopsapi.Cluster, error) {

func GetCluster(factory Factory, clusterName string) (*kopsapi.Cluster, error) {
	if clusterName == "" {
-		return nil, field.Required(field.NewPath("ClusterName"), "Cluster name is required")
+		return nil, field.Required(field.NewPath("clusterName"), "Cluster name is required")
	}

	clientset, err := factory.Clientset()
@@ -33,7 +33,7 @@ const (

func ConfigBase(c *api.Cluster) (vfs.Path, error) {
	if c.Spec.ConfigBase == "" {
-		return nil, field.Required(field.NewPath("Spec", "ConfigBase"), "")
+		return nil, field.Required(field.NewPath("spec", "configBase"), "")
	}
	configBase, err := vfs.Context.BuildVfsPath(c.Spec.ConfigBase)
	if err != nil {
@@ -60,7 +60,7 @@ func validateEtcdClusterUpdate(fp *field.Path, obj *kops.EtcdClusterSpec, status
	allErrs := field.ErrorList{}

	if obj.Name != old.Name {
-		allErrs = append(allErrs, field.Forbidden(fp.Child("Name"), "Name cannot be changed"))
+		allErrs = append(allErrs, field.Forbidden(fp.Child("name"), "name cannot be changed"))
	}

	var etcdClusterStatus *kops.EtcdClusterStatus
@@ -85,7 +85,7 @@ func validateEtcdClusterUpdate(fp *field.Path, obj *kops.EtcdClusterSpec, status
	}

	for k, newMember := range newMembers {
-		fp := fp.Child("Members").Key(k)
+		fp := fp.Child("etcdMembers").Key(k)

		oldMember := oldMembers[k]
		if oldMember == nil {
@@ -97,7 +97,7 @@ func validateEtcdClusterUpdate(fp *field.Path, obj *kops.EtcdClusterSpec, status
	for k := range oldMembers {
		newCluster := newMembers[k]
		if newCluster == nil {
-			fp := fp.Child("Members").Key(k)
+			fp := fp.Child("etcdMembers").Key(k)
			allErrs = append(allErrs, field.Forbidden(fp, "EtcdCluster members cannot be removed"))
		}
	}
@@ -110,31 +110,31 @@ func validateEtcdMemberUpdate(fp *field.Path, obj *kops.EtcdMemberSpec, status *
	allErrs := field.ErrorList{}

	if obj.Name != old.Name {
-		allErrs = append(allErrs, field.Forbidden(fp.Child("Name"), "Name cannot be changed"))
+		allErrs = append(allErrs, field.Forbidden(fp.Child("name"), "name cannot be changed"))
	}

	if fi.StringValue(obj.InstanceGroup) != fi.StringValue(old.InstanceGroup) {
-		allErrs = append(allErrs, field.Forbidden(fp.Child("InstanceGroup"), "InstanceGroup cannot be changed"))
+		allErrs = append(allErrs, field.Forbidden(fp.Child("instanceGroup"), "instanceGroup cannot be changed"))
	}

	if fi.StringValue(obj.VolumeType) != fi.StringValue(old.VolumeType) {
-		allErrs = append(allErrs, field.Forbidden(fp.Child("VolumeType"), "VolumeType cannot be changed"))
+		allErrs = append(allErrs, field.Forbidden(fp.Child("volumeType"), "volumeType cannot be changed"))
	}

	if fi.Int32Value(obj.VolumeIops) != fi.Int32Value(old.VolumeIops) {
-		allErrs = append(allErrs, field.Forbidden(fp.Child("VolumeIops"), "VolumeIops cannot be changed"))
+		allErrs = append(allErrs, field.Forbidden(fp.Child("volumeIops"), "volumeIops cannot be changed"))
	}

	if fi.Int32Value(obj.VolumeSize) != fi.Int32Value(old.VolumeSize) {
-		allErrs = append(allErrs, field.Forbidden(fp.Child("VolumeSize"), "VolumeSize cannot be changed"))
+		allErrs = append(allErrs, field.Forbidden(fp.Child("volumeSize"), "volumeSize cannot be changed"))
	}

	if fi.StringValue(obj.KmsKeyId) != fi.StringValue(old.KmsKeyId) {
-		allErrs = append(allErrs, field.Forbidden(fp.Child("KmsKeyId"), "KmsKeyId cannot be changed"))
+		allErrs = append(allErrs, field.Forbidden(fp.Child("kmsKeyId"), "kmsKeyId cannot be changed"))
	}

	if fi.BoolValue(obj.EncryptedVolume) != fi.BoolValue(old.EncryptedVolume) {
-		allErrs = append(allErrs, field.Forbidden(fp.Child("EncryptedVolume"), "EncryptedVolume cannot be changed"))
+		allErrs = append(allErrs, field.Forbidden(fp.Child("encryptedVolume"), "encryptedVolume cannot be changed"))
	}

	return allErrs
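The validateEtcdClusterUpdate and validateEtcdMemberUpdate hunks above extend a parent *field.Path with Child and Key, so the rename shows up as etcdMembers[k] instead of Members[k] in error messages. A small sketch of how such paths render, using the same apimachinery field package; the spec.etcdClusters[0] root is only an assumption for illustration, since the parent path is not shown in this diff:

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/util/validation/field"
    )

    func main() {
        fp := field.NewPath("spec", "etcdClusters").Index(0)

        // Child appends a named element, Key a map-style key, Index a slice position.
        fmt.Println(fp.Child("name"))                 // spec.etcdClusters[0].name
        fmt.Println(fp.Child("etcdMembers").Key("a")) // spec.etcdClusters[0].etcdMembers[a]
    }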
@@ -31,19 +31,19 @@ func gceValidateCluster(c *kops.Cluster) field.ErrorList {

	regions := sets.NewString()
	for i, subnet := range c.Spec.Subnets {
-		f := fieldSpec.Child("Subnets").Index(i)
+		f := fieldSpec.Child("subnets").Index(i)
		if subnet.Zone != "" {
-			allErrs = append(allErrs, field.Invalid(f.Child("Zone"), subnet.Zone, "zones should not be specified for GCE subnets, as GCE subnets are regional"))
+			allErrs = append(allErrs, field.Invalid(f.Child("zone"), subnet.Zone, "zones should not be specified for GCE subnets, as GCE subnets are regional"))
		}
		if subnet.Region == "" {
-			allErrs = append(allErrs, field.Required(f.Child("Region"), "region must be specified for GCE subnets"))
+			allErrs = append(allErrs, field.Required(f.Child("region"), "region must be specified for GCE subnets"))
		} else {
			regions.Insert(subnet.Region)
		}
	}

	if len(regions) > 1 {
-		allErrs = append(allErrs, field.Invalid(fieldSpec.Child("Subnets"), strings.Join(regions.List(), ","), "clusters cannot span GCE regions"))
+		allErrs = append(allErrs, field.Invalid(fieldSpec.Child("subnets"), strings.Join(regions.List(), ","), "clusters cannot span GCE regions"))
	}

	return allErrs
@@ -33,50 +33,50 @@ func ValidateInstanceGroup(g *kops.InstanceGroup) field.ErrorList {
	allErrs := field.ErrorList{}

	if g.ObjectMeta.Name == "" {
-		allErrs = append(allErrs, field.Required(field.NewPath("Name"), ""))
+		allErrs = append(allErrs, field.Required(field.NewPath("objectMeta", "name"), ""))
	}

	switch g.Spec.Role {
	case "":
-		allErrs = append(allErrs, field.Required(field.NewPath("Role"), "Role must be set"))
+		allErrs = append(allErrs, field.Required(field.NewPath("spec", "role"), "Role must be set"))
	case kops.InstanceGroupRoleMaster:
		if len(g.Spec.Subnets) == 0 {
-			allErrs = append(allErrs, field.Required(field.NewPath("Subnets"), "master InstanceGroup must specify at least one Subnet"))
+			allErrs = append(allErrs, field.Required(field.NewPath("spec", "subnets"), "master InstanceGroup must specify at least one Subnet"))
		}
	case kops.InstanceGroupRoleNode:
	case kops.InstanceGroupRoleBastion:
	default:
-		allErrs = append(allErrs, field.Invalid(field.NewPath("Role"), g.Spec.Role, "Unknown role"))
+		allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "role"), g.Spec.Role, "Unknown role"))
	}

	if g.Spec.Tenancy != "" {
		if g.Spec.Tenancy != "default" && g.Spec.Tenancy != "dedicated" && g.Spec.Tenancy != "host" {
-			allErrs = append(allErrs, field.Invalid(field.NewPath("Tenancy"), g.Spec.Tenancy, "Unknown tenancy. Must be Default, Dedicated or Host."))
+			allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "tenancy"), g.Spec.Tenancy, "Unknown tenancy. Must be Default, Dedicated or Host."))
		}
	}

	if g.Spec.MaxSize != nil && g.Spec.MinSize != nil {
		if *g.Spec.MaxSize < *g.Spec.MinSize {
-			allErrs = append(allErrs, field.Invalid(field.NewPath("MaxSize"), *g.Spec.MaxSize, "maxSize must be greater than or equal to minSize."))
+			allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "maxSize"), *g.Spec.MaxSize, "maxSize must be greater than or equal to minSize."))
		}
	}

	if fi.Int32Value(g.Spec.RootVolumeIops) < 0 {
-		allErrs = append(allErrs, field.Invalid(field.NewPath("RootVolumeIops"), g.Spec.RootVolumeIops, "RootVolumeIops must be greater than 0"))
+		allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "rootVolumeIops"), g.Spec.RootVolumeIops, "RootVolumeIops must be greater than 0"))
	}

	// @check all the hooks are valid in this instancegroup
	for i := range g.Spec.Hooks {
-		allErrs = append(allErrs, validateHookSpec(&g.Spec.Hooks[i], field.NewPath("hooks").Index(i))...)
+		allErrs = append(allErrs, validateHookSpec(&g.Spec.Hooks[i], field.NewPath("spec", "hooks").Index(i))...)
	}

	// @check the fileAssets for this instancegroup are valid
	for i := range g.Spec.FileAssets {
-		allErrs = append(allErrs, validateFileAssetSpec(&g.Spec.FileAssets[i], field.NewPath("fileAssets").Index(i))...)
+		allErrs = append(allErrs, validateFileAssetSpec(&g.Spec.FileAssets[i], field.NewPath("spec", "fileAssets").Index(i))...)
	}

	if g.Spec.MixedInstancesPolicy != nil {
-		allErrs = append(allErrs, validatedMixedInstancesPolicy(field.NewPath(g.Name), g.Spec.MixedInstancesPolicy, g)...)
+		allErrs = append(allErrs, validatedMixedInstancesPolicy(field.NewPath("spec", "mixedInstancesPolicy"), g.Spec.MixedInstancesPolicy, g)...)
	}

	for _, UserDataInfo := range g.Spec.AdditionalUserData {
@@ -86,7 +86,7 @@ func ValidateInstanceGroup(g *kops.InstanceGroup) field.ErrorList {
	// @step: iterate and check the volume specs
	for i, x := range g.Spec.Volumes {
		devices := make(map[string]bool)
-		path := field.NewPath("volumes").Index(i)
+		path := field.NewPath("spec", "volumes").Index(i)

		allErrs = append(allErrs, validateVolumeSpec(path, x)...)

@@ -101,7 +101,7 @@ func ValidateInstanceGroup(g *kops.InstanceGroup) field.ErrorList {
	// @step: iterate and check the volume mount specs
	for i, x := range g.Spec.VolumeMounts {
		used := make(map[string]bool)
-		path := field.NewPath("volumeMounts").Index(i)
+		path := field.NewPath("spec", "volumeMounts").Index(i)

		allErrs = append(allErrs, validateVolumeMountSpec(path, x)...)
		if _, found := used[x.Device]; found {
@@ -112,10 +112,10 @@ func ValidateInstanceGroup(g *kops.InstanceGroup) field.ErrorList {
		}
	}

-	allErrs = append(allErrs, validateInstanceProfile(g.Spec.IAM, field.NewPath("iam"))...)
+	allErrs = append(allErrs, validateInstanceProfile(g.Spec.IAM, field.NewPath("spec", "iam"))...)

	if g.Spec.RollingUpdate != nil {
-		allErrs = append(allErrs, validateRollingUpdate(g.Spec.RollingUpdate, field.NewPath("rollingUpdate"))...)
+		allErrs = append(allErrs, validateRollingUpdate(g.Spec.RollingUpdate, field.NewPath("spec", "rollingUpdate"))...)
	}

	return allErrs
@@ -208,8 +208,8 @@ func CrossValidateInstanceGroup(g *kops.InstanceGroup, cluster *kops.Cluster, st

	for i, z := range g.Spec.Subnets {
		if clusterSubnets[z] == nil {
-			// TODO field.NotFound(field.NewPath("spec.subnets").Index(i), z) ?
-			allErrs = append(allErrs, field.Invalid(field.NewPath("spec.subnets").Index(i), z,
+			// TODO field.NotFound(field.NewPath("spec", "subnets").Index(i), z) ?
+			allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "subnets").Index(i), z,
				fmt.Sprintf("InstanceGroup %q is configured in %q, but this is not configured as a Subnet in the cluster", g.ObjectMeta.Name, z)))
		}
	}
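In the CrossValidateInstanceGroup hunk above, the single dotted element field.NewPath("spec.subnets") becomes the structured field.NewPath("spec", "subnets"). A short sketch (same package assumption as earlier) of the difference: both happen to render the same dotted string, but only the structured form is built from separate path elements that can be extended consistently with Child, Index, and Key.

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/util/validation/field"
    )

    func main() {
        dotted := field.NewPath("spec.subnets").Index(0)        // one element literally named "spec.subnets"
        structured := field.NewPath("spec", "subnets").Index(0) // two elements: "spec", then "subnets"

        fmt.Println(dotted)     // spec.subnets[0]
        fmt.Println(structured) // spec.subnets[0]
    }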
@@ -220,14 +220,14 @@ func CrossValidateInstanceGroup(g *kops.InstanceGroup, cluster *kops.Cluster, st

func validateExtraUserData(userData *kops.UserData) field.ErrorList {
	allErrs := field.ErrorList{}
-	fieldPath := field.NewPath("AdditionalUserData")
+	fieldPath := field.NewPath("additionalUserData")

	if userData.Name == "" {
-		allErrs = append(allErrs, field.Required(fieldPath.Child("Name"), "field must be set"))
+		allErrs = append(allErrs, field.Required(fieldPath.Child("name"), "field must be set"))
	}

	if userData.Content == "" {
-		allErrs = append(allErrs, field.Required(fieldPath.Child("Content"), "field must be set"))
+		allErrs = append(allErrs, field.Required(fieldPath.Child("content"), "field must be set"))
	}

	switch userData.Type {
@@ -241,7 +241,7 @@ func validateExtraUserData(userData *kops.UserData) field.ErrorList {
	case "text/cloud-boothook":

	default:
-		allErrs = append(allErrs, field.Invalid(fieldPath.Child("Type"), userData.Type, "Invalid user-data content type"))
+		allErrs = append(allErrs, field.Invalid(fieldPath.Child("type"), userData.Type, "Invalid user-data content type"))
	}

	return allErrs
@@ -255,7 +255,7 @@ func validateInstanceProfile(v *kops.IAMProfileSpec, fldPath *field.Path) field.
		instanceProfileARN := *v.Profile
		parsedARN, err := arn.Parse(instanceProfileARN)
		if err != nil || !strings.HasPrefix(parsedARN.Resource, "instance-profile") {
-			allErrs = append(allErrs, field.Invalid(fldPath.Child("Profile"), instanceProfileARN,
+			allErrs = append(allErrs, field.Invalid(fldPath.Child("profile"), instanceProfileARN,
				"Instance Group IAM Instance Profile must be a valid aws arn such as arn:aws:iam::123456789012:instance-profile/KopsExampleRole"))
		}
	}
@@ -57,20 +57,20 @@ func TestValidateInstanceProfile(t *testing.T) {
			Input: &kops.IAMProfileSpec{
				Profile: s("42"),
			},
-			ExpectedErrors: []string{"Invalid value::IAMProfile.Profile"},
+			ExpectedErrors: []string{"Invalid value::iam.profile"},
			ExpectedDetail: "Instance Group IAM Instance Profile must be a valid aws arn such as arn:aws:iam::123456789012:instance-profile/KopsExampleRole",
		},
		{
			Input: &kops.IAMProfileSpec{
				Profile: s("arn:aws:iam::123456789012:group/division_abc/subdivision_xyz/product_A/Developers"),
			},
-			ExpectedErrors: []string{"Invalid value::IAMProfile.Profile"},
+			ExpectedErrors: []string{"Invalid value::iam.profile"},
			ExpectedDetail: "Instance Group IAM Instance Profile must be a valid aws arn such as arn:aws:iam::123456789012:instance-profile/KopsExampleRole",
		},
	}

	for _, g := range grid {
-		allErrs := validateInstanceProfile(g.Input, field.NewPath("IAMProfile"))
+		allErrs := validateInstanceProfile(g.Input, field.NewPath("iam"))
		testErrors(t, g.Input, allErrs, g.ExpectedErrors)

		if g.ExpectedDetail != "" {
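The test expectations above change from "Invalid value::IAMProfile.Profile" to "Invalid value::iam.profile" because the test now passes field.NewPath("iam") as the root path. Judging from those strings, the helper appears to join the error type and the field path with "::"; the testErrors helper itself is not part of this diff, so the following is only a sketch of how such a key can be derived from a returned error:

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/util/validation/field"
    )

    func main() {
        err := field.Invalid(field.NewPath("iam", "profile"), "42", "must be a valid AWS ARN")
        // One plausible way to build the "<type>::<field>" keys the test compares against.
        fmt.Println(err.Type.String() + "::" + err.Field) // Invalid value::iam.profile
    }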
@@ -47,27 +47,27 @@ func ValidateCluster(c *kops.Cluster, strict bool) field.ErrorList {

	// KubernetesVersion
	if c.Spec.KubernetesVersion == "" {
-		allErrs = append(allErrs, field.Required(fieldSpec.Child("KubernetesVersion"), ""))
+		allErrs = append(allErrs, field.Required(fieldSpec.Child("kubernetesVersion"), ""))
	} else {
		sv, err := util.ParseKubernetesVersion(c.Spec.KubernetesVersion)
		if err != nil {
-			allErrs = append(allErrs, field.Invalid(fieldSpec.Child("KubernetesVersion"), c.Spec.KubernetesVersion, "unable to determine kubernetes version"))
+			allErrs = append(allErrs, field.Invalid(fieldSpec.Child("kubernetesVersion"), c.Spec.KubernetesVersion, "unable to determine kubernetes version"))
		} else {
			kubernetesRelease = semver.Version{Major: sv.Major, Minor: sv.Minor}
		}
	}

	if c.ObjectMeta.Name == "" {
-		allErrs = append(allErrs, field.Required(field.NewPath("Name"), "Cluster Name is required (e.g. --name=mycluster.myzone.com)"))
+		allErrs = append(allErrs, field.Required(field.NewPath("objectMeta", "name"), "Cluster Name is required (e.g. --name=mycluster.myzone.com)"))
	} else {
		// Must be a dns name
		errs := validation.IsDNS1123Subdomain(c.ObjectMeta.Name)
		if len(errs) != 0 {
-			allErrs = append(allErrs, field.Invalid(field.NewPath("Name"), c.ObjectMeta.Name, fmt.Sprintf("Cluster Name must be a valid DNS name (e.g. --name=mycluster.myzone.com) errors: %s", strings.Join(errs, ", "))))
+			allErrs = append(allErrs, field.Invalid(field.NewPath("objectMeta", "name"), c.ObjectMeta.Name, fmt.Sprintf("Cluster Name must be a valid DNS name (e.g. --name=mycluster.myzone.com) errors: %s", strings.Join(errs, ", "))))
		} else if !strings.Contains(c.ObjectMeta.Name, ".") {
			// Tolerate if this is a cluster we are importing for upgrade
			if c.ObjectMeta.Annotations[kops.AnnotationNameManagement] != kops.AnnotationValueManagementImported {
-				allErrs = append(allErrs, field.Invalid(field.NewPath("Name"), c.ObjectMeta.Name, "Cluster Name must be a fully-qualified DNS name (e.g. --name=mycluster.myzone.com)"))
+				allErrs = append(allErrs, field.Invalid(field.NewPath("objectMeta", "name"), c.ObjectMeta.Name, "Cluster Name must be a fully-qualified DNS name (e.g. --name=mycluster.myzone.com)"))
			}
		}
	}
@@ -81,7 +81,7 @@ func ValidateCluster(c *kops.Cluster, strict bool) field.ErrorList {
	requiresSubnetCIDR := true
	switch kops.CloudProviderID(c.Spec.CloudProvider) {
	case "":
-		allErrs = append(allErrs, field.Required(fieldSpec.Child("CloudProvider"), ""))
+		allErrs = append(allErrs, field.Required(fieldSpec.Child("cloudProvider"), ""))
		requiresSubnets = false
		requiresSubnetCIDR = false
		requiresNetworkCIDR = false
@@ -91,13 +91,13 @@ func ValidateCluster(c *kops.Cluster, strict bool) field.ErrorList {
		requiresSubnetCIDR = false
		requiresNetworkCIDR = false
		if c.Spec.NetworkCIDR != "" {
-			allErrs = append(allErrs, field.Invalid(fieldSpec.Child("NetworkCIDR"), c.Spec.NetworkCIDR, "NetworkCIDR should not be set on bare metal"))
+			allErrs = append(allErrs, field.Invalid(fieldSpec.Child("networkCIDR"), c.Spec.NetworkCIDR, "networkCIDR should not be set on bare metal"))
		}

	case kops.CloudProviderGCE:
		requiresNetworkCIDR = false
		if c.Spec.NetworkCIDR != "" {
-			allErrs = append(allErrs, field.Invalid(fieldSpec.Child("NetworkCIDR"), c.Spec.NetworkCIDR, "NetworkCIDR should not be set on GCE"))
+			allErrs = append(allErrs, field.Invalid(fieldSpec.Child("networkCIDR"), c.Spec.NetworkCIDR, "networkCIDR should not be set on GCE"))
		}
		requiresSubnetCIDR = false

@@ -106,7 +106,7 @@ func ValidateCluster(c *kops.Cluster, strict bool) field.ErrorList {
		requiresSubnetCIDR = false
		requiresNetworkCIDR = false
		if c.Spec.NetworkCIDR != "" {
-			allErrs = append(allErrs, field.Invalid(fieldSpec.Child("NetworkCIDR"), c.Spec.NetworkCIDR, "NetworkCIDR should not be set on DigitalOcean"))
+			allErrs = append(allErrs, field.Invalid(fieldSpec.Child("networkCIDR"), c.Spec.NetworkCIDR, "networkCIDR should not be set on DigitalOcean"))
		}
	case kops.CloudProviderALI:
		requiresSubnets = false
@@ -119,37 +119,37 @@ func ValidateCluster(c *kops.Cluster, strict bool) field.ErrorList {
		requiresSubnetCIDR = false

	default:
-		allErrs = append(allErrs, field.Invalid(fieldSpec.Child("CloudProvider"), c.Spec.CloudProvider, "CloudProvider not recognized"))
+		allErrs = append(allErrs, field.Invalid(fieldSpec.Child("cloudProvider"), c.Spec.CloudProvider, "cloudProvider not recognized"))
	}

	if requiresSubnets && len(c.Spec.Subnets) == 0 {
		// TODO: Auto choose zones from region?
-		allErrs = append(allErrs, field.Required(fieldSpec.Child("Subnets"), "must configure at least one Subnet (use --zones)"))
+		allErrs = append(allErrs, field.Required(fieldSpec.Child("subnets"), "must configure at least one subnet (use --zones)"))
	}

	if strict && c.Spec.Kubelet == nil {
-		allErrs = append(allErrs, field.Required(fieldSpec.Child("Kubelet"), "Kubelet not configured"))
+		allErrs = append(allErrs, field.Required(fieldSpec.Child("kubelet"), "kubelet not configured"))
	}
	if strict && c.Spec.MasterKubelet == nil {
-		allErrs = append(allErrs, field.Required(fieldSpec.Child("MasterKubelet"), "MasterKubelet not configured"))
+		allErrs = append(allErrs, field.Required(fieldSpec.Child("masterKubelet"), "masterKubelet not configured"))
	}
	if strict && c.Spec.KubeControllerManager == nil {
-		allErrs = append(allErrs, field.Required(fieldSpec.Child("KubeControllerManager"), "KubeControllerManager not configured"))
+		allErrs = append(allErrs, field.Required(fieldSpec.Child("kubeControllerManager"), "kubeControllerManager not configured"))
	}
	if strict && c.Spec.KubeDNS == nil {
-		allErrs = append(allErrs, field.Required(fieldSpec.Child("KubeDNS"), "KubeDNS not configured"))
+		allErrs = append(allErrs, field.Required(fieldSpec.Child("kubeDNS"), "kubeDNS not configured"))
	}
	if strict && c.Spec.KubeScheduler == nil {
-		allErrs = append(allErrs, field.Required(fieldSpec.Child("KubeScheduler"), "KubeScheduler not configured"))
+		allErrs = append(allErrs, field.Required(fieldSpec.Child("kubeScheduler"), "kubeScheduler not configured"))
	}
	if strict && c.Spec.KubeAPIServer == nil {
-		allErrs = append(allErrs, field.Required(fieldSpec.Child("KubeAPIServer"), "KubeAPIServer not configured"))
+		allErrs = append(allErrs, field.Required(fieldSpec.Child("kubeAPIServer"), "kubeAPIServer not configured"))
	}
	if strict && c.Spec.KubeProxy == nil {
-		allErrs = append(allErrs, field.Required(fieldSpec.Child("KubeProxy"), "KubeProxy not configured"))
+		allErrs = append(allErrs, field.Required(fieldSpec.Child("kubeProxy"), "kubeProxy not configured"))
	}
	if strict && c.Spec.Docker == nil {
-		allErrs = append(allErrs, field.Required(fieldSpec.Child("Docker"), "Docker not configured"))
+		allErrs = append(allErrs, field.Required(fieldSpec.Child("docker"), "docker not configured"))
	}

	// Check NetworkCIDR
@@ -158,12 +158,12 @@ func ValidateCluster(c *kops.Cluster, strict bool) field.ErrorList {
	{
		if c.Spec.NetworkCIDR == "" {
			if requiresNetworkCIDR {
-				allErrs = append(allErrs, field.Required(fieldSpec.Child("NetworkCIDR"), "Cluster did not have NetworkCIDR set"))
+				allErrs = append(allErrs, field.Required(fieldSpec.Child("networkCIDR"), "Cluster did not have networkCIDR set"))
			}
		} else {
			_, networkCIDR, err = net.ParseCIDR(c.Spec.NetworkCIDR)
			if err != nil {
-				allErrs = append(allErrs, field.Invalid(fieldSpec.Child("NetworkCIDR"), c.Spec.NetworkCIDR, fmt.Sprintf("Cluster had an invalid NetworkCIDR")))
+				allErrs = append(allErrs, field.Invalid(fieldSpec.Child("networkCIDR"), c.Spec.NetworkCIDR, fmt.Sprintf("Cluster had an invalid networkCIDR")))
			}
		}
	}
@@ -175,7 +175,7 @@ func ValidateCluster(c *kops.Cluster, strict bool) field.ErrorList {
		for _, AdditionalNetworkCIDR := range c.Spec.AdditionalNetworkCIDRs {
			_, IPNetAdditionalNetworkCIDR, err := net.ParseCIDR(AdditionalNetworkCIDR)
			if err != nil {
-				allErrs = append(allErrs, field.Invalid(fieldSpec.Child("AdditionalNetworkCIDRs"), AdditionalNetworkCIDR, fmt.Sprintf("Cluster had an invalid AdditionalNetworkCIDRs")))
+				allErrs = append(allErrs, field.Invalid(fieldSpec.Child("additionalNetworkCIDRs"), AdditionalNetworkCIDR, fmt.Sprintf("Cluster had an invalid additionalNetworkCIDRs")))
			}
			additionalNetworkCIDRs = append(additionalNetworkCIDRs, IPNetAdditionalNetworkCIDR)
		}
@@ -196,26 +196,26 @@ func ValidateCluster(c *kops.Cluster, strict bool) field.ErrorList {
		nonMasqueradeCIDRString := c.Spec.NonMasqueradeCIDR
		if nonMasqueradeCIDRString == "" {
			if nonMasqueradeCIDRRequired {
-				allErrs = append(allErrs, field.Required(fieldSpec.Child("NonMasqueradeCIDR"), "Cluster did not have NonMasqueradeCIDR set"))
+				allErrs = append(allErrs, field.Required(fieldSpec.Child("nonMasqueradeCIDR"), "Cluster did not have nonMasqueradeCIDR set"))
			}
		} else {
			_, nonMasqueradeCIDR, err = net.ParseCIDR(nonMasqueradeCIDRString)
			if err != nil {
-				allErrs = append(allErrs, field.Invalid(fieldSpec.Child("NonMasqueradeCIDR"), nonMasqueradeCIDRString, "Cluster had an invalid NonMasqueradeCIDR"))
+				allErrs = append(allErrs, field.Invalid(fieldSpec.Child("nonMasqueradeCIDR"), nonMasqueradeCIDRString, "Cluster had an invalid nonMasqueradeCIDR"))
			}

			if networkCIDR != nil && subnet.Overlap(nonMasqueradeCIDR, networkCIDR) && c.Spec.Networking != nil && c.Spec.Networking.AmazonVPC == nil && c.Spec.Networking.LyftVPC == nil {
-				allErrs = append(allErrs, field.Invalid(fieldSpec.Child("NonMasqueradeCIDR"), nonMasqueradeCIDRString, fmt.Sprintf("NonMasqueradeCIDR %q cannot overlap with NetworkCIDR %q", nonMasqueradeCIDRString, c.Spec.NetworkCIDR)))
+				allErrs = append(allErrs, field.Invalid(fieldSpec.Child("nonMasqueradeCIDR"), nonMasqueradeCIDRString, fmt.Sprintf("nonMasqueradeCIDR %q cannot overlap with networkCIDR %q", nonMasqueradeCIDRString, c.Spec.NetworkCIDR)))
			}

			if c.Spec.Kubelet != nil && c.Spec.Kubelet.NonMasqueradeCIDR != nonMasqueradeCIDRString {
				if strict || c.Spec.Kubelet.NonMasqueradeCIDR != "" {
-					allErrs = append(allErrs, field.Invalid(fieldSpec.Child("NonMasqueradeCIDR"), nonMasqueradeCIDRString, "Kubelet NonMasqueradeCIDR did not match cluster NonMasqueradeCIDR"))
+					allErrs = append(allErrs, field.Invalid(fieldSpec.Child("nonMasqueradeCIDR"), nonMasqueradeCIDRString, "kubelet nonMasqueradeCIDR did not match cluster nonMasqueradeCIDR"))
				}
			}
			if c.Spec.MasterKubelet != nil && c.Spec.MasterKubelet.NonMasqueradeCIDR != nonMasqueradeCIDRString {
				if strict || c.Spec.MasterKubelet.NonMasqueradeCIDR != "" {
-					allErrs = append(allErrs, field.Invalid(fieldSpec.Child("NonMasqueradeCIDR"), nonMasqueradeCIDRString, "MasterKubelet NonMasqueradeCIDR did not match cluster NonMasqueradeCIDR"))
+					allErrs = append(allErrs, field.Invalid(fieldSpec.Child("nonMasqueradeCIDR"), nonMasqueradeCIDRString, "masterKubelet nonMasqueradeCIDR did not match cluster nonMasqueradeCIDR"))
				}
			}
		}
@@ -227,20 +227,20 @@ func ValidateCluster(c *kops.Cluster, strict bool) field.ErrorList {
		serviceClusterIPRangeString := c.Spec.ServiceClusterIPRange
		if serviceClusterIPRangeString == "" {
			if strict {
-				allErrs = append(allErrs, field.Required(fieldSpec.Child("ServiceClusterIPRange"), "Cluster did not have ServiceClusterIPRange set"))
+				allErrs = append(allErrs, field.Required(fieldSpec.Child("serviceClusterIPRange"), "Cluster did not have serviceClusterIPRange set"))
			}
		} else {
			_, serviceClusterIPRange, err = net.ParseCIDR(serviceClusterIPRangeString)
			if err != nil {
-				allErrs = append(allErrs, field.Invalid(fieldSpec.Child("ServiceClusterIPRange"), serviceClusterIPRangeString, "Cluster had an invalid ServiceClusterIPRange"))
+				allErrs = append(allErrs, field.Invalid(fieldSpec.Child("serviceClusterIPRange"), serviceClusterIPRangeString, "Cluster had an invalid serviceClusterIPRange"))
			} else {
				if nonMasqueradeCIDR != nil && serviceClusterMustBeSubnetOfNonMasqueradeCIDR && !subnet.BelongsTo(nonMasqueradeCIDR, serviceClusterIPRange) {
-					allErrs = append(allErrs, field.Invalid(fieldSpec.Child("ServiceClusterIPRange"), serviceClusterIPRangeString, fmt.Sprintf("ServiceClusterIPRange %q must be a subnet of NonMasqueradeCIDR %q", serviceClusterIPRangeString, c.Spec.NonMasqueradeCIDR)))
+					allErrs = append(allErrs, field.Invalid(fieldSpec.Child("serviceClusterIPRange"), serviceClusterIPRangeString, fmt.Sprintf("serviceClusterIPRange %q must be a subnet of nonMasqueradeCIDR %q", serviceClusterIPRangeString, c.Spec.NonMasqueradeCIDR)))
				}

				if c.Spec.KubeAPIServer != nil && c.Spec.KubeAPIServer.ServiceClusterIPRange != serviceClusterIPRangeString {
					if strict || c.Spec.KubeAPIServer.ServiceClusterIPRange != "" {
-						allErrs = append(allErrs, field.Invalid(fieldSpec.Child("ServiceClusterIPRange"), serviceClusterIPRangeString, "KubeAPIServer ServiceClusterIPRange did not match cluster ServiceClusterIPRange"))
+						allErrs = append(allErrs, field.Invalid(fieldSpec.Child("serviceClusterIPRange"), serviceClusterIPRangeString, "kubeAPIServer serviceClusterIPRange did not match cluster serviceClusterIPRange"))
					}
				}
			}
@@ -253,21 +253,21 @@ func ValidateCluster(c *kops.Cluster, strict bool) field.ErrorList {
		switch action {
		case "", "ACCEPT", "DROP", "RETURN":
		default:
-			allErrs = append(allErrs, field.Invalid(fieldSpec.Child("Networking", "Canal", "DefaultEndpointToHostAction"), action, fmt.Sprintf("Unsupported value: %s, supports 'ACCEPT', 'DROP' or 'RETURN'", action)))
+			allErrs = append(allErrs, field.Invalid(fieldSpec.Child("networking", "canal", "defaultEndpointToHostAction"), action, fmt.Sprintf("Unsupported value: %s, supports 'ACCEPT', 'DROP' or 'RETURN'", action)))
		}

		chainInsertMode := c.Spec.Networking.Canal.ChainInsertMode
		switch chainInsertMode {
		case "", "insert", "append":
		default:
-			allErrs = append(allErrs, field.Invalid(fieldSpec.Child("Networking", "Canal", "ChainInsertMode"), chainInsertMode, fmt.Sprintf("Unsupported value: %s, supports 'insert' or 'append'", chainInsertMode)))
+			allErrs = append(allErrs, field.Invalid(fieldSpec.Child("networking", "canal", "chainInsertMode"), chainInsertMode, fmt.Sprintf("Unsupported value: %s, supports 'insert' or 'append'", chainInsertMode)))
		}

		logSeveritySys := c.Spec.Networking.Canal.LogSeveritySys
		switch logSeveritySys {
		case "", "INFO", "DEBUG", "WARNING", "ERROR", "CRITICAL", "NONE":
		default:
-			allErrs = append(allErrs, field.Invalid(fieldSpec.Child("Networking", "Canal", "LogSeveritySys"), logSeveritySys, fmt.Sprintf("Unsupported value: %s, supports 'INFO', 'DEBUG', 'WARNING', 'ERROR', 'CRITICAL' or 'NONE'", logSeveritySys)))
+			allErrs = append(allErrs, field.Invalid(fieldSpec.Child("networking", "canal", "logSeveritySys"), logSeveritySys, fmt.Sprintf("Unsupported value: %s, supports 'INFO', 'DEBUG', 'WARNING', 'ERROR', 'CRITICAL' or 'NONE'", logSeveritySys)))
		}
	}

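The ValidateCluster hunks above touch three different error constructors from the same package. A brief sketch, under the same assumptions as the earlier examples, of how they differ; the rendered message text is approximate:

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/util/validation/field"
    )

    func main() {
        spec := field.NewPath("spec")
        errs := field.ErrorList{
            // Missing value: roughly "spec.kubelet: Required value: kubelet not configured"
            field.Required(spec.Child("kubelet"), "kubelet not configured"),
            // Bad value: roughly "spec.networkCIDR: Invalid value: \"10.0.0.0/999\": ..."
            field.Invalid(spec.Child("networkCIDR"), "10.0.0.0/999", "Cluster had an invalid networkCIDR"),
            // Disallowed setting: roughly "spec.kubeProxy.enabled: Forbidden: ..."
            field.Forbidden(spec.Child("kubeProxy", "enabled"), "must be disabled when Cilium NodePort is enabled"),
        }
        for _, e := range errs {
            // Each error carries the renamed field path in e.Field and a type-specific message.
            fmt.Println(e.Error())
        }
    }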
@@ -278,9 +278,9 @@ func ValidateCluster(c *kops.Cluster, strict bool) field.ErrorList {
		if clusterCIDRString != "" {
			_, clusterCIDR, err = net.ParseCIDR(clusterCIDRString)
			if err != nil {
-				allErrs = append(allErrs, field.Invalid(fieldSpec.Child("KubeControllerManager", "ClusterCIDR"), clusterCIDRString, "Cluster had an invalid KubeControllerManager.ClusterCIDR"))
+				allErrs = append(allErrs, field.Invalid(fieldSpec.Child("kubeControllerManager", "clusterCIDR"), clusterCIDRString, "cluster had an invalid kubeControllerManager.clusterCIDR"))
			} else if nonMasqueradeCIDR != nil && !subnet.BelongsTo(nonMasqueradeCIDR, clusterCIDR) {
-				allErrs = append(allErrs, field.Invalid(fieldSpec.Child("KubeControllerManager", "ClusterCIDR"), clusterCIDRString, fmt.Sprintf("KubeControllerManager.ClusterCIDR %q must be a subnet of NonMasqueradeCIDR %q", clusterCIDRString, c.Spec.NonMasqueradeCIDR)))
+				allErrs = append(allErrs, field.Invalid(fieldSpec.Child("kubeControllerManager", "clusterCIDR"), clusterCIDRString, fmt.Sprintf("kubeControllerManager.clusterCIDR %q must be a subnet of nonMasqueradeCIDR %q", clusterCIDRString, c.Spec.NonMasqueradeCIDR)))
			}
		}
	}
@@ -354,23 +354,23 @@ func ValidateCluster(c *kops.Cluster, strict bool) field.ErrorList {
	if k8sCloudProvider != "ignore" {
		if c.Spec.Kubelet != nil && (strict || c.Spec.Kubelet.CloudProvider != "") {
			if c.Spec.Kubelet.CloudProvider != "external" && k8sCloudProvider != c.Spec.Kubelet.CloudProvider {
-				allErrs = append(allErrs, field.Invalid(fieldSpec.Child("Kubelet", "CloudProvider"), c.Spec.Kubelet.CloudProvider, "Did not match cluster CloudProvider"))
+				allErrs = append(allErrs, field.Invalid(fieldSpec.Child("kubelet", "cloudProvider"), c.Spec.Kubelet.CloudProvider, "Did not match cluster cloudProvider"))
			}
		}
		if c.Spec.MasterKubelet != nil && (strict || c.Spec.MasterKubelet.CloudProvider != "") {
			if c.Spec.MasterKubelet.CloudProvider != "external" && k8sCloudProvider != c.Spec.MasterKubelet.CloudProvider {
-				allErrs = append(allErrs, field.Invalid(fieldSpec.Child("MasterKubelet", "CloudProvider"), c.Spec.MasterKubelet.CloudProvider, "Did not match cluster CloudProvider"))
+				allErrs = append(allErrs, field.Invalid(fieldSpec.Child("masterKubelet", "cloudProvider"), c.Spec.MasterKubelet.CloudProvider, "Did not match cluster cloudProvider"))

			}
		}
		if c.Spec.KubeAPIServer != nil && (strict || c.Spec.KubeAPIServer.CloudProvider != "") {
			if c.Spec.KubeAPIServer.CloudProvider != "external" && k8sCloudProvider != c.Spec.KubeAPIServer.CloudProvider {
-				allErrs = append(allErrs, field.Invalid(fieldSpec.Child("KubeAPIServer", "CloudProvider"), c.Spec.KubeAPIServer.CloudProvider, "Did not match cluster CloudProvider"))
+				allErrs = append(allErrs, field.Invalid(fieldSpec.Child("kubeAPIServer", "cloudProvider"), c.Spec.KubeAPIServer.CloudProvider, "Did not match cluster cloudProvider"))
			}
		}
		if c.Spec.KubeControllerManager != nil && (strict || c.Spec.KubeControllerManager.CloudProvider != "") {
			if c.Spec.KubeControllerManager.CloudProvider != "external" && k8sCloudProvider != c.Spec.KubeControllerManager.CloudProvider {
-				allErrs = append(allErrs, field.Invalid(fieldSpec.Child("KubeControllerManager", "CloudProvider"), c.Spec.KubeControllerManager.CloudProvider, "Did not match cluster CloudProvider"))
+				allErrs = append(allErrs, field.Invalid(fieldSpec.Child("kubeControllerManager", "cloudProvider"), c.Spec.KubeControllerManager.CloudProvider, "Did not match cluster cloudProvider"))
			}
		}
	}
@@ -379,17 +379,17 @@ func ValidateCluster(c *kops.Cluster, strict bool) field.ErrorList {
	// Check that the subnet CIDRs are all consistent
	{
		for i, s := range c.Spec.Subnets {
-			fieldSubnet := fieldSpec.Child("Subnets").Index(i)
+			fieldSubnet := fieldSpec.Child("subnets").Index(i)
			if s.CIDR == "" {
				if requiresSubnetCIDR && strict {
-					allErrs = append(allErrs, field.Required(fieldSubnet.Child("CIDR"), "Subnet did not have a CIDR set"))
+					allErrs = append(allErrs, field.Required(fieldSubnet.Child("cidr"), "subnet did not have a cidr set"))
				}
			} else {
				_, subnetCIDR, err := net.ParseCIDR(s.CIDR)
				if err != nil {
-					allErrs = append(allErrs, field.Invalid(fieldSubnet.Child("CIDR"), s.CIDR, "Subnet had an invalid CIDR"))
+					allErrs = append(allErrs, field.Invalid(fieldSubnet.Child("cidr"), s.CIDR, "subnet had an invalid cidr"))
				} else if networkCIDR != nil && !validateSubnetCIDR(networkCIDR, additionalNetworkCIDRs, subnetCIDR) {
-					allErrs = append(allErrs, field.Invalid(fieldSubnet.Child("CIDR"), s.CIDR, fmt.Sprintf("Subnet %q had a CIDR %q that was not a subnet of the NetworkCIDR %q", s.Name, s.CIDR, c.Spec.NetworkCIDR)))
+					allErrs = append(allErrs, field.Invalid(fieldSubnet.Child("cidr"), s.CIDR, fmt.Sprintf("subnet %q had a cidr %q that was not a subnet of the networkCIDR %q", s.Name, s.CIDR, c.Spec.NetworkCIDR)))
				}
			}
		}
@@ -399,12 +399,12 @@ func ValidateCluster(c *kops.Cluster, strict bool) field.ErrorList {
	if c.Spec.NodeAuthorization != nil {
		// @check the feature gate is enabled for this
		if !featureflag.EnableNodeAuthorization.Enabled() {
-			allErrs = append(allErrs, field.Invalid(field.NewPath("nodeAuthorization"), nil, "node authorization is experimental feature; set `export KOPS_FEATURE_FLAGS=EnableNodeAuthorization`"))
+			allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "nodeAuthorization"), nil, "node authorization is experimental feature; set `export KOPS_FEATURE_FLAGS=EnableNodeAuthorization`"))
		} else {
			if c.Spec.NodeAuthorization.NodeAuthorizer == nil {
-				allErrs = append(allErrs, field.Invalid(field.NewPath("nodeAuthorization"), nil, "no node authorization policy has been set"))
+				allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "nodeAuthorization"), nil, "no node authorization policy has been set"))
			} else {
-				path := field.NewPath("nodeAuthorization").Child("nodeAuthorizer")
+				path := field.NewPath("spec", "nodeAuthorization").Child("nodeAuthorizer")
				if c.Spec.NodeAuthorization.NodeAuthorizer.Port < 0 || c.Spec.NodeAuthorization.NodeAuthorizer.Port >= 65535 {
					allErrs = append(allErrs, field.Invalid(path.Child("port"), c.Spec.NodeAuthorization.NodeAuthorizer.Port, "invalid port"))
				}
@@ -417,11 +417,11 @@ func ValidateCluster(c *kops.Cluster, strict bool) field.ErrorList {

			// @question: we could probably just default these settings in the model when the node-authorizer is enabled??
			if c.Spec.KubeAPIServer == nil {
-				allErrs = append(allErrs, field.Invalid(field.NewPath("kubeAPIServer"), c.Spec.KubeAPIServer, "bootstrap token authentication is not enabled in the kube-apiserver"))
+				allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "kubeAPIServer"), c.Spec.KubeAPIServer, "bootstrap token authentication is not enabled in the kube-apiserver"))
			} else if c.Spec.KubeAPIServer.EnableBootstrapAuthToken == nil {
-				allErrs = append(allErrs, field.Invalid(field.NewPath("kubeAPIServer").Child("enableBootstrapAuthToken"), nil, "kube-apiserver has not been configured to use bootstrap tokens"))
+				allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "kubeAPIServer").Child("enableBootstrapAuthToken"), nil, "kube-apiserver has not been configured to use bootstrap tokens"))
			} else if !fi.BoolValue(c.Spec.KubeAPIServer.EnableBootstrapAuthToken) {
-				allErrs = append(allErrs, field.Invalid(field.NewPath("kubeAPIServer").Child("enableBootstrapAuthToken"),
+				allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "kubeAPIServer").Child("enableBootstrapAuthToken"),
					c.Spec.KubeAPIServer.EnableBootstrapAuthToken, "bootstrap tokens in the kube-apiserver has been disabled"))
			}
		}
@@ -434,23 +434,23 @@ func ValidateCluster(c *kops.Cluster, strict bool) field.ErrorList {
		case kops.UpdatePolicyExternal:
			// Valid
		default:
-			allErrs = append(allErrs, field.Invalid(fieldSpec.Child("UpdatePolicy"), *c.Spec.UpdatePolicy, "unrecognized value for UpdatePolicy"))
+			allErrs = append(allErrs, field.Invalid(fieldSpec.Child("updatePolicy"), *c.Spec.UpdatePolicy, "unrecognized value for updatePolicy"))
		}
	}

	// KubeProxy
	if c.Spec.KubeProxy != nil {
-		kubeProxyPath := fieldSpec.Child("KubeProxy")
+		kubeProxyPath := fieldSpec.Child("kubeProxy")
		master := c.Spec.KubeProxy.Master

		for i, x := range c.Spec.KubeProxy.IPVSExcludeCIDRS {
			if _, _, err := net.ParseCIDR(x); err != nil {
-				allErrs = append(allErrs, field.Invalid(kubeProxyPath.Child("ipvsExcludeCIDRS").Index(i), x, "Invalid network CIDR"))
+				allErrs = append(allErrs, field.Invalid(kubeProxyPath.Child("ipvsExcludeCidrs").Index(i), x, "Invalid network CIDR"))
			}
		}

		if master != "" && !isValidAPIServersURL(master) {
-			allErrs = append(allErrs, field.Invalid(kubeProxyPath.Child("Master"), master, "Not a valid APIServer URL"))
+			allErrs = append(allErrs, field.Invalid(kubeProxyPath.Child("master"), master, "Not a valid APIServer URL"))
		}
	}

@@ -459,9 +459,9 @@ func ValidateCluster(c *kops.Cluster, strict bool) field.ErrorList {
	if kubernetesRelease.GTE(semver.MustParse("1.10.0")) {
		if len(c.Spec.KubeAPIServer.AdmissionControl) > 0 {
			if len(c.Spec.KubeAPIServer.DisableAdmissionPlugins) > 0 {
-				allErrs = append(allErrs, field.Invalid(fieldSpec.Child("KubeAPIServer").Child("DisableAdmissionPlugins"),
+				allErrs = append(allErrs, field.Invalid(fieldSpec.Child("kubeAPIServer").Child("disableAdmissionPlugins"),
					strings.Join(c.Spec.KubeAPIServer.DisableAdmissionPlugins, ","),
-					"DisableAdmissionPlugins is mutually exclusive, you cannot use both AdmissionControl and DisableAdmissionPlugins together"))
+					"disableAdmissionPlugins is mutually exclusive, you cannot use both admissionControl and disableAdmissionPlugins together"))
			}
		}
	}
@@ -469,13 +469,13 @@ func ValidateCluster(c *kops.Cluster, strict bool) field.ErrorList {

	// Kubelet
	if c.Spec.Kubelet != nil {
-		kubeletPath := fieldSpec.Child("Kubelet")
+		kubeletPath := fieldSpec.Child("kubelet")

		{
			// Flag removed in 1.6
			if c.Spec.Kubelet.APIServers != "" {
				allErrs = append(allErrs, field.Invalid(
-					kubeletPath.Child("APIServers"),
+					kubeletPath.Child("apiServers"),
					c.Spec.Kubelet.APIServers,
					"api-servers flag was removed in 1.6"))
			}
@@ -493,24 +493,24 @@ func ValidateCluster(c *kops.Cluster, strict bool) field.ErrorList {

		if c.Spec.Kubelet.BootstrapKubeconfig != "" {
			if c.Spec.KubeAPIServer == nil {
-				allErrs = append(allErrs, field.Required(fieldSpec.Child("KubeAPIServer"), "bootstrap token require the NodeRestriction admissions controller"))
+				allErrs = append(allErrs, field.Required(fieldSpec.Child("kubeAPIServer"), "bootstrap token require the NodeRestriction admissions controller"))
			}
		}

		if c.Spec.Kubelet.APIServers != "" && !isValidAPIServersURL(c.Spec.Kubelet.APIServers) {
-			allErrs = append(allErrs, field.Invalid(kubeletPath.Child("APIServers"), c.Spec.Kubelet.APIServers, "Not a valid APIServer URL"))
+			allErrs = append(allErrs, field.Invalid(kubeletPath.Child("apiServers"), c.Spec.Kubelet.APIServers, "Not a valid apiServer URL"))
		}
	}

	// MasterKubelet
	if c.Spec.MasterKubelet != nil {
-		masterKubeletPath := fieldSpec.Child("MasterKubelet")
+		masterKubeletPath := fieldSpec.Child("masterKubelet")

		{
			// Flag removed in 1.6
			if c.Spec.MasterKubelet.APIServers != "" {
				allErrs = append(allErrs, field.Invalid(
-					masterKubeletPath.Child("APIServers"),
+					masterKubeletPath.Child("apiServers"),
					c.Spec.MasterKubelet.APIServers,
					"api-servers flag was removed in 1.6"))
			}
@@ -527,7 +527,7 @@ func ValidateCluster(c *kops.Cluster, strict bool) field.ErrorList {
		}

		if c.Spec.MasterKubelet.APIServers != "" && !isValidAPIServersURL(c.Spec.MasterKubelet.APIServers) {
-			allErrs = append(allErrs, field.Invalid(masterKubeletPath.Child("APIServers"), c.Spec.MasterKubelet.APIServers, "Not a valid APIServer URL"))
+			allErrs = append(allErrs, field.Invalid(masterKubeletPath.Child("apiServers"), c.Spec.MasterKubelet.APIServers, "Not a valid apiServers URL"))
		}
	}

@@ -535,25 +535,25 @@ func ValidateCluster(c *kops.Cluster, strict bool) field.ErrorList {
	if c.Spec.Topology != nil {
		if c.Spec.Topology.Masters != "" && c.Spec.Topology.Nodes != "" {
			if c.Spec.Topology.Masters != kops.TopologyPublic && c.Spec.Topology.Masters != kops.TopologyPrivate {
-				allErrs = append(allErrs, field.Invalid(fieldSpec.Child("Topology", "Masters"), c.Spec.Topology.Masters, "Invalid Masters value for Topology"))
+				allErrs = append(allErrs, field.Invalid(fieldSpec.Child("topology", "masters"), c.Spec.Topology.Masters, "Invalid masters value for topology"))
			}
			if c.Spec.Topology.Nodes != kops.TopologyPublic && c.Spec.Topology.Nodes != kops.TopologyPrivate {
-				allErrs = append(allErrs, field.Invalid(fieldSpec.Child("Topology", "Nodes"), c.Spec.Topology.Nodes, "Invalid Nodes value for Topology"))
+				allErrs = append(allErrs, field.Invalid(fieldSpec.Child("topology", "nodes"), c.Spec.Topology.Nodes, "Invalid nodes value for topology"))
			}

		} else {
-			allErrs = append(allErrs, field.Required(fieldSpec.Child("Masters"), "Topology requires non-nil values for Masters and Nodes"))
+			allErrs = append(allErrs, field.Required(fieldSpec.Child("masters"), "topology requires non-nil values for masters and nodes"))
		}
		if c.Spec.Topology.Bastion != nil {
			bastion := c.Spec.Topology.Bastion
			if c.Spec.Topology.Masters == kops.TopologyPublic || c.Spec.Topology.Nodes == kops.TopologyPublic {
-				allErrs = append(allErrs, field.Invalid(fieldSpec.Child("Topology", "Masters"), c.Spec.Topology.Masters, "Bastion supports only Private Masters and Nodes"))
+				allErrs = append(allErrs, field.Invalid(fieldSpec.Child("topology", "masters"), c.Spec.Topology.Masters, "bastion supports only private masters and nodes"))
			}
			if bastion.IdleTimeoutSeconds != nil && *bastion.IdleTimeoutSeconds <= 0 {
-				allErrs = append(allErrs, field.Invalid(fieldSpec.Child("Topology", "Bastion", "IdleTimeoutSeconds"), *bastion.IdleTimeoutSeconds, "Bastion IdleTimeoutSeconds should be greater than zero"))
+				allErrs = append(allErrs, field.Invalid(fieldSpec.Child("topology", "bastion", "idleTimeoutSeconds"), *bastion.IdleTimeoutSeconds, "bastion idleTimeoutSeconds should be greater than zero"))
			}
			if bastion.IdleTimeoutSeconds != nil && *bastion.IdleTimeoutSeconds > 3600 {
-				allErrs = append(allErrs, field.Invalid(fieldSpec.Child("Topology", "Bastion", "IdleTimeoutSeconds"), *bastion.IdleTimeoutSeconds, "Bastion IdleTimeoutSeconds cannot be greater than one hour"))
+				allErrs = append(allErrs, field.Invalid(fieldSpec.Child("topology", "bastion", "idleTimeoutSeconds"), *bastion.IdleTimeoutSeconds, "bastion idleTimeoutSeconds cannot be greater than one hour"))
			}

		}
@@ -564,19 +564,19 @@ func ValidateCluster(c *kops.Cluster, strict bool) field.ErrorList {
			if s.Egress == "" {
				continue
			}
-			fieldSubnet := fieldSpec.Child("Subnets").Index(i)
+			fieldSubnet := fieldSpec.Child("subnets").Index(i)
			if !strings.HasPrefix(s.Egress, "nat-") && !strings.HasPrefix(s.Egress, "i-") && s.Egress != kops.EgressExternal {
-				allErrs = append(allErrs, field.Invalid(fieldSubnet.Child("Egress"), s.Egress, "egress must be of type NAT Gateway or NAT EC2 Instance or 'External'"))
+				allErrs = append(allErrs, field.Invalid(fieldSubnet.Child("egress"), s.Egress, "egress must be of type NAT Gateway or NAT EC2 Instance or 'External'"))
			}
			if s.Egress != kops.EgressExternal && s.Type != "Private" {
-				allErrs = append(allErrs, field.Invalid(fieldSubnet.Child("Egress"), s.Egress, "egress can only be specified for Private subnets"))
+				allErrs = append(allErrs, field.Invalid(fieldSubnet.Child("egress"), s.Egress, "egress can only be specified for private subnets"))
			}
		}
	}

	// Etcd
	{
-		fieldEtcdClusters := fieldSpec.Child("EtcdClusters")
+		fieldEtcdClusters := fieldSpec.Child("etcdClusters")

		if len(c.Spec.EtcdClusters) == 0 {
			allErrs = append(allErrs, field.Required(fieldEtcdClusters, ""))
@@ -591,19 +591,19 @@ func ValidateCluster(c *kops.Cluster, strict bool) field.ErrorList {

	{
		if c.Spec.Networking != nil && c.Spec.Networking.Classic != nil {
-			allErrs = append(allErrs, field.Invalid(fieldSpec.Child("Networking"), "classic", "classic networking is not supported with kubernetes versions 1.4 and later"))
+			allErrs = append(allErrs, field.Invalid(fieldSpec.Child("networking"), "classic", "classic networking is not supported with kubernetes versions 1.4 and later"))
		}
	}

	if c.Spec.Networking != nil && (c.Spec.Networking.AmazonVPC != nil || c.Spec.Networking.LyftVPC != nil) &&
		c.Spec.CloudProvider != "aws" {
-		allErrs = append(allErrs, field.Invalid(fieldSpec.Child("Networking"), "amazon-vpc-routed-eni", "amazon-vpc-routed-eni networking is supported only in AWS"))
+		allErrs = append(allErrs, field.Invalid(fieldSpec.Child("networking"), "amazon-vpc-routed-eni", "amazon-vpc-routed-eni networking is supported only in AWS"))
	}

	allErrs = append(allErrs, newValidateCluster(c)...)

	if c.Spec.Networking != nil && c.Spec.Networking.Cilium != nil && c.Spec.Networking.Cilium.EnableNodePort && c.Spec.KubeProxy != nil && *c.Spec.KubeProxy.Enabled {
-		allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("KubeProxy").Child("enabled"), "When Cilium NodePort is enabled, KubeProxy must be disabled"))
+		allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("kubeProxy").Child("enabled"), "When kilium NodePort is enabled, kubeProxy must be disabled"))
	}

	return allErrs
@@ -628,13 +628,13 @@ func validateSubnetCIDR(networkCIDR *net.IPNet, additionalNetworkCIDRs []*net.IP
func validateEtcdClusterSpecLegacy(spec *kops.EtcdClusterSpec, fieldPath *field.Path) field.ErrorList {
	allErrs := field.ErrorList{}
	if spec.Name == "" {
-		allErrs = append(allErrs, field.Required(fieldPath.Child("Name"), "EtcdCluster did not have name"))
+		allErrs = append(allErrs, field.Required(fieldPath.Child("name"), "etcdCluster did not have name"))
	}
	if len(spec.Members) == 0 {
-		allErrs = append(allErrs, field.Required(fieldPath.Child("Members"), "No members defined in etcd cluster"))
+		allErrs = append(allErrs, field.Required(fieldPath.Child("members"), "No members defined in etcd cluster"))
	} else if (len(spec.Members) % 2) == 0 {
		// Not technically a requirement, but doesn't really make sense to allow
-		allErrs = append(allErrs, field.Invalid(fieldPath.Child("Members"), len(spec.Members), "Should be an odd number of master-zones for quorum. Use --zones and --master-zones to declare node zones and master zones separately"))
+		allErrs = append(allErrs, field.Invalid(fieldPath.Child("members"), len(spec.Members), "Should be an odd number of master-zones for quorum. Use --zones and --master-zones to declare node zones and master zones separately"))
	}
	allErrs = append(allErrs, validateEtcdVersion(spec, fieldPath, nil)...)
	for _, m := range spec.Members {
@@ -655,7 +655,7 @@ func validateEtcdTLS(specs []*kops.EtcdClusterSpec, fieldPath *field.Path) field
	}
	// check both clusters are using tls if one us enabled
	if usingTLS > 0 && usingTLS != len(specs) {
-		allErrs = append(allErrs, field.Invalid(fieldPath.Index(0).Child("EnableEtcdTLS"), false, "Both etcd clusters must have TLS enabled or none at all"))
+		allErrs = append(allErrs, field.Invalid(fieldPath.Index(0).Child("enableEtcdTLS"), false, "both etcd clusters must have TLS enabled or none at all"))
	}

	return allErrs
@@ -667,7 +667,7 @@ func validateEtcdStorage(specs []*kops.EtcdClusterSpec, fieldPath *field.Path) f
	version := specs[0].Version
	for i, x := range specs {
		if x.Version != "" && x.Version != version {
-			allErrs = append(allErrs, field.Invalid(fieldPath.Index(i).Child("Version"), x.Version, fmt.Sprintf("cluster: %q, has a different storage versions: %q, both must be the same", x.Name, x.Version)))
+			allErrs = append(allErrs, field.Invalid(fieldPath.Index(i).Child("version"), x.Version, fmt.Sprintf("cluster: %q, has a different storage versions: %q, both must be the same", x.Name, x.Version)))
		}
	}

@@ -691,29 +691,29 @@ func validateEtcdVersion(spec *kops.EtcdClusterSpec, fieldPath *field.Path, mini

	sem, err := semver.Parse(strings.TrimPrefix(version, "v"))
	if err != nil {
-		return field.ErrorList{field.Invalid(fieldPath.Child("Version"), version, "the storage version is invalid")}
+		return field.ErrorList{field.Invalid(fieldPath.Child("version"), version, "the storage version is invalid")}
	}

	// we only support v3 and v2 for now
	if sem.Major == 3 || sem.Major == 2 {
		if sem.LT(*minimalVersion) {
-			return field.ErrorList{field.Invalid(fieldPath.Child("Version"), version, fmt.Sprintf("minimal version required is %s", minimalVersion.String()))}
+			return field.ErrorList{field.Invalid(fieldPath.Child("version"), version, fmt.Sprintf("minimal version required is %s", minimalVersion.String()))}
		}
		return nil
	}

-	return field.ErrorList{field.Invalid(fieldPath.Child("Version"), version, "unsupported storage version, we only support major versions 2 and 3")}
+	return field.ErrorList{field.Invalid(fieldPath.Child("version"), version, "unsupported storage version, we only support major versions 2 and 3")}
}

// validateEtcdMemberSpec is responsible for validate the cluster member
func validateEtcdMemberSpec(spec *kops.EtcdMemberSpec, fieldPath *field.Path) field.ErrorList {
	allErrs := field.ErrorList{}
	if spec.Name == "" {
-		allErrs = append(allErrs, field.Required(fieldPath.Child("Name"), "EtcdMember did not have Name"))
+		allErrs = append(allErrs, field.Required(fieldPath.Child("name"), "etcdMember did not have name"))
	}

	if fi.StringValue(spec.InstanceGroup) == "" {
-		allErrs = append(allErrs, field.Required(fieldPath.Child("InstanceGroup"), "EtcdMember did not have InstanceGroup"))
+		allErrs = append(allErrs, field.Required(fieldPath.Child("instanceGroup"), "etcdMember did not have instanceGroup"))
	}

	return allErrs
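validateEtcdVersion above trims an optional "v" prefix and parses the remainder as a semantic version before comparing it to a minimum; the calls shown (semver.Parse, semver.MustParse, Version.LT) match the github.com/blang/semver package that kops vendors, which this standalone sketch assumes:

    package main

    import (
        "fmt"
        "strings"

        "github.com/blang/semver"
    )

    // etcdVersionOK mirrors the shape of the check above: only the 2.x and 3.x
    // series are accepted, and the parsed version must not be older than the minimum.
    func etcdVersionOK(version, minimal string) (bool, error) {
        sem, err := semver.Parse(strings.TrimPrefix(version, "v"))
        if err != nil {
            return false, fmt.Errorf("the storage version is invalid: %v", err)
        }
        if sem.Major != 2 && sem.Major != 3 {
            return false, fmt.Errorf("unsupported storage version %q", version)
        }
        return !sem.LT(semver.MustParse(minimal)), nil
    }

    func main() {
        fmt.Println(etcdVersionOK("v3.2.24", "3.2.18")) // true <nil>
    }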
@ -757,7 +757,7 @@ func DeepValidate(c *kops.Cluster, groups []*kops.InstanceGroup, strict bool) er
|
|||
errs = append(errs, awsValidateInstanceGroup(g)...)
|
||||
default:
|
||||
if len(g.Spec.Volumes) > 0 {
|
||||
errs = append(errs, field.Forbidden(field.NewPath("spec.volumes"), "instancegroup volumes are only available with aws at present"))
|
||||
errs = append(errs, field.Forbidden(field.NewPath("spec", "volumes"), "instancegroup volumes are only available with aws at present"))
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@@ -99,7 +99,7 @@ func validateClusterSpec(spec *kops.ClusterSpec, fieldPath *field.Path) field.Er
 	if spec.Networking != nil {
 		allErrs = append(allErrs, validateNetworking(spec, spec.Networking, fieldPath.Child("networking"))...)
 		if spec.Networking.Calico != nil {
-			allErrs = append(allErrs, validateNetworkingCalico(spec.Networking.Calico, spec.EtcdClusters[0], fieldPath.Child("networking").Child("Calico"))...)
+			allErrs = append(allErrs, validateNetworkingCalico(spec.Networking.Calico, spec.EtcdClusters[0], fieldPath.Child("networking").Child("calico"))...)
 		}
 	}

@@ -196,12 +196,12 @@ func validateSubnet(subnet *kops.ClusterSubnetSpec, fieldPath *field.Path) field

 	// name is required
 	if subnet.Name == "" {
-		allErrs = append(allErrs, field.Required(fieldPath.Child("Name"), ""))
+		allErrs = append(allErrs, field.Required(fieldPath.Child("name"), ""))
 	}

 	// CIDR
 	if subnet.CIDR != "" {
-		allErrs = append(allErrs, validateCIDR(subnet.CIDR, fieldPath.Child("CIDR"))...)
+		allErrs = append(allErrs, validateCIDR(subnet.CIDR, fieldPath.Child("cidr"))...)
 	}

 	return allErrs
@@ -212,10 +212,10 @@ func validateFileAssetSpec(v *kops.FileAssetSpec, fieldPath *field.Path) field.E
 	allErrs := field.ErrorList{}

 	if v.Name == "" {
-		allErrs = append(allErrs, field.Required(fieldPath.Child("Name"), ""))
+		allErrs = append(allErrs, field.Required(fieldPath.Child("name"), ""))
 	}
 	if v.Content == "" {
-		allErrs = append(allErrs, field.Required(fieldPath.Child("Content"), ""))
+		allErrs = append(allErrs, field.Required(fieldPath.Child("content"), ""))
 	}

 	return allErrs
@@ -250,7 +250,7 @@ func validateHookSpec(v *kops.HookSpec, fieldPath *field.Path) field.ErrorList {
 	}

 	if v.ExecContainer != nil {
-		allErrs = append(allErrs, validateExecContainerAction(v.ExecContainer, fieldPath.Child("ExecContainer"))...)
+		allErrs = append(allErrs, validateExecContainerAction(v.ExecContainer, fieldPath.Child("execContainer"))...)
 	}

 	return allErrs
@@ -260,7 +260,7 @@ func validateExecContainerAction(v *kops.ExecContainerAction, fldPath *field.Pat
 	allErrs := field.ErrorList{}

 	if v.Image == "" {
-		allErrs = append(allErrs, field.Required(fldPath.Child("Image"), "Image must be specified"))
+		allErrs = append(allErrs, field.Required(fldPath.Child("image"), "image must be specified"))
 	}

 	return allErrs
@@ -299,7 +299,7 @@ func validateNetworking(c *kops.ClusterSpec, v *kops.NetworkingSpec, fldPath *fi
 	allErrs := field.ErrorList{}

 	if v.Flannel != nil {
-		allErrs = append(allErrs, validateNetworkingFlannel(v.Flannel, fldPath.Child("Flannel"))...)
+		allErrs = append(allErrs, validateNetworkingFlannel(v.Flannel, fldPath.Child("flannel"))...)
 	}

 	if v.GCE != nil {
@@ -314,11 +314,11 @@ func validateNetworkingFlannel(v *kops.FlannelNetworkingSpec, fldPath *field.Pat

 	switch v.Backend {
 	case "":
-		allErrs = append(allErrs, field.Required(fldPath.Child("Backend"), "Flannel backend must be specified"))
+		allErrs = append(allErrs, field.Required(fldPath.Child("backend"), "Flannel backend must be specified"))
 	case "udp", "vxlan":
 		// OK
 	default:
-		allErrs = append(allErrs, field.NotSupported(fldPath.Child("Backend"), v.Backend, []string{"udp", "vxlan"}))
+		allErrs = append(allErrs, field.NotSupported(fldPath.Child("backend"), v.Backend, []string{"udp", "vxlan"}))
 	}

 	return allErrs
@@ -398,18 +398,18 @@ func ValidateEtcdVersionForCalicoV3(e *kops.EtcdClusterSpec, majorVersion string
 	}
 	sem, err := semver.Parse(strings.TrimPrefix(version, "v"))
 	if err != nil {
-		allErrs = append(allErrs, field.InternalError(fldPath.Child("MajorVersion"), fmt.Errorf("failed to parse Etcd version to check compatibility: %s", err)))
+		allErrs = append(allErrs, field.InternalError(fldPath.Child("majorVersion"), fmt.Errorf("failed to parse Etcd version to check compatibility: %s", err)))
 	}

 	if sem.Major != 3 {
 		if e.Version == "" {
 			allErrs = append(allErrs,
-				field.Invalid(fldPath.Child("MajorVersion"), majorVersion,
+				field.Forbidden(fldPath.Child("majorVersion"),
 					fmt.Sprintf("Unable to use v3 when ETCD version for %s cluster is default(%s)",
 						e.Name, components.DefaultEtcd2Version)))
 		} else {
 			allErrs = append(allErrs,
-				field.Invalid(fldPath.Child("MajorVersion"), majorVersion,
+				field.Forbidden(fldPath.Child("majorVersion"),
 					fmt.Sprintf("Unable to use v3 when ETCD version for %s cluster is %s", e.Name, e.Version)))
 		}
 	}
@@ -422,7 +422,7 @@ func validateNetworkingCalico(v *kops.CalicoNetworkingSpec, e *kops.EtcdClusterS
 	} else {
 		allErrs = append(allErrs,
-			field.Invalid(fldPath.Child("TyphaReplicas"), v.TyphaReplicas,
+			field.Invalid(fldPath.Child("typhaReplicas"), v.TyphaReplicas,
 				fmt.Sprintf("Unable to set number of Typha replicas to less than 0, you've specified %d", v.TyphaReplicas)))
 	}
 	switch v.MajorVersion {
@@ -431,7 +431,7 @@ func validateNetworkingCalico(v *kops.CalicoNetworkingSpec, e *kops.EtcdClusterS
 	case "v3":
 		allErrs = append(allErrs, ValidateEtcdVersionForCalicoV3(e, v.MajorVersion, fldPath)...)
 	default:
-		allErrs = append(allErrs, field.NotSupported(fldPath.Child("MajorVersion"), v.MajorVersion, []string{"v3"}))
+		allErrs = append(allErrs, field.NotSupported(fldPath.Child("majorVersion"), v.MajorVersion, []string{"v3"}))
 	}

 	return allErrs
@@ -451,11 +451,11 @@ func validateRollingUpdate(rollingUpdate *kops.RollingUpdate, fldpath *field.Pat
 	if rollingUpdate.MaxUnavailable != nil {
 		unavailable, err := intstr.GetValueFromIntOrPercent(rollingUpdate.MaxUnavailable, 1, false)
 		if err != nil {
-			allErrs = append(allErrs, field.Invalid(fldpath.Child("MaxUnavailable"), rollingUpdate.MaxUnavailable,
+			allErrs = append(allErrs, field.Invalid(fldpath.Child("maxUnavailable"), rollingUpdate.MaxUnavailable,
 				fmt.Sprintf("Unable to parse: %v", err)))
 		}
 		if unavailable < 0 {
-			allErrs = append(allErrs, field.Invalid(fldpath.Child("MaxUnavailable"), rollingUpdate.MaxUnavailable, "Cannot be negative"))
+			allErrs = append(allErrs, field.Invalid(fldpath.Child("maxUnavailable"), rollingUpdate.MaxUnavailable, "Cannot be negative"))
 		}
 	}
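A second minimal sketch, again not from the repo: how a validator like the rolling-update check above is typically consumed, with the ErrorList folded into a single error via ToAggregate so the lowercase paths reach the user. The helper name validateMaxUnavailable and the sample value are invented for the example; only the field and intstr calls mirror the hunk above.

// Sketch only: consuming an ErrorList produced by a validator like the one above.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/apimachinery/pkg/util/validation/field"
)

// validateMaxUnavailable is a hypothetical stand-in for validateRollingUpdate.
func validateMaxUnavailable(maxUnavailable *intstr.IntOrString, fldPath *field.Path) field.ErrorList {
	allErrs := field.ErrorList{}
	if maxUnavailable != nil {
		unavailable, err := intstr.GetValueFromIntOrPercent(maxUnavailable, 1, false)
		if err != nil {
			allErrs = append(allErrs, field.Invalid(fldPath.Child("maxUnavailable"), maxUnavailable, fmt.Sprintf("Unable to parse: %v", err)))
		}
		if unavailable < 0 {
			allErrs = append(allErrs, field.Invalid(fldPath.Child("maxUnavailable"), maxUnavailable, "Cannot be negative"))
		}
	}
	return allErrs
}

func main() {
	bad := intstr.FromInt(-1)
	errs := validateMaxUnavailable(&bad, field.NewPath("spec", "rollingUpdate"))
	if agg := errs.ToAggregate(); agg != nil {
		// Prints something like: spec.rollingUpdate.maxUnavailable: Invalid value: ...: Cannot be negative
		fmt.Println(agg)
	}
}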
@@ -125,14 +125,14 @@ func TestValidateSubnets(t *testing.T) {
 			Input: []kops.ClusterSubnetSpec{
 				{Name: ""},
 			},
-			ExpectedErrors: []string{"Required value::Subnets[0].Name"},
+			ExpectedErrors: []string{"Required value::subnets[0].name"},
 		},
 		{
 			Input: []kops.ClusterSubnetSpec{
 				{Name: "a"},
 				{Name: "a"},
 			},
-			ExpectedErrors: []string{"Invalid value::Subnets"},
+			ExpectedErrors: []string{"Invalid value::subnets"},
 		},
 		{
 			Input: []kops.ClusterSubnetSpec{
@@ -145,17 +145,17 @@ func TestValidateSubnets(t *testing.T) {
 				{Name: "a", ProviderID: "a"},
 				{Name: "b", ProviderID: ""},
 			},
-			ExpectedErrors: []string{"Invalid value::Subnets"},
+			ExpectedErrors: []string{"Invalid value::subnets"},
 		},
 		{
 			Input: []kops.ClusterSubnetSpec{
 				{Name: "a", CIDR: "10.128.0.0/8"},
 			},
-			ExpectedErrors: []string{"Invalid value::Subnets[0].CIDR"},
+			ExpectedErrors: []string{"Invalid value::subnets[0].cidr"},
 		},
 	}
 	for _, g := range grid {
-		errs := validateSubnets(g.Input, field.NewPath("Subnets"))
+		errs := validateSubnets(g.Input, field.NewPath("subnets"))

 		testErrors(t, g.Input, errs, g.ExpectedErrors)
 	}
@@ -270,13 +270,13 @@ func Test_Validate_Networking_Flannel(t *testing.T) {
 			Input: kops.FlannelNetworkingSpec{
 				Backend: "",
 			},
-			ExpectedErrors: []string{"Required value::Networking.Flannel.Backend"},
+			ExpectedErrors: []string{"Required value::networking.flannel.backend"},
 		},
 		{
 			Input: kops.FlannelNetworkingSpec{
 				Backend: "nope",
 			},
-			ExpectedErrors: []string{"Unsupported value::Networking.Flannel.Backend"},
+			ExpectedErrors: []string{"Unsupported value::networking.flannel.backend"},
 		},
 	}
 	for _, g := range grid {
@@ -286,7 +286,7 @@ func Test_Validate_Networking_Flannel(t *testing.T) {
 		cluster := &kops.Cluster{}
 		cluster.Spec.Networking = networking

-		errs := validateNetworking(&cluster.Spec, networking, field.NewPath("Networking"))
+		errs := validateNetworking(&cluster.Spec, networking, field.NewPath("networking"))
 		testErrors(t, g.Input, errs, g.ExpectedErrors)
 	}
 }
@@ -372,7 +372,7 @@ func Test_Validate_Calico(t *testing.T) {
 				},
 				Etcd: &kops.EtcdClusterSpec{},
 			},
-			ExpectedErrors: []string{"Invalid value::Calico.TyphaReplicas"},
+			ExpectedErrors: []string{"Invalid value::calico.typhaReplicas"},
 		},
 		{
 			Input: caliInput{
@@ -393,11 +393,11 @@ func Test_Validate_Calico(t *testing.T) {
 					Version: "2.2.18",
 				},
 			},
-			ExpectedErrors: []string{"Invalid value::Calico.MajorVersion"},
+			ExpectedErrors: []string{"Forbidden::calico.majorVersion"},
 		},
 	}
 	for _, g := range grid {
-		errs := validateNetworkingCalico(g.Input.Calico, g.Input.Etcd, field.NewPath("Calico"))
+		errs := validateNetworkingCalico(g.Input.Calico, g.Input.Etcd, field.NewPath("calico"))
 		testErrors(t, g.Input, errs, g.ExpectedErrors)
 	}
 }
@@ -424,23 +424,23 @@ func Test_Validate_RollingUpdate(t *testing.T) {
 			Input: kops.RollingUpdate{
 				MaxUnavailable: intStr(intstr.FromString("nope")),
 			},
-			ExpectedErrors: []string{"Invalid value::TestField.MaxUnavailable"},
+			ExpectedErrors: []string{"Invalid value::testField.maxUnavailable"},
 		},
 		{
 			Input: kops.RollingUpdate{
 				MaxUnavailable: intStr(intstr.FromInt(-1)),
 			},
-			ExpectedErrors: []string{"Invalid value::TestField.MaxUnavailable"},
+			ExpectedErrors: []string{"Invalid value::testField.maxUnavailable"},
 		},
 		{
 			Input: kops.RollingUpdate{
 				MaxUnavailable: intStr(intstr.FromString("-1%")),
 			},
-			ExpectedErrors: []string{"Invalid value::TestField.MaxUnavailable"},
+			ExpectedErrors: []string{"Invalid value::testField.maxUnavailable"},
 		},
 	}
 	for _, g := range grid {
-		errs := validateRollingUpdate(&g.Input, field.NewPath("TestField"))
+		errs := validateRollingUpdate(&g.Input, field.NewPath("testField"))
 		testErrors(t, g.Input, errs, g.ExpectedErrors)
 	}
 }
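The test hunks above compare against strings such as "Required value::subnets[0].name" via a repo-local testErrors helper that is not shown in this diff. A self-contained sketch of the same table-driven idea follows; the "Type::Field" key format is an assumption inferred from the expected strings, not the repo's actual helper:

// Sketch only: a stand-in for the testErrors pattern used above.
package validation

import (
	"fmt"
	"testing"

	"k8s.io/apimachinery/pkg/util/validation/field"
)

// hasExpectedErrors checks that every expected "Type::Field" key appears in errs.
func hasExpectedErrors(errs field.ErrorList, expected []string) bool {
	seen := map[string]bool{}
	for _, err := range errs {
		seen[fmt.Sprintf("%v::%v", err.Type, err.Field)] = true
	}
	for _, e := range expected {
		if !seen[e] {
			return false
		}
	}
	return true
}

func TestFieldPathsAreLowercase(t *testing.T) {
	errs := field.ErrorList{
		field.Required(field.NewPath("subnets").Index(0).Child("name"), ""),
	}
	if !hasExpectedErrors(errs, []string{"Required value::subnets[0].name"}) {
		t.Errorf("unexpected errors: %v", errs)
	}
}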
@@ -125,7 +125,7 @@ func (r *ClusterVFS) Create(c *api.Cluster) (*api.Cluster, error) {
 func (r *ClusterVFS) Update(c *api.Cluster, status *api.ClusterStatus) (*api.Cluster, error) {
 	clusterName := c.ObjectMeta.Name
 	if clusterName == "" {
-		return nil, field.Required(field.NewPath("Name"), "clusterName is required")
+		return nil, field.Required(field.NewPath("objectMeta", "name"), "clusterName is required")
 	}

 	old, err := r.Get(clusterName, metav1.GetOptions{})
@@ -42,7 +42,7 @@ func RunSetCluster(f *util.Factory, cmd *cobra.Command, out io.Writer, options *
 	}

 	if options.ClusterName == "" {
-		return field.Required(field.NewPath("ClusterName"), "Cluster name is required")
+		return field.Required(field.NewPath("clusterName"), "Cluster name is required")
 	}

 	clientset, err := f.Clientset()
@@ -314,42 +314,42 @@ func TestPopulateCluster_Zone_Required(t *testing.T) {
 	c := buildMinimalCluster()
 	c.Spec.Subnets = nil

-	expectErrorFromPopulateCluster(t, c, "Subnet")
+	expectErrorFromPopulateCluster(t, c, "subnet")
 }

 func TestPopulateCluster_NetworkCIDR_Required(t *testing.T) {
 	c := buildMinimalCluster()
 	c.Spec.NetworkCIDR = ""

-	expectErrorFromPopulateCluster(t, c, "NetworkCIDR")
+	expectErrorFromPopulateCluster(t, c, "networkCIDR")
 }

 func TestPopulateCluster_NonMasqueradeCIDR_Required(t *testing.T) {
 	c := buildMinimalCluster()
 	c.Spec.NonMasqueradeCIDR = ""

-	expectErrorFromPopulateCluster(t, c, "NonMasqueradeCIDR")
+	expectErrorFromPopulateCluster(t, c, "nonMasqueradeCIDR")
 }

 func TestPopulateCluster_CloudProvider_Required(t *testing.T) {
 	c := buildMinimalCluster()
 	c.Spec.CloudProvider = ""

-	expectErrorFromPopulateCluster(t, c, "CloudProvider")
+	expectErrorFromPopulateCluster(t, c, "cloudProvider")
 }

 func TestPopulateCluster_TopologyInvalidNil_Required(t *testing.T) {
 	c := buildMinimalCluster()
 	c.Spec.Topology.Masters = ""
 	c.Spec.Topology.Nodes = ""
-	expectErrorFromPopulateCluster(t, c, "Topology")
+	expectErrorFromPopulateCluster(t, c, "topology")
 }

 func TestPopulateCluster_TopologyInvalidValue_Required(t *testing.T) {
 	c := buildMinimalCluster()
 	c.Spec.Topology.Masters = "123"
 	c.Spec.Topology.Nodes = "abc"
-	expectErrorFromPopulateCluster(t, c, "Topology")
+	expectErrorFromPopulateCluster(t, c, "topology")
 }

 //func TestPopulateCluster_TopologyInvalidMatchingValues_Required(t *testing.T) {
@@ -367,7 +367,7 @@ func TestPopulateCluster_BastionInvalidMatchingValues_Required(t *testing.T) {
 	c.Spec.Topology.Masters = api.TopologyPublic
 	c.Spec.Topology.Nodes = api.TopologyPublic
 	c.Spec.Topology.Bastion = &api.BastionSpec{}
-	expectErrorFromPopulateCluster(t, c, "Bastion")
+	expectErrorFromPopulateCluster(t, c, "bastion")
 }

 func TestPopulateCluster_BastionIdleTimeoutInvalidNegative_Required(t *testing.T) {
@@ -378,7 +378,7 @@ func TestPopulateCluster_BastionIdleTimeoutInvalidNegative_Required(t *testing.T
 	c.Spec.Topology.Nodes = api.TopologyPrivate
 	c.Spec.Topology.Bastion = &api.BastionSpec{}
 	c.Spec.Topology.Bastion.IdleTimeoutSeconds = fi.Int64(-1)
-	expectErrorFromPopulateCluster(t, c, "Bastion")
+	expectErrorFromPopulateCluster(t, c, "bastion")
 }

 func expectErrorFromPopulateCluster(t *testing.T, c *api.Cluster, message string) {
@@ -49,7 +49,7 @@ func TestPopulateInstanceGroup_Name_Required(t *testing.T) {

 	channel := &api.Channel{}

-	expectErrorFromPopulateInstanceGroup(t, cluster, g, channel, "Name")
+	expectErrorFromPopulateInstanceGroup(t, cluster, g, channel, "objectMeta.name")
 }

 func TestPopulateInstanceGroup_Role_Required(t *testing.T) {
@@ -59,7 +59,7 @@ func TestPopulateInstanceGroup_Role_Required(t *testing.T) {

 	channel := &api.Channel{}

-	expectErrorFromPopulateInstanceGroup(t, cluster, g, channel, "Role")
+	expectErrorFromPopulateInstanceGroup(t, cluster, g, channel, "spec.role")
 }

 func expectErrorFromPopulateInstanceGroup(t *testing.T, cluster *api.Cluster, g *api.InstanceGroup, channel *api.Channel, message string) {
@@ -140,7 +140,7 @@ func TestValidateFull_UpdatePolicy_Valid(t *testing.T) {
 func TestValidateFull_UpdatePolicy_Invalid(t *testing.T) {
 	c := buildDefaultCluster(t)
 	c.Spec.UpdatePolicy = fi.String("not-a-real-value")
-	expectErrorFromValidate(t, c, "UpdatePolicy")
+	expectErrorFromValidate(t, c, "spec.updatePolicy")
 }

 func Test_Validate_No_Classic_With_14(t *testing.T) {
@@ -150,7 +150,7 @@ func Test_Validate_No_Classic_With_14(t *testing.T) {
 		Classic: &api.ClassicNetworkingSpec{},
 	}

-	expectErrorFromValidate(t, c, "spec.Networking")
+	expectErrorFromValidate(t, c, "spec.networking")
 }

 func Test_Validate_Kubenet_With_14(t *testing.T) {
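For completeness, a hedged sketch of the kind of substring check a helper like expectErrorFromValidate above might perform; the real helper is defined elsewhere in the repo and may differ, so treat this as illustrative only:

// Sketch only: asserting that an aggregated validation error mentions a
// lowercase field path such as "spec.updatePolicy".
package validation

import (
	"strings"
	"testing"

	"k8s.io/apimachinery/pkg/util/validation/field"
)

func expectFieldError(t *testing.T, errs field.ErrorList, substring string) {
	t.Helper()
	agg := errs.ToAggregate()
	if agg == nil {
		t.Fatalf("expected error containing %q, got none", substring)
	}
	if !strings.Contains(agg.Error(), substring) {
		t.Fatalf("expected error containing %q, got: %v", substring, agg)
	}
}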