Import package aliases modified

Changed the import alias from `api` to `kopsapi` for imports of k8s.io/kops/pkg/apis/kops
This commit is contained in:
Martin Tomes 2020-04-17 16:55:08 +02:00
parent 212f0ff5e9
commit 462ca78f2a
16 changed files with 129 additions and 129 deletions

View File

@ -29,7 +29,7 @@ import (
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kops/cmd/kops/util"
api "k8s.io/kops/pkg/apis/kops"
kopsapi "k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/apis/kops/validation"
"k8s.io/kops/pkg/kopscodecs"
"k8s.io/kops/pkg/try"
@ -76,7 +76,7 @@ var (
// NewCmdCreateInstanceGroup create a new cobra command object for creating a instancegroup.
func NewCmdCreateInstanceGroup(f *util.Factory, out io.Writer) *cobra.Command {
options := &CreateInstanceGroupOptions{
Role: string(api.InstanceGroupRoleNode),
Role: string(kopsapi.InstanceGroupRoleNode),
Edit: true,
}
@ -97,7 +97,7 @@ func NewCmdCreateInstanceGroup(f *util.Factory, out io.Writer) *cobra.Command {
// TODO: Create Enum helper - or is there one in k8s already?
var allRoles []string
for _, r := range api.AllInstanceGroupRoles {
for _, r := range kopsapi.AllInstanceGroupRoles {
allRoles = append(allRoles, string(r))
}
@ -148,10 +148,10 @@ func RunCreateInstanceGroup(ctx context.Context, f *util.Factory, cmd *cobra.Com
}
// Populate some defaults
ig := &api.InstanceGroup{}
ig := &kopsapi.InstanceGroup{}
ig.ObjectMeta.Name = groupName
role, ok := api.ParseInstanceGroupRole(options.Role, true)
role, ok := kopsapi.ParseInstanceGroupRole(options.Role, true)
if !ok {
return fmt.Errorf("unknown role %q", options.Role)
}
@ -165,7 +165,7 @@ func RunCreateInstanceGroup(ctx context.Context, f *util.Factory, cmd *cobra.Com
}
ig.AddInstanceGroupNodeLabel()
if api.CloudProviderID(cluster.Spec.CloudProvider) == api.CloudProviderGCE {
if kopsapi.CloudProviderID(cluster.Spec.CloudProvider) == kopsapi.CloudProviderGCE {
fmt.Println("detected a GCE cluster; labeling nodes to receive metadata-proxy.")
ig.Spec.NodeLabels["cloud.google.com/metadata-proxy-ready"] = "true"
}
@ -178,7 +178,7 @@ func RunCreateInstanceGroup(ctx context.Context, f *util.Factory, cmd *cobra.Com
// Cluster name is not populated, and we need it
ig.ObjectMeta.Labels = make(map[string]string)
ig.ObjectMeta.Labels[api.LabelClusterName] = cluster.ObjectMeta.Name
ig.ObjectMeta.Labels[kopsapi.LabelClusterName] = cluster.ObjectMeta.Name
switch options.Output {
case OutputYaml:
@ -222,7 +222,7 @@ func RunCreateInstanceGroup(ctx context.Context, f *util.Factory, cmd *cobra.Com
if err != nil {
return fmt.Errorf("error parsing yaml: %v", err)
}
group, ok := obj.(*api.InstanceGroup)
group, ok := obj.(*kopsapi.InstanceGroup)
if !ok {
return fmt.Errorf("unexpected object type: %T", obj)
}

View File

@ -25,7 +25,7 @@ import (
"k8s.io/client-go/tools/clientcmd"
"k8s.io/klog"
"k8s.io/kops/cmd/kops/util"
api "k8s.io/kops/pkg/apis/kops"
kopsapi "k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/kubeconfig"
"k8s.io/kops/pkg/resources"
resourceops "k8s.io/kops/pkg/resources/ops"
@ -102,7 +102,7 @@ func RunDeleteCluster(ctx context.Context, f *util.Factory, out io.Writer, optio
}
var cloud fi.Cloud
var cluster *api.Cluster
var cluster *kopsapi.Cluster
var err error
if options.External {

View File

@ -25,7 +25,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/kops/cmd/kops/util"
api "k8s.io/kops/pkg/apis/kops"
kopsapi "k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/commands"
"k8s.io/kops/pkg/kubeconfig"
"k8s.io/kubectl/pkg/util/i18n"
@ -81,7 +81,7 @@ func RunExportKubecfg(ctx context.Context, f *util.Factory, out io.Writer, optio
return err
}
var clusterList []*api.Cluster
var clusterList []*kopsapi.Cluster
if options.all {
if len(args) != 0 {
return fmt.Errorf("Cannot use both --all flag and positional arguments")

View File

@ -29,7 +29,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kops/cmd/kops/util"
api "k8s.io/kops/pkg/apis/kops"
kopsapi "k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/apis/kops/registry"
"k8s.io/kops/util/pkg/tables"
"k8s.io/kubectl/pkg/util/i18n"
@ -125,7 +125,7 @@ func RunGetClusters(ctx context.Context, f Factory, out io.Writer, options *GetC
return err
}
var clusterList []*api.Cluster
var clusterList []*kopsapi.Cluster
if len(options.ClusterNames) != 1 {
list, err := client.ListClusters(ctx, metav1.ListOptions{})
if err != nil {
@ -186,14 +186,14 @@ func RunGetClusters(ctx context.Context, f Factory, out io.Writer, options *GetC
// filterClustersByName returns the clusters matching the specified names.
// If names are specified and no cluster is found with a name, we return an error.
func filterClustersByName(clusterNames []string, clusters []*api.Cluster) ([]*api.Cluster, error) {
func filterClustersByName(clusterNames []string, clusters []*kopsapi.Cluster) ([]*kopsapi.Cluster, error) {
if len(clusterNames) != 0 {
// Build a map as we want to return them in the same order as args
m := make(map[string]*api.Cluster)
m := make(map[string]*kopsapi.Cluster)
for _, c := range clusters {
m[c.ObjectMeta.Name] = c
}
var filtered []*api.Cluster
var filtered []*kopsapi.Cluster
for _, clusterName := range clusterNames {
c := m[clusterName]
if c == nil {
@ -208,15 +208,15 @@ func filterClustersByName(clusterNames []string, clusters []*api.Cluster) ([]*ap
return clusters, nil
}
func clusterOutputTable(clusters []*api.Cluster, out io.Writer) error {
func clusterOutputTable(clusters []*kopsapi.Cluster, out io.Writer) error {
t := &tables.Table{}
t.AddColumn("NAME", func(c *api.Cluster) string {
t.AddColumn("NAME", func(c *kopsapi.Cluster) string {
return c.ObjectMeta.Name
})
t.AddColumn("CLOUD", func(c *api.Cluster) string {
t.AddColumn("CLOUD", func(c *kopsapi.Cluster) string {
return c.Spec.CloudProvider
})
t.AddColumn("ZONES", func(c *api.Cluster) string {
t.AddColumn("ZONES", func(c *kopsapi.Cluster) string {
zones := sets.NewString()
for _, s := range c.Spec.Subnets {
if s.Zone != "" {
@ -276,14 +276,14 @@ func fullOutputYAML(out io.Writer, args ...runtime.Object) error {
return nil
}
func fullClusterSpecs(clusters []*api.Cluster) ([]*api.Cluster, error) {
var fullSpecs []*api.Cluster
func fullClusterSpecs(clusters []*kopsapi.Cluster) ([]*kopsapi.Cluster, error) {
var fullSpecs []*kopsapi.Cluster
for _, cluster := range clusters {
configBase, err := registry.ConfigBase(cluster)
if err != nil {
return nil, fmt.Errorf("error reading full cluster spec for %q: %v", cluster.ObjectMeta.Name, err)
}
fullSpec := &api.Cluster{}
fullSpec := &kopsapi.Cluster{}
err = registry.ReadConfigDeprecated(configBase.Join(registry.PathClusterCompleted), fullSpec)
if err != nil {
return nil, fmt.Errorf("error reading full cluster spec for %q: %v", cluster.ObjectMeta.Name, err)

View File

@ -32,7 +32,7 @@ import (
"k8s.io/client-go/kubernetes"
_ "k8s.io/client-go/plugin/pkg/client/auth"
"k8s.io/kops/cmd/kops/util"
api "k8s.io/kops/pkg/apis/kops"
kopsapi "k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/cloudinstances"
"k8s.io/kops/pkg/instancegroups"
"k8s.io/kops/pkg/pretty"
@ -268,7 +268,7 @@ func RunRollingUpdateCluster(ctx context.Context, f *util.Factory, out io.Writer
return err
}
var instanceGroups []*api.InstanceGroup
var instanceGroups []*kopsapi.InstanceGroup
for i := range list.Items {
instanceGroups = append(instanceGroups, &list.Items[i])
}
@ -276,10 +276,10 @@ func RunRollingUpdateCluster(ctx context.Context, f *util.Factory, out io.Writer
warnUnmatched := true
if len(options.InstanceGroups) != 0 {
var filtered []*api.InstanceGroup
var filtered []*kopsapi.InstanceGroup
for _, instanceGroupName := range options.InstanceGroups {
var found *api.InstanceGroup
var found *kopsapi.InstanceGroup
for _, ig := range instanceGroups {
if ig.ObjectMeta.Name == instanceGroupName {
found = ig
@ -300,11 +300,11 @@ func RunRollingUpdateCluster(ctx context.Context, f *util.Factory, out io.Writer
}
if len(options.InstanceGroupRoles) != 0 {
var filtered []*api.InstanceGroup
var filtered []*kopsapi.InstanceGroup
for _, ig := range instanceGroups {
for _, role := range options.InstanceGroupRoles {
if ig.Spec.Role == api.InstanceGroupRole(strings.Title(strings.ToLower(role))) {
if ig.Spec.Role == kopsapi.InstanceGroupRole(strings.Title(strings.ToLower(role))) {
filtered = append(filtered, ig)
continue
}

View File

@ -28,7 +28,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kops"
api "k8s.io/kops/pkg/apis/kops"
kopsapi "k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/apis/kops/util"
"k8s.io/kops/pkg/commands"
"k8s.io/kops/upup/pkg/fi"
@ -96,7 +96,7 @@ func (c *UpgradeClusterCmd) Run(ctx context.Context, args []string) error {
return err
}
if cluster.ObjectMeta.Annotations[api.AnnotationNameManagement] == api.AnnotationValueManagementImported {
if cluster.ObjectMeta.Annotations[kopsapi.AnnotationNameManagement] == kopsapi.AnnotationValueManagementImported {
return fmt.Errorf("upgrade is not for use with imported clusters (did you mean `kops toolbox convert-imported`?)")
}
@ -105,7 +105,7 @@ func (c *UpgradeClusterCmd) Run(ctx context.Context, args []string) error {
channelLocation = cluster.Spec.Channel
}
if channelLocation == "" {
channelLocation = api.DefaultChannel
channelLocation = kopsapi.DefaultChannel
}
var actions []*upgradeAction
@ -121,7 +121,7 @@ func (c *UpgradeClusterCmd) Run(ctx context.Context, args []string) error {
})
}
channel, err := api.LoadChannel(channelLocation)
channel, err := kopsapi.LoadChannel(channelLocation)
if err != nil {
return fmt.Errorf("error loading channel %q: %v", channelLocation, err)
}
@ -129,7 +129,7 @@ func (c *UpgradeClusterCmd) Run(ctx context.Context, args []string) error {
channelClusterSpec := channel.Spec.Cluster
if channelClusterSpec == nil {
// Just to prevent too much nil handling
channelClusterSpec = &api.ClusterSpec{}
channelClusterSpec = &kopsapi.ClusterSpec{}
}
var currentKubernetesVersion *semver.Version
@ -142,7 +142,7 @@ func (c *UpgradeClusterCmd) Run(ctx context.Context, args []string) error {
}
}
proposedKubernetesVersion := api.RecommendedKubernetesVersion(channel, kops.Version)
proposedKubernetesVersion := kopsapi.RecommendedKubernetesVersion(channel, kops.Version)
// We won't propose a downgrade
// TODO: What if a kubernetes version is bad?
@ -173,7 +173,7 @@ func (c *UpgradeClusterCmd) Run(ctx context.Context, args []string) error {
// Prompt to upgrade to kubenet
if channelClusterSpec.Networking != nil {
if cluster.Spec.Networking == nil {
cluster.Spec.Networking = &api.NetworkingSpec{}
cluster.Spec.Networking = &kopsapi.NetworkingSpec{}
}
// TODO: make this less hard coded
if channelClusterSpec.Networking.Kubenet != nil && channelClusterSpec.Networking.Classic != nil {
@ -226,7 +226,7 @@ func (c *UpgradeClusterCmd) Run(ctx context.Context, args []string) error {
// Prompt to upgrade to overlayfs
if channelClusterSpec.Docker != nil {
if cluster.Spec.Docker == nil {
cluster.Spec.Docker = &api.DockerConfig{}
cluster.Spec.Docker = &kopsapi.DockerConfig{}
}
// TODO: make less hard-coded
if channelClusterSpec.Docker.Storage != nil {

View File

@ -35,7 +35,7 @@ import (
"k8s.io/client-go/tools/clientcmd"
"k8s.io/klog"
"k8s.io/kops/cmd/kops/util"
api "k8s.io/kops/pkg/apis/kops"
kopsapi "k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/validation"
"k8s.io/kops/util/pkg/tables"
)
@ -113,7 +113,7 @@ func RunValidateCluster(ctx context.Context, f *util.Factory, cmd *cobra.Command
fmt.Fprintf(out, "Validating cluster %v\n\n", cluster.ObjectMeta.Name)
}
var instanceGroups []api.InstanceGroup
var instanceGroups []kopsapi.InstanceGroup
for _, ig := range list.Items {
instanceGroups = append(instanceGroups, ig)
klog.V(2).Infof("instance group: %#v\n\n", ig.Spec)
@ -217,24 +217,24 @@ func RunValidateCluster(ctx context.Context, f *util.Factory, cmd *cobra.Command
}
}
func validateClusterOutputTable(result *validation.ValidationCluster, cluster *api.Cluster, instanceGroups []api.InstanceGroup, out io.Writer) error {
func validateClusterOutputTable(result *validation.ValidationCluster, cluster *kopsapi.Cluster, instanceGroups []kopsapi.InstanceGroup, out io.Writer) error {
t := &tables.Table{}
t.AddColumn("NAME", func(c api.InstanceGroup) string {
t.AddColumn("NAME", func(c kopsapi.InstanceGroup) string {
return c.ObjectMeta.Name
})
t.AddColumn("ROLE", func(c api.InstanceGroup) string {
t.AddColumn("ROLE", func(c kopsapi.InstanceGroup) string {
return string(c.Spec.Role)
})
t.AddColumn("MACHINETYPE", func(c api.InstanceGroup) string {
t.AddColumn("MACHINETYPE", func(c kopsapi.InstanceGroup) string {
return c.Spec.MachineType
})
t.AddColumn("SUBNETS", func(c api.InstanceGroup) string {
t.AddColumn("SUBNETS", func(c kopsapi.InstanceGroup) string {
return strings.Join(c.Spec.Subnets, ",")
})
t.AddColumn("MIN", func(c api.InstanceGroup) string {
t.AddColumn("MIN", func(c kopsapi.InstanceGroup) string {
return int32PointerToString(c.Spec.MinSize)
})
t.AddColumn("MAX", func(c api.InstanceGroup) string {
t.AddColumn("MAX", func(c kopsapi.InstanceGroup) string {
return int32PointerToString(c.Spec.MaxSize)
})

View File

@ -19,14 +19,14 @@ package components
import (
"testing"
api "k8s.io/kops/pkg/apis/kops"
kopsapi "k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/apis/kops/util"
"k8s.io/kops/pkg/assets"
)
func buildContainerdCluster(version string) *api.Cluster {
return &api.Cluster{
Spec: api.ClusterSpec{
func buildContainerdCluster(version string) *kopsapi.Cluster {
return &kopsapi.Cluster{
Spec: kopsapi.ClusterSpec{
CloudProvider: "aws",
KubernetesVersion: version,
},
@ -127,7 +127,7 @@ func Test_Build_Containerd_Unneeded_Runtime(t *testing.T) {
c := buildContainerdCluster("1.11.0")
c.Spec.ContainerRuntime = "docker"
c.Spec.Docker = &api.DockerConfig{
c.Spec.Docker = &kopsapi.DockerConfig{
Version: &v,
}
b := assets.NewAssetBuilder(c, "")
@ -156,7 +156,7 @@ func Test_Build_Containerd_Needed_Runtime(t *testing.T) {
c := buildContainerdCluster("1.11.0")
c.Spec.ContainerRuntime = "docker"
c.Spec.Docker = &api.DockerConfig{
c.Spec.Docker = &kopsapi.DockerConfig{
Version: &v,
}
b := assets.NewAssetBuilder(c, "")

View File

@ -21,7 +21,7 @@ import (
"path"
"testing"
api "k8s.io/kops/pkg/apis/kops"
kopsapi "k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/assets"
"k8s.io/kops/pkg/client/simple/vfsclientset"
"k8s.io/kops/pkg/kopscodecs"
@ -60,7 +60,7 @@ func runChannelBuilderTest(t *testing.T, key string, addonManifests []string) {
if err != nil {
t.Fatalf("error parsing cluster yaml %q: %v", clusterYamlPath, err)
}
cluster := obj.(*api.Cluster)
cluster := obj.(*kopsapi.Cluster)
if err := PerformAssignments(cluster); err != nil {
t.Fatalf("error from PerformAssignments for %q: %v", key, err)

View File

@ -21,14 +21,14 @@ import (
"strings"
"testing"
api "k8s.io/kops/pkg/apis/kops"
kopsapi "k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/apis/kops/validation"
"k8s.io/kops/upup/pkg/fi"
)
func TestDeepValidate_OK(t *testing.T) {
c := buildDefaultCluster(t)
var groups []*api.InstanceGroup
var groups []*kopsapi.InstanceGroup
groups = append(groups, buildMinimalMasterInstanceGroup("subnet-us-mock-1a"))
groups = append(groups, buildMinimalNodeInstanceGroup("subnet-us-mock-1a"))
err := validation.DeepValidate(c, groups, true)
@ -39,14 +39,14 @@ func TestDeepValidate_OK(t *testing.T) {
func TestDeepValidate_NoNodeZones(t *testing.T) {
c := buildDefaultCluster(t)
var groups []*api.InstanceGroup
var groups []*kopsapi.InstanceGroup
groups = append(groups, buildMinimalMasterInstanceGroup("subnet-us-mock-1a"))
expectErrorFromDeepValidate(t, c, groups, "must configure at least one Node InstanceGroup")
}
func TestDeepValidate_NoMasterZones(t *testing.T) {
c := buildDefaultCluster(t)
var groups []*api.InstanceGroup
var groups []*kopsapi.InstanceGroup
groups = append(groups, buildMinimalNodeInstanceGroup("subnet-us-mock-1a"))
expectErrorFromDeepValidate(t, c, groups, "must configure at least one Master InstanceGroup")
}
@ -54,10 +54,10 @@ func TestDeepValidate_NoMasterZones(t *testing.T) {
func TestDeepValidate_BadZone(t *testing.T) {
t.Skipf("Zone validation not checked by DeepValidate")
c := buildDefaultCluster(t)
c.Spec.Subnets = []api.ClusterSubnetSpec{
c.Spec.Subnets = []kopsapi.ClusterSubnetSpec{
{Name: "subnet-badzone", Zone: "us-mock-1z", CIDR: "172.20.1.0/24"},
}
var groups []*api.InstanceGroup
var groups []*kopsapi.InstanceGroup
groups = append(groups, buildMinimalMasterInstanceGroup("subnet-us-mock-1z"))
groups = append(groups, buildMinimalNodeInstanceGroup("subnet-us-mock-1z"))
expectErrorFromDeepValidate(t, c, groups, "Zone is not a recognized AZ")
@ -66,11 +66,11 @@ func TestDeepValidate_BadZone(t *testing.T) {
func TestDeepValidate_MixedRegion(t *testing.T) {
t.Skipf("Region validation not checked by DeepValidate")
c := buildDefaultCluster(t)
c.Spec.Subnets = []api.ClusterSubnetSpec{
c.Spec.Subnets = []kopsapi.ClusterSubnetSpec{
{Name: "mock1a", Zone: "us-mock-1a", CIDR: "172.20.1.0/24"},
{Name: "west1b", Zone: "us-west-1b", CIDR: "172.20.2.0/24"},
}
var groups []*api.InstanceGroup
var groups []*kopsapi.InstanceGroup
groups = append(groups, buildMinimalMasterInstanceGroup("subnet-us-mock-1a"))
groups = append(groups, buildMinimalNodeInstanceGroup("subnet-us-mock-1a", "subnet-us-west-1b"))
@ -80,10 +80,10 @@ func TestDeepValidate_MixedRegion(t *testing.T) {
func TestDeepValidate_RegionAsZone(t *testing.T) {
t.Skipf("Region validation not checked by DeepValidate")
c := buildDefaultCluster(t)
c.Spec.Subnets = []api.ClusterSubnetSpec{
c.Spec.Subnets = []kopsapi.ClusterSubnetSpec{
{Name: "mock1", Zone: "us-mock-1", CIDR: "172.20.1.0/24"},
}
var groups []*api.InstanceGroup
var groups []*kopsapi.InstanceGroup
groups = append(groups, buildMinimalMasterInstanceGroup("subnet-us-mock-1"))
groups = append(groups, buildMinimalNodeInstanceGroup("subnet-us-mock-1"))
@ -92,7 +92,7 @@ func TestDeepValidate_RegionAsZone(t *testing.T) {
func TestDeepValidate_NotIncludedZone(t *testing.T) {
c := buildDefaultCluster(t)
var groups []*api.InstanceGroup
var groups []*kopsapi.InstanceGroup
groups = append(groups, buildMinimalMasterInstanceGroup("subnet-us-mock-1d"))
groups = append(groups, buildMinimalNodeInstanceGroup("subnet-us-mock-1d"))
@ -101,11 +101,11 @@ func TestDeepValidate_NotIncludedZone(t *testing.T) {
func TestDeepValidate_DuplicateZones(t *testing.T) {
c := buildDefaultCluster(t)
c.Spec.Subnets = []api.ClusterSubnetSpec{
c.Spec.Subnets = []kopsapi.ClusterSubnetSpec{
{Name: "dup1", Zone: "us-mock-1a", CIDR: "172.20.1.0/24"},
{Name: "dup1", Zone: "us-mock-1a", CIDR: "172.20.2.0/24"},
}
var groups []*api.InstanceGroup
var groups []*kopsapi.InstanceGroup
groups = append(groups, buildMinimalMasterInstanceGroup("dup1"))
groups = append(groups, buildMinimalNodeInstanceGroup("dup1"))
expectErrorFromDeepValidate(t, c, groups, "spec.subnets[1].name: Duplicate value: \"dup1\"")
@ -113,11 +113,11 @@ func TestDeepValidate_DuplicateZones(t *testing.T) {
func TestDeepValidate_ExtraMasterZone(t *testing.T) {
c := buildDefaultCluster(t)
c.Spec.Subnets = []api.ClusterSubnetSpec{
c.Spec.Subnets = []kopsapi.ClusterSubnetSpec{
{Name: "mock1a", Zone: "us-mock-1a", CIDR: "172.20.1.0/24"},
{Name: "mock1b", Zone: "us-mock-1b", CIDR: "172.20.2.0/24"},
}
var groups []*api.InstanceGroup
var groups []*kopsapi.InstanceGroup
groups = append(groups, buildMinimalMasterInstanceGroup("subnet-us-mock-1a", "subnet-us-mock-1b", "subnet-us-mock-1c"))
groups = append(groups, buildMinimalNodeInstanceGroup("subnet-us-mock-1a", "subnet-us-mock-1b"))
@ -126,24 +126,24 @@ func TestDeepValidate_ExtraMasterZone(t *testing.T) {
func TestDeepValidate_EvenEtcdClusterSize(t *testing.T) {
c := buildDefaultCluster(t)
c.Spec.EtcdClusters = []*api.EtcdClusterSpec{
c.Spec.EtcdClusters = []*kopsapi.EtcdClusterSpec{
{
Name: "main",
Members: []*api.EtcdMemberSpec{
Members: []*kopsapi.EtcdMemberSpec{
{Name: "us-mock-1a", InstanceGroup: fi.String("us-mock-1a")},
{Name: "us-mock-1b", InstanceGroup: fi.String("us-mock-1b")},
},
},
}
var groups []*api.InstanceGroup
var groups []*kopsapi.InstanceGroup
groups = append(groups, buildMinimalMasterInstanceGroup("subnet-us-mock-1a", "subnet-us-mock-1b", "subnet-us-mock-1c", "subnet-us-mock-1d"))
groups = append(groups, buildMinimalNodeInstanceGroup("subnet-us-mock-1a"))
expectErrorFromDeepValidate(t, c, groups, "Should be an odd number of master-zones for quorum. Use --zones and --master-zones to declare node zones and master zones separately")
}
func expectErrorFromDeepValidate(t *testing.T, c *api.Cluster, groups []*api.InstanceGroup, message string) {
func expectErrorFromDeepValidate(t *testing.T, c *kopsapi.Cluster, groups []*kopsapi.InstanceGroup, message string) {
err := validation.DeepValidate(c, groups, true)
if err == nil {
t.Fatalf("Expected error %q from DeepValidate (strict=true), not no error raised", message)

View File

@ -28,7 +28,7 @@ import (
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/klog"
api "k8s.io/kops/pkg/apis/kops"
kopsapi "k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/assets"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/assettasks"
@ -44,7 +44,7 @@ const (
)
type Loader struct {
Cluster *api.Cluster
Cluster *kopsapi.Cluster
WorkDir string

View File

@ -22,13 +22,13 @@ import (
"os"
"k8s.io/klog"
api "k8s.io/kops/pkg/apis/kops"
kopsapi "k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/apis/kops/util"
"k8s.io/kops/pkg/assets"
"k8s.io/kops/util/pkg/hashing"
)
func usesCNI(c *api.Cluster) bool {
func usesCNI(c *kopsapi.Cluster) bool {
networkConfig := c.Spec.Networking
if networkConfig == nil || networkConfig.Classic != nil {
// classic
@ -134,7 +134,7 @@ const (
ENV_VAR_CNI_ASSET_HASH_STRING = "CNI_ASSET_HASH_STRING"
)
func findCNIAssets(c *api.Cluster, assetBuilder *assets.AssetBuilder) (*url.URL, *hashing.Hash, error) {
func findCNIAssets(c *kopsapi.Cluster, assetBuilder *assets.AssetBuilder) (*url.URL, *hashing.Hash, error) {
if cniVersionURL := os.Getenv(ENV_VAR_CNI_VERSION_URL); cniVersionURL != "" {
u, err := url.Parse(cniVersionURL)

View File

@ -25,7 +25,7 @@ import (
"k8s.io/klog"
api "k8s.io/kops/pkg/apis/kops"
kopsapi "k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/apis/kops/util"
"k8s.io/kops/pkg/apis/kops/validation"
"k8s.io/kops/pkg/assets"
@ -48,10 +48,10 @@ var EtcdClusters = []string{"main", "events"}
type populateClusterSpec struct {
// InputCluster is the api object representing the whole cluster, as input by the user
// We build it up into a complete config, but we write the values as input
InputCluster *api.Cluster
InputCluster *kopsapi.Cluster
// fullCluster holds the built completed cluster spec
fullCluster *api.Cluster
fullCluster *kopsapi.Cluster
// assetBuilder holds the AssetBuilder, used to store assets we discover / remap
assetBuilder *assets.AssetBuilder
@ -64,7 +64,7 @@ func findModelStore() (vfs.Path, error) {
// PopulateClusterSpec takes a user-specified cluster spec, and computes the full specification that should be set on the cluster.
// We do this so that we don't need any real "brains" on the node side.
func PopulateClusterSpec(clientset simple.Clientset, cluster *api.Cluster, assetBuilder *assets.AssetBuilder) (*api.Cluster, error) {
func PopulateClusterSpec(clientset simple.Clientset, cluster *kopsapi.Cluster, assetBuilder *assets.AssetBuilder) (*kopsapi.Cluster, error) {
c := &populateClusterSpec{
InputCluster: cluster,
assetBuilder: assetBuilder,
@ -92,7 +92,7 @@ func (c *populateClusterSpec) run(clientset simple.Clientset) error {
}
// Copy cluster & instance groups, so we can modify them freely
cluster := &api.Cluster{}
cluster := &kopsapi.Cluster{}
reflectutils.JsonMergeStruct(cluster, c.InputCluster)
@ -115,7 +115,7 @@ func (c *populateClusterSpec) run(clientset simple.Clientset) error {
// Check that instance groups are defined in valid zones
{
// TODO: Check that instance groups referenced here exist
//clusterSubnets := make(map[string]*api.ClusterSubnetSpec)
//clusterSubnets := make(map[string]*kopsapi.ClusterSubnetSpec)
//for _, subnet := range cluster.Spec.Subnets {
// if clusterSubnets[subnet.Name] != nil {
// return fmt.Errorf("Subnets contained a duplicate value: %v", subnet.Name)
@ -140,8 +140,8 @@ func (c *populateClusterSpec) run(clientset simple.Clientset) error {
}
}
etcdInstanceGroups := make(map[string]*api.EtcdMemberSpec)
etcdNames := make(map[string]*api.EtcdMemberSpec)
etcdInstanceGroups := make(map[string]*kopsapi.EtcdMemberSpec)
etcdNames := make(map[string]*kopsapi.EtcdMemberSpec)
for _, m := range etcd.Members {
if etcdNames[m.Name] != nil {
@ -238,7 +238,7 @@ func (c *populateClusterSpec) run(clientset simple.Clientset) error {
return err
}
dnsType := api.DNSTypePublic
dnsType := kopsapi.DNSTypePublic
if cluster.Spec.Topology != nil && cluster.Spec.Topology.DNS != nil && cluster.Spec.Topology.DNS.Type != "" {
dnsType = cluster.Spec.Topology.DNS.Type
}
@ -322,7 +322,7 @@ func (c *populateClusterSpec) run(clientset simple.Clientset) error {
completed.Topology = c.InputCluster.Spec.Topology
//completed.Topology.Bastion = c.InputCluster.Spec.Topology.Bastion
fullCluster := &api.Cluster{}
fullCluster := &kopsapi.Cluster{}
*fullCluster = *cluster
fullCluster.Spec = *completed
tf.cluster = fullCluster
@ -335,7 +335,7 @@ func (c *populateClusterSpec) run(clientset simple.Clientset) error {
return nil
}
func (c *populateClusterSpec) assignSubnets(cluster *api.Cluster) error {
func (c *populateClusterSpec) assignSubnets(cluster *kopsapi.Cluster) error {
if cluster.Spec.NonMasqueradeCIDR == "" {
klog.Warningf("NonMasqueradeCIDR not set; can't auto-assign dependent subnets")
return nil
@ -348,7 +348,7 @@ func (c *populateClusterSpec) assignSubnets(cluster *api.Cluster) error {
nmOnes, nmBits := nonMasqueradeCIDR.Mask.Size()
if cluster.Spec.KubeControllerManager == nil {
cluster.Spec.KubeControllerManager = &api.KubeControllerManagerConfig{}
cluster.Spec.KubeControllerManager = &kopsapi.KubeControllerManagerConfig{}
}
if cluster.Spec.KubeControllerManager.ClusterCIDR == "" {

View File

@ -22,7 +22,7 @@ import (
"testing"
"k8s.io/apimachinery/pkg/util/sets"
api "k8s.io/kops/pkg/apis/kops"
kopsapi "k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/assets"
"k8s.io/kops/pkg/client/simple/vfsclientset"
"k8s.io/kops/upup/pkg/fi"
@ -30,13 +30,13 @@ import (
"k8s.io/kops/util/pkg/vfs"
)
func buildMinimalCluster() *api.Cluster {
func buildMinimalCluster() *kopsapi.Cluster {
awsup.InstallMockAWSCloud(MockAWSRegion, "abcd")
c := &api.Cluster{}
c := &kopsapi.Cluster{}
c.ObjectMeta.Name = "testcluster.test.com"
c.Spec.KubernetesVersion = "1.14.6"
c.Spec.Subnets = []api.ClusterSubnetSpec{
c.Spec.Subnets = []kopsapi.ClusterSubnetSpec{
{Name: "subnet-us-mock-1a", Zone: "us-mock-1a", CIDR: "172.20.1.0/24"},
{Name: "subnet-us-mock-1b", Zone: "us-mock-1b", CIDR: "172.20.2.0/24"},
{Name: "subnet-us-mock-1c", Zone: "us-mock-1c", CIDR: "172.20.3.0/24"},
@ -46,9 +46,9 @@ func buildMinimalCluster() *api.Cluster {
c.Spec.SSHAccess = []string{"0.0.0.0/0"}
// Default to public topology
c.Spec.Topology = &api.TopologySpec{
Masters: api.TopologyPublic,
Nodes: api.TopologyPublic,
c.Spec.Topology = &kopsapi.TopologySpec{
Masters: kopsapi.TopologyPublic,
Nodes: kopsapi.TopologyPublic,
}
c.Spec.NetworkCIDR = "172.20.0.0/16"
c.Spec.NonMasqueradeCIDR = "100.64.0.0/10"
@ -60,12 +60,12 @@ func buildMinimalCluster() *api.Cluster {
// TODO: Mock cloudprovider
c.Spec.DNSZone = "test.com"
c.Spec.Networking = &api.NetworkingSpec{}
c.Spec.Networking = &kopsapi.NetworkingSpec{}
return c
}
func addEtcdClusters(c *api.Cluster) {
func addEtcdClusters(c *kopsapi.Cluster) {
subnetNames := sets.NewString()
for _, z := range c.Spec.Subnets {
subnetNames.Insert(z.Name)
@ -73,10 +73,10 @@ func addEtcdClusters(c *api.Cluster) {
etcdZones := subnetNames.List()
for _, etcdCluster := range EtcdClusters {
etcd := &api.EtcdClusterSpec{}
etcd := &kopsapi.EtcdClusterSpec{}
etcd.Name = etcdCluster
for _, zone := range etcdZones {
m := &api.EtcdMemberSpec{}
m := &kopsapi.EtcdMemberSpec{}
m.Name = zone
m.InstanceGroup = fi.String(zone)
etcd.Members = append(etcd.Members, m)
@ -101,7 +101,7 @@ func TestPopulateCluster_Default_NoError(t *testing.T) {
}
}
func mockedPopulateClusterSpec(c *api.Cluster) (*api.Cluster, error) {
func mockedPopulateClusterSpec(c *kopsapi.Cluster) (*kopsapi.Cluster, error) {
vfs.Context.ResetMemfsContext(true)
assetBuilder := assets.NewAssetBuilder(c, "")
@ -115,7 +115,7 @@ func mockedPopulateClusterSpec(c *api.Cluster) (*api.Cluster, error) {
func TestPopulateCluster_Docker_Spec(t *testing.T) {
c := buildMinimalCluster()
c.Spec.Docker = &api.DockerConfig{
c.Spec.Docker = &kopsapi.DockerConfig{
MTU: fi.Int32(5678),
InsecureRegistry: fi.String("myregistry.com:1234"),
InsecureRegistries: []string{"myregistry.com:1234", "myregistry2.com:1234"},
@ -175,7 +175,7 @@ func TestPopulateCluster_StorageDefault(t *testing.T) {
}
}
func build(c *api.Cluster) (*api.Cluster, error) {
func build(c *kopsapi.Cluster) (*kopsapi.Cluster, error) {
err := PerformAssignments(c)
if err != nil {
return nil, fmt.Errorf("error from PerformAssignments: %v", err)
@ -210,7 +210,7 @@ func TestPopulateCluster_Kubenet(t *testing.T) {
func TestPopulateCluster_CNI(t *testing.T) {
c := buildMinimalCluster()
c.Spec.Kubelet = &api.KubeletConfigSpec{
c.Spec.Kubelet = &kopsapi.KubeletConfigSpec{
ConfigureCBR0: fi.Bool(false),
NetworkPluginName: "cni",
NonMasqueradeCIDR: c.Spec.NonMasqueradeCIDR,
@ -238,7 +238,7 @@ func TestPopulateCluster_CNI(t *testing.T) {
func TestPopulateCluster_Custom_CIDR(t *testing.T) {
c := buildMinimalCluster()
c.Spec.NetworkCIDR = "172.20.2.0/24"
c.Spec.Subnets = []api.ClusterSubnetSpec{
c.Spec.Subnets = []kopsapi.ClusterSubnetSpec{
{Name: "subnet-us-mock-1a", Zone: "us-mock-1a", CIDR: "172.20.2.0/27"},
{Name: "subnet-us-mock-1b", Zone: "us-mock-1b", CIDR: "172.20.2.32/27"},
{Name: "subnet-us-mock-1c", Zone: "us-mock-1c", CIDR: "172.20.2.64/27"},
@ -364,9 +364,9 @@ func TestPopulateCluster_BastionInvalidMatchingValues_Required(t *testing.T) {
// We can't have a bastion with public masters / nodes
c := buildMinimalCluster()
addEtcdClusters(c)
c.Spec.Topology.Masters = api.TopologyPublic
c.Spec.Topology.Nodes = api.TopologyPublic
c.Spec.Topology.Bastion = &api.BastionSpec{}
c.Spec.Topology.Masters = kopsapi.TopologyPublic
c.Spec.Topology.Nodes = kopsapi.TopologyPublic
c.Spec.Topology.Bastion = &kopsapi.BastionSpec{}
expectErrorFromPopulateCluster(t, c, "bastion")
}
@ -374,14 +374,14 @@ func TestPopulateCluster_BastionIdleTimeoutInvalidNegative_Required(t *testing.T
c := buildMinimalCluster()
addEtcdClusters(c)
c.Spec.Topology.Masters = api.TopologyPrivate
c.Spec.Topology.Nodes = api.TopologyPrivate
c.Spec.Topology.Bastion = &api.BastionSpec{}
c.Spec.Topology.Masters = kopsapi.TopologyPrivate
c.Spec.Topology.Nodes = kopsapi.TopologyPrivate
c.Spec.Topology.Bastion = &kopsapi.BastionSpec{}
c.Spec.Topology.Bastion.IdleTimeoutSeconds = fi.Int64(-1)
expectErrorFromPopulateCluster(t, c, "bastion")
}
func expectErrorFromPopulateCluster(t *testing.T, c *api.Cluster, message string) {
func expectErrorFromPopulateCluster(t *testing.T, c *kopsapi.Cluster, message string) {
_, err := mockedPopulateClusterSpec(c)
if err == nil {
t.Fatalf("Expected error from PopulateCluster")

View File

@ -21,22 +21,22 @@ import (
"strings"
"testing"
api "k8s.io/kops/pkg/apis/kops"
kopsapi "k8s.io/kops/pkg/apis/kops"
)
func buildMinimalNodeInstanceGroup(subnets ...string) *api.InstanceGroup {
g := &api.InstanceGroup{}
func buildMinimalNodeInstanceGroup(subnets ...string) *kopsapi.InstanceGroup {
g := &kopsapi.InstanceGroup{}
g.ObjectMeta.Name = "nodes"
g.Spec.Role = api.InstanceGroupRoleNode
g.Spec.Role = kopsapi.InstanceGroupRoleNode
g.Spec.Subnets = subnets
return g
}
func buildMinimalMasterInstanceGroup(subnets ...string) *api.InstanceGroup {
g := &api.InstanceGroup{}
func buildMinimalMasterInstanceGroup(subnets ...string) *kopsapi.InstanceGroup {
g := &kopsapi.InstanceGroup{}
g.ObjectMeta.Name = "master"
g.Spec.Role = api.InstanceGroupRoleMaster
g.Spec.Role = kopsapi.InstanceGroupRoleMaster
g.Spec.Subnets = subnets
return g
@ -47,7 +47,7 @@ func TestPopulateInstanceGroup_Name_Required(t *testing.T) {
g := buildMinimalNodeInstanceGroup()
g.ObjectMeta.Name = ""
channel := &api.Channel{}
channel := &kopsapi.Channel{}
expectErrorFromPopulateInstanceGroup(t, cluster, g, channel, "objectMeta.name")
}
@ -57,12 +57,12 @@ func TestPopulateInstanceGroup_Role_Required(t *testing.T) {
g := buildMinimalNodeInstanceGroup()
g.Spec.Role = ""
channel := &api.Channel{}
channel := &kopsapi.Channel{}
expectErrorFromPopulateInstanceGroup(t, cluster, g, channel, "spec.role")
}
func expectErrorFromPopulateInstanceGroup(t *testing.T, cluster *api.Cluster, g *api.InstanceGroup, channel *api.Channel, message string) {
func expectErrorFromPopulateInstanceGroup(t *testing.T, cluster *kopsapi.Cluster, g *kopsapi.InstanceGroup, channel *kopsapi.Channel, message string) {
_, err := PopulateInstanceGroupSpec(cluster, g, channel)
if err == nil {
t.Fatalf("Expected error from PopulateInstanceGroup")

View File

@ -19,7 +19,7 @@ package cloudup
import (
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/klog"
api "k8s.io/kops/pkg/apis/kops"
kopsapi "k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/loader"
"k8s.io/kops/util/pkg/reflectutils"
@ -31,17 +31,17 @@ type SpecBuilder struct {
Tags sets.String
}
func (l *SpecBuilder) BuildCompleteSpec(clusterSpec *api.ClusterSpec) (*api.ClusterSpec, error) {
func (l *SpecBuilder) BuildCompleteSpec(clusterSpec *kopsapi.ClusterSpec) (*kopsapi.ClusterSpec, error) {
loaded, err := l.OptionsLoader.Build(clusterSpec)
if err != nil {
return nil, err
}
completed := &api.ClusterSpec{}
*completed = *(loaded.(*api.ClusterSpec))
completed := &kopsapi.ClusterSpec{}
*completed = *(loaded.(*kopsapi.ClusterSpec))
// Master kubelet config = (base kubelet config + master kubelet config)
masterKubelet := &api.KubeletConfigSpec{}
masterKubelet := &kopsapi.KubeletConfigSpec{}
reflectutils.JsonMergeStruct(masterKubelet, completed.Kubelet)
reflectutils.JsonMergeStruct(masterKubelet, completed.MasterKubelet)
completed.MasterKubelet = masterKubelet