mirror of https://github.com/kubernetes/kops.git
Merge pull request #6438 from zetaab/octavia
support both octavia and old lbaasv2 api in openstack
Commit 2094ef7d20
```diff
@@ -152,6 +152,9 @@ type CreateClusterOptions struct {
 	OpenstackExternalNet     string
 	OpenstackStorageIgnoreAZ bool
 
+	// OpenstackLBOctavia is a boolean value: should we use Octavia or the old load balancer API
+	OpenstackLBOctavia bool
+
 	// ConfigBase is the location where we will store the configuration, it defaults to the state store
 	ConfigBase string
 
@@ -379,6 +382,7 @@ func NewCmdCreateCluster(f *util.Factory, out io.Writer) *cobra.Command {
 		// Openstack flags
 		cmd.Flags().StringVar(&options.OpenstackExternalNet, "os-ext-net", options.OpenstackExternalNet, "The name of the external network to use with the openstack router")
 		cmd.Flags().BoolVar(&options.OpenstackStorageIgnoreAZ, "os-kubelet-ignore-az", options.OpenstackStorageIgnoreAZ, "If true kubernetes may attach volumes across availability zones")
+		cmd.Flags().BoolVar(&options.OpenstackLBOctavia, "os-octavia", options.OpenstackLBOctavia, "If true, the Octavia loadbalancer API will be used")
 	}
 
 	return cmd
@@ -887,6 +891,10 @@ func RunCreateCluster(f *util.Factory, out io.Writer, c *CreateClusterOptions) e
 		if cluster.Spec.CloudConfig == nil {
 			cluster.Spec.CloudConfig = &api.CloudConfiguration{}
 		}
+		provider := "haproxy"
+		if c.OpenstackLBOctavia {
+			provider = "octavia"
+		}
 		cluster.Spec.CloudConfig.Openstack = &api.OpenstackConfiguration{
 			Router: &api.OpenstackRouter{
 				ExternalNetwork: fi.String(c.OpenstackExternalNet),
@@ -894,8 +902,8 @@ func RunCreateCluster(f *util.Factory, out io.Writer, c *CreateClusterOptions) e
 			Loadbalancer: &api.OpenstackLoadbalancerConfig{
 				FloatingNetwork: fi.String(c.OpenstackExternalNet),
 				Method:          fi.String("ROUND_ROBIN"),
-				Provider:        fi.String("haproxy"),
-				UseOctavia:      fi.Bool(false),
+				Provider:        fi.String(provider),
+				UseOctavia:      fi.Bool(c.OpenstackLBOctavia),
 			},
 			BlockStorage: &api.OpenstackBlockStorageConfig{
 				Version: fi.String("v2"),
```
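Taken together, these hunks thread a single `--os-octavia` boolean from the CLI flag into both the provider name and the `UseOctavia` field of the loadbalancer config. A minimal standalone sketch of that mapping (example code, not part of the commit):

```go
package main

import "fmt"

// lbProvider mirrors the selection in RunCreateCluster: one boolean drives
// both the provider string and the UseOctavia field.
func lbProvider(octavia bool) string {
	if octavia {
		return "octavia"
	}
	return "haproxy"
}

func main() {
	for _, octavia := range []bool{false, true} {
		fmt.Printf("--os-octavia=%v -> Provider=%q, UseOctavia=%v\n", octavia, lbProvider(octavia), octavia)
	}
}
```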
```diff
@@ -62,3 +62,5 @@ kops delete cluster my-cluster.k8s.local --yes
 
 #### Optional flags
 * `--os-kubelet-ignore-az=true` Nova and Cinder have different availability zones, more information [Kubernetes docs](https://kubernetes.io/docs/concepts/cluster-administration/cloud-providers/#block-storage)
+* `--os-octavia=true` Use the Octavia loadbalancer API instead of the old LBaaS v2 API.
```
```diff
@@ -21,6 +21,7 @@ go_library(
         "//pkg/resources:go_default_library",
         "//upup/pkg/fi:go_default_library",
         "//upup/pkg/fi/cloudup/openstack:go_default_library",
+        "//vendor/github.com/golang/glog:go_default_library",
         "//vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes:go_default_library",
         "//vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers:go_default_library",
         "//vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets:go_default_library",
```
```diff
@@ -19,6 +19,7 @@ package openstack
 import (
 	"strings"
 
+	"github.com/golang/glog"
 	"github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/listeners"
 	"github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/loadbalancers"
 	v2pools "github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/pools"
@@ -63,6 +64,11 @@ func (os *clusterDiscoveryOS) ListLB() ([]*resources.Resource, error) {
 func (os *clusterDiscoveryOS) ListLBPools() ([]*resources.Resource, error) {
 	var resourceTrackers []*resources.Resource
 
+	if os.osCloud.UseOctavia() {
+		glog.V(2).Info("skipping ListLBPools because using Octavia")
+		return nil, nil
+	}
+
 	pools, err := os.osCloud.ListPools(v2pools.ListOpts{})
 	if err != nil {
 		return nil, err
@@ -87,6 +93,11 @@ func (os *clusterDiscoveryOS) ListLBPools() ([]*resources.Resource, error) {
 func (os *clusterDiscoveryOS) ListLBListener() ([]*resources.Resource, error) {
 	var resourceTrackers []*resources.Resource
 
+	if os.osCloud.UseOctavia() {
+		glog.V(2).Info("skipping ListLBListener because using Octavia")
+		return nil, nil
+	}
+
 	listeners, err := os.osCloud.ListListeners(listeners.ListOpts{})
 	if err != nil {
 		return nil, err
```
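Both list functions now return early under Octavia. A plausible reason, an assumption on my part rather than something the commit states, is that an Octavia loadbalancer delete can cascade to its listeners and pools, so they never need to be enumerated separately. A sketch using gophercloud's Octavia `DeleteOpts`:

```go
package openstack

import (
	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/loadbalancers"
)

// cascadeDeleteLB is a hypothetical illustration, not part of this commit:
// with Octavia, a single delete with Cascade set removes the loadbalancer
// together with its listeners and pools.
func cascadeDeleteLB(lbClient *gophercloud.ServiceClient, lbID string) error {
	return loadbalancers.Delete(lbClient, lbID, loadbalancers.DeleteOpts{Cascade: true}).ExtractErr()
}
```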
```diff
@@ -89,6 +89,7 @@ type OpenstackCloud interface {
 	NetworkingClient() *gophercloud.ServiceClient
 	LoadBalancerClient() *gophercloud.ServiceClient
 	DNSClient() *gophercloud.ServiceClient
+	UseOctavia() bool
 
 	// Region returns the region which cloud will run on
 	Region() string
@@ -271,6 +272,7 @@ type openstackCloud struct {
 	extNetworkName *string
 	tags           map[string]string
 	region         string
+	useOctavia     bool
 }
 
 var _ fi.Cloud = &openstackCloud{}
@@ -352,24 +354,17 @@ func NewOpenstackCloud(tags map[string]string, spec *kops.ClusterSpec) (Openstac
 		}
 	}
 
-	lbClient, err := os.NewLoadBalancerV2(provider, gophercloud.EndpointOpts{
-		Type:   "network",
-		Region: region,
-	})
-	if err != nil {
-		return nil, fmt.Errorf("error building lb client: %v", err)
-	}
-
 	c := &openstackCloud{
 		cinderClient:  cinderClient,
 		neutronClient: neutronClient,
 		novaClient:    novaClient,
-		lbClient:      lbClient,
 		dnsClient:     dnsClient,
 		tags:          tags,
 		region:        region,
+		useOctavia:    false,
 	}
 
+	octavia := false
 	if spec != nil &&
 		spec.CloudConfig != nil &&
 		spec.CloudConfig.Openstack != nil &&
@@ -389,11 +384,37 @@ func NewOpenstackCloud(tags map[string]string, spec *kops.ClusterSpec) (Openstac
 			}
 			spec.CloudConfig.Openstack.Loadbalancer.FloatingNetworkID = fi.String(lbNet[0].ID)
 		}
+		if spec.CloudConfig.Openstack.Loadbalancer.UseOctavia != nil {
+			octavia = fi.BoolValue(spec.CloudConfig.Openstack.Loadbalancer.UseOctavia)
+		}
 	}
 
+	c.useOctavia = octavia
+	var lbClient *gophercloud.ServiceClient
+	if octavia {
+		glog.V(2).Infof("Openstack using Octavia lbaasv2 api")
+		lbClient, err = os.NewLoadBalancerV2(provider, gophercloud.EndpointOpts{
+			Region: region,
+		})
+		if err != nil {
+			return nil, fmt.Errorf("error building lb client: %v", err)
+		}
+	} else {
+		glog.V(2).Infof("Openstack using deprecated lbaasv2 api")
+		lbClient, err = os.NewNetworkV2(provider, gophercloud.EndpointOpts{
+			Region: region,
+		})
+		if err != nil {
+			return nil, fmt.Errorf("error building lb client: %v", err)
+		}
+	}
+	c.lbClient = lbClient
 	return c, nil
 }
 
+func (c *openstackCloud) UseOctavia() bool {
+	return c.useOctavia
+}
+
 func (c *openstackCloud) ComputeClient() *gophercloud.ServiceClient {
 	return c.novaClient
 }
```
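The heart of the change is in `NewOpenstackCloud`: instead of always building the loadbalancer client against Neutron's network endpoint, the client is now chosen from the cluster spec. The branch condensed into a standalone sketch (assuming a `provider *gophercloud.ProviderClient` as in the surrounding code):

```go
package openstack

import (
	"github.com/gophercloud/gophercloud"
	os "github.com/gophercloud/gophercloud/openstack"
)

// buildLBClient restates the selection above: Octavia has its own
// load-balancer service endpoint, while the deprecated LBaaS v2 API is
// served through Neutron's network endpoint.
func buildLBClient(provider *gophercloud.ProviderClient, region string, octavia bool) (*gophercloud.ServiceClient, error) {
	if octavia {
		return os.NewLoadBalancerV2(provider, gophercloud.EndpointOpts{Region: region})
	}
	return os.NewNetworkV2(provider, gophercloud.EndpointOpts{Region: region})
}
```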
```diff
@@ -29,7 +29,7 @@ import (
 
 func (c *openstackCloud) DeletePool(poolID string) error {
 	done, err := vfs.RetryWithBackoff(writeBackoff, func() (bool, error) {
-		err := v2pools.Delete(c.lbClient, poolID).ExtractErr()
+		err := v2pools.Delete(c.LoadBalancerClient(), poolID).ExtractErr()
 		if err != nil && !isNotFound(err) {
 			return false, fmt.Errorf("error deleting pool: %v", err)
 		}
@@ -46,7 +46,7 @@ func (c *openstackCloud) DeletePool(poolID string) error {
 
 func (c *openstackCloud) DeleteListener(listenerID string) error {
 	done, err := vfs.RetryWithBackoff(writeBackoff, func() (bool, error) {
-		err := listeners.Delete(c.lbClient, listenerID).ExtractErr()
+		err := listeners.Delete(c.LoadBalancerClient(), listenerID).ExtractErr()
 		if err != nil && !isNotFound(err) {
 			return false, fmt.Errorf("error deleting listener: %v", err)
 		}
@@ -63,7 +63,7 @@ func (c *openstackCloud) DeleteListener(listenerID string) error {
 
 func (c *openstackCloud) DeleteLB(lbID string, opts loadbalancers.DeleteOpts) error {
 	done, err := vfs.RetryWithBackoff(writeBackoff, func() (bool, error) {
-		err := loadbalancers.Delete(c.lbClient, lbID, opts).ExtractErr()
+		err := loadbalancers.Delete(c.LoadBalancerClient(), lbID, opts).ExtractErr()
 		if err != nil && !isNotFound(err) {
 			return false, fmt.Errorf("error deleting loadbalancer: %v", err)
 		}
@@ -82,7 +82,7 @@ func (c *openstackCloud) CreateLB(opt loadbalancers.CreateOptsBuilder) (*loadbal
 	var i *loadbalancers.LoadBalancer
 
 	done, err := vfs.RetryWithBackoff(writeBackoff, func() (bool, error) {
-		v, err := loadbalancers.Create(c.lbClient, opt).Extract()
+		v, err := loadbalancers.Create(c.LoadBalancerClient(), opt).Extract()
 		if err != nil {
 			return false, fmt.Errorf("error creating loadbalancer: %v", err)
 		}
@@ -101,7 +101,7 @@ func (c *openstackCloud) CreateLB(opt loadbalancers.CreateOptsBuilder) (*loadbal
 func (c *openstackCloud) GetLB(loadbalancerID string) (lb *loadbalancers.LoadBalancer, err error) {
 
 	done, err := vfs.RetryWithBackoff(readBackoff, func() (bool, error) {
-		lb, err = loadbalancers.Get(c.neutronClient, loadbalancerID).Extract()
+		lb, err = loadbalancers.Get(c.LoadBalancerClient(), loadbalancerID).Extract()
 		if err != nil {
 			return false, err
 		}
@@ -120,7 +120,7 @@ func (c *openstackCloud) GetLB(loadbalancerID string) (lb *loadbalancers.LoadBal
 func (c *openstackCloud) ListLBs(opt loadbalancers.ListOptsBuilder) (lbs []loadbalancers.LoadBalancer, err error) {
 
 	done, err := vfs.RetryWithBackoff(readBackoff, func() (bool, error) {
-		allPages, err := loadbalancers.List(c.lbClient, opt).AllPages()
+		allPages, err := loadbalancers.List(c.LoadBalancerClient(), opt).AllPages()
 		if err != nil {
 			return false, fmt.Errorf("failed to list loadbalancers: %s", err)
 		}
@@ -141,7 +141,7 @@ func (c *openstackCloud) ListLBs(opt loadbalancers.ListOptsBuilder) (lbs []loadb
 
 func (c *openstackCloud) GetPool(poolID string, memberID string) (member *v2pools.Member, err error) {
 	done, err := vfs.RetryWithBackoff(readBackoff, func() (bool, error) {
-		member, err = v2pools.GetMember(c.neutronClient, poolID, memberID).Extract()
+		member, err = v2pools.GetMember(c.LoadBalancerClient(), poolID, memberID).Extract()
 		if err != nil {
 			return false, err
 		}
@@ -159,10 +159,10 @@ func (c *openstackCloud) GetPool(poolID string, memberID string) (member *v2pool
 func (c *openstackCloud) AssociateToPool(server *servers.Server, poolID string, opts v2pools.CreateMemberOpts) (association *v2pools.Member, err error) {
 
 	done, err := vfs.RetryWithBackoff(writeBackoff, func() (bool, error) {
-		association, err = v2pools.GetMember(c.NetworkingClient(), poolID, server.ID).Extract()
+		association, err = v2pools.GetMember(c.LoadBalancerClient(), poolID, server.ID).Extract()
 		if err != nil || association == nil {
 			// Pool association does not exist. Create it
-			association, err = v2pools.CreateMember(c.NetworkingClient(), poolID, opts).Extract()
+			association, err = v2pools.CreateMember(c.LoadBalancerClient(), poolID, opts).Extract()
 			if err != nil {
 				return false, fmt.Errorf("Failed to create pool association: %v", err)
 			}
```
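Every call-site change in this file is the same mechanical substitution: operations go through the `LoadBalancerClient()` accessor rather than touching `c.lbClient` or `c.neutronClient` directly, so the Octavia-versus-Neutron decision is made exactly once at construction time. The accessor body is presumably just this (a sketch consistent with the interface shown earlier, not code from the diff):

```go
package openstack

import "github.com/gophercloud/gophercloud"

// Likely body of the accessor: the client was already chosen in
// NewOpenstackCloud, so all call sites stay identical for both APIs.
func (c *openstackCloud) LoadBalancerClient() *gophercloud.ServiceClient {
	return c.lbClient
}
```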
```diff
@@ -60,5 +60,6 @@ go_library(
         "//vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks:go_default_library",
         "//vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports:go_default_library",
         "//vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
     ],
 )
```
```diff
@@ -18,10 +18,13 @@ package openstacktasks
 
 import (
 	"fmt"
+	"time"
 
 	"github.com/golang/glog"
 	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/loadbalancers"
 	"github.com/gophercloud/gophercloud/openstack/networking/v2/subnets"
+	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/kops/upup/pkg/fi"
 	"k8s.io/kops/upup/pkg/fi/cloudup/openstack"
 )
@@ -36,6 +39,50 @@ type LB struct {
 	PortID *string
 }
 
+const (
+	// loadbalancerActive* configures the exponential backoff used while
+	// waiting for the loadbalancer to reach ACTIVE provisioning status.
+	// Starting with 1 second, multiplying by 1.2 with each step and taking
+	// 22 steps at maximum, it will time out after 326s, which roughly
+	// corresponds to about 5 minutes
+	loadbalancerActiveInitDelay = 1 * time.Second
+	loadbalancerActiveFactor    = 1.2
+	loadbalancerActiveSteps     = 22
+
+	activeStatus = "ACTIVE"
+	errorStatus  = "ERROR"
+)
+
+func waitLoadbalancerActiveProvisioningStatus(client *gophercloud.ServiceClient, loadbalancerID string) (string, error) {
+	backoff := wait.Backoff{
+		Duration: loadbalancerActiveInitDelay,
+		Factor:   loadbalancerActiveFactor,
+		Steps:    loadbalancerActiveSteps,
+	}
+
+	var provisioningStatus string
+	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
+		loadbalancer, err := loadbalancers.Get(client, loadbalancerID).Extract()
+		if err != nil {
+			return false, err
+		}
+		provisioningStatus = loadbalancer.ProvisioningStatus
+		if loadbalancer.ProvisioningStatus == activeStatus {
+			return true, nil
+		} else if loadbalancer.ProvisioningStatus == errorStatus {
+			return true, fmt.Errorf("loadbalancer has gone into ERROR state")
+		} else {
+			glog.Infof("Waiting for Loadbalancer to be ACTIVE...")
+			return false, nil
+		}
+	})
+
+	if err == wait.ErrWaitTimeout {
+		err = fmt.Errorf("loadbalancer failed to go into ACTIVE provisioning status within allotted time")
+	}
+	return provisioningStatus, err
+}
+
 // GetDependencies returns the dependencies of the Instance task
 func (e *LB) GetDependencies(tasks map[string]fi.Task) []fi.Task {
 	var deps []fi.Task
```
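A quick sanity check of the backoff comment above: the sleep time alone is the geometric series 1s * (1 + 1.2 + ... + 1.2^21) ≈ 271s, so the quoted ~326s / "about 5 minutes" is the right order of magnitude once per-poll API latency is added on top:

```go
package main

import "fmt"

// Rough check of the backoff comment: total sleep across 22 exponential
// backoff steps starting at 1s with factor 1.2 (API call time excluded).
func main() {
	total, step := 0.0, 1.0
	for i := 0; i < 22; i++ {
		total += step
		step *= 1.2
	}
	fmt.Printf("total sleep ≈ %.0fs\n", total) // prints: total sleep ≈ 271s
}
```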
```diff
@@ -127,6 +127,12 @@ func (_ *LBPool) CheckChanges(a, e, changes *LBPool) error {
 func (_ *LBPool) RenderOpenstack(t *openstack.OpenstackAPITarget, a, e, changes *LBPool) error {
 	if a == nil {
 
+		// wait until the loadbalancer is in ACTIVE state
+		provisioningStatus, err := waitLoadbalancerActiveProvisioningStatus(t.Cloud.LoadBalancerClient(), fi.StringValue(e.Loadbalancer.ID))
+		if err != nil {
+			return fmt.Errorf("failed to reach loadbalancer ACTIVE provisioning status %v: %v", provisioningStatus, err)
+		}
+
 		poolopts := v2pools.CreateOpts{
 			Name:     fi.StringValue(e.Name),
 			LBMethod: v2pools.LBMethodRoundRobin,
```
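For context (my reading, not stated in the commit): the LBaaS v2 and Octavia APIs reject writes while the parent loadbalancer is still in a `PENDING_*` provisioning state, which is why the pool creation is gated on the LB reaching `ACTIVE`. The same pattern condensed, assuming the helper defined in the previous file:

```go
package openstacktasks

import (
	"github.com/gophercloud/gophercloud"
	v2pools "github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/pools"
)

// createPoolWhenActive is a sketch of the gating pattern used in
// RenderOpenstack: wait for the parent loadbalancer to become ACTIVE,
// then create the dependent resource.
func createPoolWhenActive(client *gophercloud.ServiceClient, lbID string, opts v2pools.CreateOpts) (*v2pools.Pool, error) {
	if _, err := waitLoadbalancerActiveProvisioningStatus(client, lbID); err != nil {
		return nil, err
	}
	return v2pools.Create(client, opts).Extract()
}
```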