chore: refactor factory to accept a cluster

This should allow us to build our own rest config in the future,
rather than relying on the kubeconfig being configured correctly.

To do this, we need to stop sharing the factory between the channels
and kops commands.
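
A minimal sketch of the call pattern this refactor enables on the kops side.
The factory methods f.RESTConfig(cluster) and f.HTTPClient(cluster) are taken
from the diff below; buildKubeClient is a hypothetical helper shown only for
illustration, not part of this commit.

package example // illustrative only, not in the kops tree

import (
	"fmt"

	"k8s.io/client-go/kubernetes"
	"k8s.io/kops/cmd/kops/util"
	kopsapi "k8s.io/kops/pkg/apis/kops"
)

// buildKubeClient asks the shared factory for the cluster's rest.Config and
// http.Client (cached per cluster name), then builds the typed clientset from both.
func buildKubeClient(f *util.Factory, cluster *kopsapi.Cluster) (kubernetes.Interface, error) {
	restConfig, err := f.RESTConfig(cluster)
	if err != nil {
		return nil, fmt.Errorf("getting rest config: %w", err)
	}
	httpClient, err := f.HTTPClient(cluster)
	if err != nil {
		return nil, fmt.Errorf("getting http client: %w", err)
	}
	clientset, err := kubernetes.NewForConfigAndClient(restConfig, httpClient)
	if err != nil {
		return nil, fmt.Errorf("building kube client: %w", err)
	}
	return clientset, nil
}
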
justinsb 2024-12-27 15:32:26 -05:00
parent df6232e646
commit 859a9fd9f1
19 changed files with 367 additions and 187 deletions

View File

@@ -17,20 +17,29 @@ limitations under the License.
 package main
 
 import (
+	"context"
 	"fmt"
 	"os"
 
 	"k8s.io/klog/v2"
 
 	"k8s.io/kops/channels/pkg/cmd"
-	"k8s.io/kops/cmd/kops/util"
 )
 
 func main() {
-	klog.InitFlags(nil)
-
-	f := util.NewFactory(nil)
-
-	if err := cmd.Execute(f, os.Stdout); err != nil {
+	if err := run(context.Background()); err != nil {
 		fmt.Fprintf(os.Stderr, "\n%v\n", err)
 		os.Exit(1)
 	}
 }
+
+func run(ctx context.Context) error {
+	klog.InitFlags(nil)
+
+	f := cmd.NewChannelsFactory()
+
+	if err := cmd.Execute(ctx, f, os.Stdout); err != nil {
+		return err
+	}
+	return nil
+}

View File

@@ -22,7 +22,7 @@ import (
 	"github.com/spf13/cobra"
 )
 
-func NewCmdApply(f Factory, out io.Writer) *cobra.Command {
+func NewCmdApply(f *ChannelsFactory, out io.Writer) *cobra.Command {
 	cmd := &cobra.Command{
 		Use:   "apply",
 		Short: "apply resources from a channel",

View File

@@ -25,12 +25,15 @@ import (
 	"github.com/blang/semver/v4"
 	"github.com/cert-manager/cert-manager/pkg/client/clientset/versioned"
+	certmanager "github.com/cert-manager/cert-manager/pkg/client/clientset/versioned"
 	"github.com/spf13/cobra"
 	"go.uber.org/multierr"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/cli-runtime/pkg/genericclioptions"
 	"k8s.io/client-go/dynamic"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/restmapper"
 	"k8s.io/kops/channels/pkg/channels"
 	"k8s.io/kops/util/pkg/tables"
 	"k8s.io/kops/util/pkg/vfs"
@@ -38,9 +41,11 @@ import (
 type ApplyChannelOptions struct {
 	Yes bool
+
+	configFlags genericclioptions.ConfigFlags
 }
 
-func NewCmdApplyChannel(f Factory, out io.Writer) *cobra.Command {
+func NewCmdApplyChannel(f *ChannelsFactory, out io.Writer) *cobra.Command {
 	var options ApplyChannelOptions
 
 	cmd := &cobra.Command{
@@ -57,20 +62,29 @@ func NewCmdApplyChannel(f Factory, out io.Writer) *cobra.Command {
 	return cmd
 }
 
-func RunApplyChannel(ctx context.Context, f Factory, out io.Writer, options *ApplyChannelOptions, args []string) error {
-	k8sClient, err := f.KubernetesClient()
+func RunApplyChannel(ctx context.Context, f *ChannelsFactory, out io.Writer, options *ApplyChannelOptions, args []string) error {
+	restConfig, err := f.RESTConfig()
+	if err != nil {
+		return err
+	}
+	httpClient, err := f.HTTPClient()
 	if err != nil {
 		return err
 	}
-	cmClient, err := f.CertManagerClient()
+	k8sClient, err := kubernetes.NewForConfigAndClient(restConfig, httpClient)
 	if err != nil {
-		return err
+		return fmt.Errorf("building kube client: %w", err)
+	}
+	cmClient, err := certmanager.NewForConfigAndClient(restConfig, httpClient)
+	if err != nil {
+		return fmt.Errorf("building cert manager client: %w", err)
 	}
 	dynamicClient, err := f.DynamicClient()
 	if err != nil {
-		return err
+		return fmt.Errorf("building dynamic client: %w", err)
 	}
 	restMapper, err := f.RESTMapper()
@@ -92,7 +106,7 @@ func RunApplyChannel(ctx context.Context, f Factory, out io.Writer, options *App
 	kubernetesVersion.Pre = nil
 
 	if len(args) != 1 {
-		return fmt.Errorf("unexpected number of arguments. Only one channel may be processed at the same time.")
+		return fmt.Errorf("unexpected number of arguments. Only one channel may be processed at the same time")
 	}
 	channelLocation := args[0]

View File

@@ -17,20 +17,101 @@ limitations under the License.
 package cmd
 
 import (
+	"fmt"
+	"net/http"
+
+	"k8s.io/cli-runtime/pkg/genericclioptions"
 	"k8s.io/client-go/dynamic"
-	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/rest"
 	"k8s.io/client-go/restmapper"
 	"k8s.io/kops/util/pkg/vfs"
 
 	_ "k8s.io/client-go/plugin/pkg/client/auth"
-
-	certmanager "github.com/cert-manager/cert-manager/pkg/client/clientset/versioned"
 )
 
-type Factory interface {
-	VFSContext() *vfs.VFSContext
-	KubernetesClient() (kubernetes.Interface, error)
-	CertManagerClient() (certmanager.Interface, error)
-	RESTMapper() (*restmapper.DeferredDiscoveryRESTMapper, error)
-	DynamicClient() (dynamic.Interface, error)
+type ChannelsFactory struct {
+	configFlags      genericclioptions.ConfigFlags
+	cachedRESTConfig *rest.Config
+	cachedHTTPClient *http.Client
+	vfsContext       *vfs.VFSContext
+	restMapper       *restmapper.DeferredDiscoveryRESTMapper
+	dynamicClient    dynamic.Interface
+}
+
+func NewChannelsFactory() *ChannelsFactory {
+	return &ChannelsFactory{}
+}
+
+func (f *ChannelsFactory) RESTConfig() (*rest.Config, error) {
+	if f.cachedRESTConfig == nil {
+		clientGetter := genericclioptions.NewConfigFlags(true)
+		restConfig, err := clientGetter.ToRESTConfig()
+		if err != nil {
+			return nil, fmt.Errorf("cannot load kubecfg settings: %w", err)
+		}
+		restConfig.UserAgent = "kops"
+		restConfig.Burst = 50
+		restConfig.QPS = 20
+		f.cachedRESTConfig = restConfig
+	}
+	return f.cachedRESTConfig, nil
+}
+
+func (f *ChannelsFactory) HTTPClient() (*http.Client, error) {
+	if f.cachedHTTPClient == nil {
+		restConfig, err := f.RESTConfig()
+		if err != nil {
+			return nil, err
+		}
+		httpClient, err := rest.HTTPClientFor(restConfig)
+		if err != nil {
+			return nil, fmt.Errorf("getting http client: %w", err)
+		}
+		f.cachedHTTPClient = httpClient
+	}
+	return f.cachedHTTPClient, nil
+}
+
+func (f *ChannelsFactory) RESTMapper() (*restmapper.DeferredDiscoveryRESTMapper, error) {
+	if f.restMapper == nil {
+		discoveryClient, err := f.configFlags.ToDiscoveryClient()
+		if err != nil {
+			return nil, err
+		}
+		restMapper := restmapper.NewDeferredDiscoveryRESTMapper(discoveryClient)
+		f.restMapper = restMapper
+	}
+	return f.restMapper, nil
+}
+
+func (f *ChannelsFactory) DynamicClient() (dynamic.Interface, error) {
+	if f.dynamicClient == nil {
+		restConfig, err := f.RESTConfig()
+		if err != nil {
+			return nil, err
+		}
+		httpClient, err := f.HTTPClient()
+		if err != nil {
+			return nil, err
+		}
+		dynamicClient, err := dynamic.NewForConfigAndClient(restConfig, httpClient)
+		if err != nil {
+			return nil, err
+		}
+		f.dynamicClient = dynamicClient
+	}
+	return f.dynamicClient, nil
+}
+
+func (f *ChannelsFactory) VFSContext() *vfs.VFSContext {
+	if f.vfsContext == nil {
+		// TODO vfs.NewVFSContext()
+		f.vfsContext = vfs.Context
+	}
+	return f.vfsContext
 }

View File

@@ -22,7 +22,7 @@ import (
 	"github.com/spf13/cobra"
 )
 
-func NewCmdGet(f Factory, out io.Writer) *cobra.Command {
+func NewCmdGet(f *ChannelsFactory, out io.Writer) *cobra.Command {
 	cmd := &cobra.Command{
 		Use:        "get",
 		SuggestFor: []string{"list"},

View File

@@ -41,13 +41,14 @@ import (
 	"github.com/spf13/cobra"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes"
 	"k8s.io/kops/channels/pkg/channels"
 	"k8s.io/kops/util/pkg/tables"
 )
 
 type GetAddonsOptions struct{}
 
-func NewCmdGetAddons(f Factory, out io.Writer) *cobra.Command {
+func NewCmdGetAddons(f *ChannelsFactory, out io.Writer) *cobra.Command {
 	var options GetAddonsOptions
 
 	cmd := &cobra.Command{
@@ -70,11 +71,20 @@ type addonInfo struct {
 	Namespace *v1.Namespace
 }
 
-func RunGetAddons(ctx context.Context, f Factory, out io.Writer, options *GetAddonsOptions) error {
-	k8sClient, err := f.KubernetesClient()
+func RunGetAddons(ctx context.Context, f *ChannelsFactory, out io.Writer, options *GetAddonsOptions) error {
+	restConfig, err := f.RESTConfig()
 	if err != nil {
 		return err
 	}
+	httpClient, err := f.HTTPClient()
+	if err != nil {
+		return err
+	}
+	k8sClient, err := kubernetes.NewForConfigAndClient(restConfig, httpClient)
+	if err != nil {
+		return fmt.Errorf("building kube client: %w", err)
+	}
 
 	namespaces, err := k8sClient.CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
 	if err != nil {

View File

@@ -17,6 +17,7 @@ limitations under the License.
 package cmd
 
 import (
+	"context"
 	goflag "flag"
 	"fmt"
 	"io"
@@ -29,17 +30,17 @@ type CmdRootOptions struct {
 	configFile string
 }
 
-func Execute(f Factory, out io.Writer) error {
+func Execute(ctx context.Context, f *ChannelsFactory, out io.Writer) error {
 	cobra.OnInitialize(initConfig)
 
 	cmd := NewCmdRoot(f, out)
 
 	goflag.Set("logtostderr", "true")
 	goflag.CommandLine.Parse([]string{})
-	return cmd.Execute()
+	return cmd.ExecuteContext(ctx)
 }
 
-func NewCmdRoot(f Factory, out io.Writer) *cobra.Command {
+func NewCmdRoot(f *ChannelsFactory, out io.Writer) *cobra.Command {
 	options := &CmdRootOptions{}
 
 	cmd := &cobra.Command{

View File

@@ -30,6 +30,7 @@ import (
 	"k8s.io/cli-runtime/pkg/genericclioptions"
 	"k8s.io/client-go/kubernetes"
 	_ "k8s.io/client-go/plugin/pkg/client/auth"
+	"k8s.io/client-go/rest"
 	"k8s.io/kops/cmd/kops/util"
 	kopsapi "k8s.io/kops/pkg/apis/kops"
 	"k8s.io/kops/pkg/cloudinstances"
@@ -163,11 +164,28 @@ func RunDeleteInstance(ctx context.Context, f *util.Factory, out io.Writer, opti
 		return err
 	}
 
-	var nodes []v1.Node
 	var k8sClient kubernetes.Interface
-	var host string
+	var restConfig *rest.Config
 	if !options.CloudOnly {
-		k8sClient, host, nodes, err = getNodes(ctx, cluster, true)
+		restConfig, err = f.RESTConfig(cluster)
+		if err != nil {
+			return fmt.Errorf("getting rest config: %w", err)
+		}
+		httpClient, err := f.HTTPClient(cluster)
+		if err != nil {
+			return fmt.Errorf("getting http client: %w", err)
+		}
+		k8sClient, err = kubernetes.NewForConfigAndClient(restConfig, httpClient)
+		if err != nil {
+			return fmt.Errorf("cannot build kube client: %w", err)
+		}
+	}
+
+	var nodes []v1.Node
+	if !options.CloudOnly {
+		nodes, err = getNodes(ctx, k8sClient, true)
 		if err != nil {
 			return err
 		}
@@ -241,7 +259,7 @@ func RunDeleteInstance(ctx context.Context, f *util.Factory, out io.Writer, opti
 	var clusterValidator validation.ClusterValidator
 	if !options.CloudOnly {
-		clusterValidator, err = validation.NewClusterValidator(cluster, cloud, list, host, k8sClient)
+		clusterValidator, err = validation.NewClusterValidator(cluster, cloud, list, restConfig, k8sClient)
 		if err != nil {
 			return fmt.Errorf("cannot create cluster validator: %v", err)
 		}
@@ -251,37 +269,22 @@ func RunDeleteInstance(ctx context.Context, f *util.Factory, out io.Writer, opti
 	return d.UpdateSingleInstance(cloudMember, options.Surge)
 }
 
-func getNodes(ctx context.Context, cluster *kopsapi.Cluster, verbose bool) (kubernetes.Interface, string, []v1.Node, error) {
+func getNodes(ctx context.Context, kubeClient kubernetes.Interface, verbose bool) ([]v1.Node, error) {
 	var nodes []v1.Node
-	var k8sClient kubernetes.Interface
 
-	contextName := cluster.ObjectMeta.Name
-	clientGetter := genericclioptions.NewConfigFlags(true)
-	clientGetter.Context = &contextName
-
-	config, err := clientGetter.ToRESTConfig()
-	if err != nil {
-		return nil, "", nil, fmt.Errorf("cannot load kubecfg settings for %q: %v", contextName, err)
-	}
-
-	k8sClient, err = kubernetes.NewForConfig(config)
-	if err != nil {
-		return nil, "", nil, fmt.Errorf("cannot build kube client for %q: %v", contextName, err)
-	}
-
-	nodeList, err := k8sClient.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
+	nodeList, err := kubeClient.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
 	if err != nil {
 		if verbose {
 			fmt.Fprintf(os.Stderr, "Unable to reach the kubernetes API.\n")
 			fmt.Fprintf(os.Stderr, "Use --cloudonly to do a deletion without confirming progress with the k8s API\n\n")
 		}
-		return nil, "", nil, fmt.Errorf("listing nodes in cluster: %v", err)
+		return nil, fmt.Errorf("listing nodes in cluster: %v", err)
 	}
 
 	if nodeList != nil {
 		nodes = nodeList.Items
 	}
-	return k8sClient, config.Host, nodes, nil
+	return nodes, nil
 }
 
 func deleteNodeMatch(cloudMember *cloudinstances.CloudInstance, options *DeleteInstanceOptions) bool {
@@ -320,15 +323,25 @@ func completeInstanceOrNode(f commandutils.Factory, options *DeleteInstanceOptio
 		return completions, directive
 	}
 
-	var nodes []v1.Node
-	var err error
+	var kubeClient kubernetes.Interface
 	if !options.CloudOnly {
-		_, _, nodes, err = getNodes(ctx, cluster, false)
+		var err error
+		kubeClient, err = getKubeClientFromKubeconfig(ctx, cluster)
 		if err != nil {
 			cobra.CompErrorln(err.Error())
 		}
 	}
 
+	var nodes []v1.Node
+	if kubeClient != nil {
+		nodeList, err := kubeClient.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
+		if err != nil {
+			cobra.CompErrorln(err.Error())
+		} else if nodeList != nil {
+			nodes = nodeList.Items
+		}
+	}
+
 	list, err := clientSet.InstanceGroupsFor(cluster).List(ctx, metav1.ListOptions{})
 	if err != nil {
 		return commandutils.CompletionError("listing instance groups", err)
@@ -369,6 +382,26 @@ func completeInstanceOrNode(f commandutils.Factory, options *DeleteInstanceOptio
 		}
 	}
 
+// getKubeClientFromKubeconfig returns a kubernetes client from the kubeconfig,
+// assuming it has already been exported. This is not ideal, but is reasonable
+// for command completion.
+func getKubeClientFromKubeconfig(ctx context.Context, cluster *kopsapi.Cluster) (kubernetes.Interface, error) {
+	contextName := cluster.ObjectMeta.Name
+	clientGetter := genericclioptions.NewConfigFlags(true)
+	clientGetter.Context = &contextName
+
+	config, err := clientGetter.ToRESTConfig()
+	if err != nil {
+		return nil, fmt.Errorf("cannot load kubecfg settings for %q: %w", contextName, err)
+	}
+
+	k8sClient, err := kubernetes.NewForConfig(config)
+	if err != nil {
+		return nil, fmt.Errorf("cannot build kube client for %q: %w", contextName, err)
+	}
+	return k8sClient, nil
+}
+
 func appendInstance(completions []string, instance *cloudinstances.CloudInstance, longestGroup int) []string {
 	completion := instance.ID
 	if instance.CloudInstanceGroup.InstanceGroup != nil {

View File

@@ -23,6 +23,7 @@ import (
 	"io"
 	"strings"
 
+	"k8s.io/client-go/kubernetes"
 	"k8s.io/kops/pkg/cloudinstances"
 	"k8s.io/kops/pkg/commands/commandutils"
 	"k8s.io/kubectl/pkg/util/i18n"
@@ -31,10 +32,7 @@ import (
 
 	"k8s.io/klog/v2"
 
-	"k8s.io/client-go/kubernetes"
-
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/cli-runtime/pkg/genericclioptions"
 	"k8s.io/kops/util/pkg/tables"
@@ -101,11 +99,21 @@ func RunGetInstances(ctx context.Context, f *util.Factory, out io.Writer, option
 		return err
 	}
 
-	k8sClient, err := createK8sClient(cluster)
+	restConfig, err := f.RESTConfig(cluster)
 	if err != nil {
 		return err
 	}
+	httpClient, err := f.HTTPClient(cluster)
+	if err != nil {
+		return err
+	}
+	k8sClient, err := kubernetes.NewForConfigAndClient(restConfig, httpClient)
+	if err != nil {
+		return fmt.Errorf("building kubernetes client: %w", err)
+	}
 
 	nodeList, err := k8sClient.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
 	if err != nil {
 		klog.Warningf("cannot list node names. Kubernetes API unavailable: %v", err)
@@ -200,22 +208,6 @@ func instanceOutputTable(instances []*cloudinstances.CloudInstance, out io.Write
 	return t.Render(instances, out, columns...)
 }
 
-func createK8sClient(cluster *kops.Cluster) (*kubernetes.Clientset, error) {
-	contextName := cluster.ObjectMeta.Name
-	clientGetter := genericclioptions.NewConfigFlags(true)
-	clientGetter.Context = &contextName
-
-	config, err := clientGetter.ToRESTConfig()
-	if err != nil {
-		return nil, fmt.Errorf("cannot load kubecfg settings for %q: %v", contextName, err)
-	}
-
-	k8sClient, err := kubernetes.NewForConfig(config)
-	if err != nil {
-		return nil, fmt.Errorf("cannot build kubernetes api client for %q: %v", contextName, err)
-	}
-	return k8sClient, nil
-}
-
 func asRenderable(instances []*cloudinstances.CloudInstance) []*renderableCloudInstance {
 	arr := make([]*renderableCloudInstance, len(instances))
 	for i, ci := range instances {

View File

@@ -30,7 +30,6 @@ import (
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/sets"
-	"k8s.io/cli-runtime/pkg/genericclioptions"
 	"k8s.io/client-go/kubernetes"
 	_ "k8s.io/client-go/plugin/pkg/client/auth"
 	"k8s.io/kops/cmd/kops/util"
@@ -238,21 +237,22 @@ func RunRollingUpdateCluster(ctx context.Context, f *util.Factory, out io.Writer
 		return err
 	}
 
-	contextName := cluster.ObjectMeta.Name
-	clientGetter := genericclioptions.NewConfigFlags(true)
-	clientGetter.Context = &contextName
-
-	config, err := clientGetter.ToRESTConfig()
-	if err != nil {
-		return fmt.Errorf("cannot load kubecfg settings for %q: %v", contextName, err)
-	}
-
 	var nodes []v1.Node
 	var k8sClient kubernetes.Interface
 	if !options.CloudOnly {
-		k8sClient, err = kubernetes.NewForConfig(config)
+		restConfig, err := f.RESTConfig(cluster)
 		if err != nil {
-			return fmt.Errorf("cannot build kube client for %q: %v", contextName, err)
+			return fmt.Errorf("getting rest config: %w", err)
+		}
+		httpClient, err := f.HTTPClient(cluster)
+		if err != nil {
+			return fmt.Errorf("getting http client: %w", err)
+		}
+		k8sClient, err = kubernetes.NewForConfigAndClient(restConfig, httpClient)
+		if err != nil {
+			return fmt.Errorf("getting kubernetes client: %w", err)
 		}
 
 		nodeList, err := k8sClient.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
@@ -449,7 +449,12 @@ func RunRollingUpdateCluster(ctx context.Context, f *util.Factory, out io.Writer
 	var clusterValidator validation.ClusterValidator
 	if !options.CloudOnly {
-		clusterValidator, err = validation.NewClusterValidator(cluster, cloud, list, config.Host, k8sClient)
+		restConfig, err := f.RESTConfig(cluster)
+		if err != nil {
+			return fmt.Errorf("getting rest config: %w", err)
+		}
+		clusterValidator, err = validation.NewClusterValidator(cluster, cloud, list, restConfig, k8sClient)
 		if err != nil {
 			return fmt.Errorf("cannot create cluster validator: %v", err)
 		}

View File

@@ -17,11 +17,9 @@ limitations under the License.
 package main
 
 import (
-	"context"
 	"io"
 
 	channelscmd "k8s.io/kops/channels/pkg/cmd"
-	"k8s.io/kops/cmd/kops/util"
 
 	"github.com/spf13/cobra"
 )
@@ -34,8 +32,7 @@ func NewCmdToolboxAddons(out io.Writer) *cobra.Command {
 		SilenceUsage: true,
 	}
 
-	f := util.NewFactory(nil)
-	ctx := context.Background()
+	f := channelscmd.NewChannelsFactory()
 
 	// create subcommands
 	cmd.AddCommand(&cobra.Command{
@@ -43,6 +40,7 @@ func NewCmdToolboxAddons(out io.Writer) *cobra.Command {
 		Short:   "Applies updates from the given channel",
 		Example: "kops toolbox addons apply s3://<state_store>/<cluster_name>/addons/bootstrap-channel.yaml",
 		RunE: func(cmd *cobra.Command, args []string) error {
+			ctx := cmd.Context()
 			return channelscmd.RunApplyChannel(ctx, f, out, &channelscmd.ApplyChannelOptions{}, args)
 		},
 	})
@@ -50,6 +48,7 @@ func NewCmdToolboxAddons(out io.Writer) *cobra.Command {
 		Use:   "list",
 		Short: "Lists installed addons",
 		RunE: func(cmd *cobra.Command, args []string) error {
+			ctx := cmd.Context()
 			return channelscmd.RunGetAddons(ctx, f, out, &channelscmd.GetAddonsOptions{})
 		},
 	})

View File

@@ -176,6 +176,7 @@ func RunToolboxDump(ctx context.Context, f commandutils.Factory, out io.Writer,
 	var nodes corev1.NodeList
 
+	// TODO: We should use the factory to get the kubeconfig
 	kubeConfig, err := clientGetter.ToRESTConfig()
 	if err != nil {
 		klog.Warningf("cannot load kubeconfig settings for %q: %v", contextName, err)

View File

@@ -18,20 +18,20 @@ package util
 import (
 	"fmt"
+	"net/http"
 	"net/url"
 	"strings"
+	"sync"
 
-	certmanager "github.com/cert-manager/cert-manager/pkg/client/clientset/versioned"
 	"k8s.io/apimachinery/pkg/util/validation/field"
 	"k8s.io/cli-runtime/pkg/genericclioptions"
 	"k8s.io/client-go/dynamic"
-	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/rest"
-	"k8s.io/client-go/restmapper"
 	"k8s.io/client-go/tools/clientcmd"
 	"k8s.io/klog/v2"
-	channelscmd "k8s.io/kops/channels/pkg/cmd"
 	gceacls "k8s.io/kops/pkg/acls/gce"
+	"k8s.io/kops/pkg/apis/kops"
 	kopsclient "k8s.io/kops/pkg/client/clientset_generated/clientset"
 	"k8s.io/kops/pkg/client/simple"
 	"k8s.io/kops/pkg/client/simple/api"
@@ -44,24 +44,36 @@ type FactoryOptions struct {
 }
 
 type Factory struct {
-	ConfigFlags genericclioptions.ConfigFlags
-	options     *FactoryOptions
+	options *FactoryOptions
 
 	clientset simple.Clientset
 
-	kubernetesClient  kubernetes.Interface
-	certManagerClient certmanager.Interface
-	vfsContext        *vfs.VFSContext
+	vfsContext *vfs.VFSContext
 
-	cachedRESTConfig *rest.Config
-	dynamicClient    dynamic.Interface
-	restMapper       *restmapper.DeferredDiscoveryRESTMapper
+	// mutex protects access to the clusters map
+	mutex sync.Mutex
+	// clusters holds REST connection configuration for connecting to clusters
+	clusters map[string]*clusterInfo
+}
+
+// clusterInfo holds REST connection configuration for connecting to a cluster
+type clusterInfo struct {
+	clusterName string
+
+	cachedHTTPClient    *http.Client
+	cachedRESTConfig    *rest.Config
+	cachedDynamicClient dynamic.Interface
 }
 
 func NewFactory(options *FactoryOptions) *Factory {
 	gceacls.Register()
 
+	if options == nil {
+		options = &FactoryOptions{}
+	}
+
 	return &Factory{
 		options: options,
+		clusters: make(map[string]*clusterInfo),
 	}
 }
@@ -143,14 +155,38 @@ func (f *Factory) KopsStateStore() string {
 	return f.options.RegistryPath
 }
 
-var _ channelscmd.Factory = &Factory{}
+func (f *Factory) getClusterInfo(clusterName string) *clusterInfo {
+	f.mutex.Lock()
+	defer f.mutex.Unlock()
 
-func (f *Factory) restConfig() (*rest.Config, error) {
+	if clusterInfo, ok := f.clusters[clusterName]; ok {
+		return clusterInfo
+	}
+	clusterInfo := &clusterInfo{}
+	f.clusters[clusterName] = clusterInfo
+	return clusterInfo
+}
+
+func (f *Factory) RESTConfig(cluster *kops.Cluster) (*rest.Config, error) {
+	clusterInfo := f.getClusterInfo(cluster.ObjectMeta.Name)
+	return clusterInfo.RESTConfig()
+}
+
+func (f *clusterInfo) RESTConfig() (*rest.Config, error) {
 	if f.cachedRESTConfig == nil {
-		restConfig, err := f.ConfigFlags.ToRESTConfig()
-		if err != nil {
-			return nil, fmt.Errorf("cannot load kubecfg settings: %w", err)
+		// Get the kubeconfig from the context
+		clientGetter := genericclioptions.NewConfigFlags(true)
+		if f.clusterName != "" {
+			contextName := f.clusterName
+			clientGetter.Context = &contextName
 		}
+
+		restConfig, err := clientGetter.ToRESTConfig()
+		if err != nil {
+			return nil, fmt.Errorf("loading kubecfg settings for %q: %w", f.clusterName, err)
+		}
+
 		restConfig.UserAgent = "kops"
 		restConfig.Burst = 50
 		restConfig.QPS = 20
@@ -159,67 +195,51 @@ func (f *Factory) restConfig() (*rest.Config, error) {
 	return f.cachedRESTConfig, nil
 }
 
-func (f *Factory) KubernetesClient() (kubernetes.Interface, error) {
-	if f.kubernetesClient == nil {
-		restConfig, err := f.restConfig()
-		if err != nil {
-			return nil, err
-		}
-		k8sClient, err := kubernetes.NewForConfig(restConfig)
-		if err != nil {
-			return nil, fmt.Errorf("cannot build kube client: %w", err)
-		}
-		f.kubernetesClient = k8sClient
-	}
-
-	return f.kubernetesClient, nil
-}
-
-func (f *Factory) DynamicClient() (dynamic.Interface, error) {
-	if f.dynamicClient == nil {
-		restConfig, err := f.restConfig()
-		if err != nil {
-			return nil, fmt.Errorf("cannot load kubecfg settings: %w", err)
-		}
-		dynamicClient, err := dynamic.NewForConfig(restConfig)
-		if err != nil {
-			return nil, fmt.Errorf("cannot build dynamicClient client: %v", err)
-		}
-		f.dynamicClient = dynamicClient
-	}
-	return f.dynamicClient, nil
-}
-
-func (f *Factory) CertManagerClient() (certmanager.Interface, error) {
-	if f.certManagerClient == nil {
-		restConfig, err := f.restConfig()
-		if err != nil {
-			return nil, err
-		}
-		certManagerClient, err := certmanager.NewForConfig(restConfig)
-		if err != nil {
-			return nil, fmt.Errorf("cannot build kube client: %v", err)
-		}
-		f.certManagerClient = certManagerClient
-	}
-	return f.certManagerClient, nil
-}
-
-func (f *Factory) RESTMapper() (*restmapper.DeferredDiscoveryRESTMapper, error) {
-	if f.restMapper == nil {
-		discoveryClient, err := f.ConfigFlags.ToDiscoveryClient()
-		if err != nil {
-			return nil, err
-		}
-		restMapper := restmapper.NewDeferredDiscoveryRESTMapper(discoveryClient)
-		f.restMapper = restMapper
-	}
-
-	return f.restMapper, nil
-}
+func (f *Factory) HTTPClient(cluster *kops.Cluster) (*http.Client, error) {
+	clusterInfo := f.getClusterInfo(cluster.ObjectMeta.Name)
+	return clusterInfo.HTTPClient()
+}
+
+func (f *clusterInfo) HTTPClient() (*http.Client, error) {
+	if f.cachedHTTPClient == nil {
+		restConfig, err := f.RESTConfig()
+		if err != nil {
+			return nil, err
+		}
+		httpClient, err := rest.HTTPClientFor(restConfig)
+		if err != nil {
+			return nil, fmt.Errorf("building http client: %w", err)
+		}
+		f.cachedHTTPClient = httpClient
+	}
+	return f.cachedHTTPClient, nil
+}
+
+// DynamicClient returns a dynamic client
+func (f *Factory) DynamicClient(clusterName string) (dynamic.Interface, error) {
+	clusterInfo := f.getClusterInfo(clusterName)
+	return clusterInfo.DynamicClient()
+}
+
+func (f *clusterInfo) DynamicClient() (dynamic.Interface, error) {
+	if f.cachedDynamicClient == nil {
+		restConfig, err := f.RESTConfig()
+		if err != nil {
+			return nil, err
+		}
+		httpClient, err := f.HTTPClient()
+		if err != nil {
+			return nil, err
+		}
+		dynamicClient, err := dynamic.NewForConfigAndClient(restConfig, httpClient)
+		if err != nil {
+			return nil, fmt.Errorf("building dynamic client: %w", err)
+		}
+		f.cachedDynamicClient = dynamicClient
+	}
+	return f.cachedDynamicClient, nil
+}
 
 func (f *Factory) VFSContext() *vfs.VFSContext {

View File

@@ -25,6 +25,7 @@ import (
 	"strings"
 	"time"
 
+	"k8s.io/client-go/kubernetes"
 	"k8s.io/kops/pkg/commands/commandutils"
 	"k8s.io/kops/upup/pkg/fi/cloudup"
 	"k8s.io/kubectl/pkg/util/i18n"
@@ -33,8 +34,6 @@ import (
 	"github.com/spf13/cobra"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/client-go/kubernetes"
-	"k8s.io/client-go/tools/clientcmd"
 	"k8s.io/klog/v2"
 	"k8s.io/kops/cmd/kops/util"
 	kopsapi "k8s.io/kops/pkg/apis/kops"
@@ -148,27 +147,37 @@ func RunValidateCluster(ctx context.Context, f *util.Factory, out io.Writer, opt
 		return nil, fmt.Errorf("no InstanceGroup objects found")
 	}
 
-	// TODO: Refactor into util.Factory
-	contextName := cluster.ObjectMeta.Name
-	configLoadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
-	if options.kubeconfig != "" {
-		configLoadingRules.ExplicitPath = options.kubeconfig
-	}
-	config, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
-		configLoadingRules,
-		&clientcmd.ConfigOverrides{CurrentContext: contextName}).ClientConfig()
+	// // TODO: Refactor into util.Factory
+	// contextName := cluster.ObjectMeta.Name
+	// configLoadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
+	// if options.kubeconfig != "" {
+	// 	configLoadingRules.ExplicitPath = options.kubeconfig
+	// }
+	// config, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
+	// 	configLoadingRules,
+	// 	&clientcmd.ConfigOverrides{CurrentContext: contextName}).ClientConfig()
+	// if err != nil {
+	// 	return nil, fmt.Errorf("cannot load kubecfg settings for %q: %v", contextName, err)
+	// }
+
+	restConfig, err := f.RESTConfig(cluster)
 	if err != nil {
-		return nil, fmt.Errorf("cannot load kubecfg settings for %q: %v", contextName, err)
+		return nil, fmt.Errorf("getting rest config: %w", err)
 	}
 
-	k8sClient, err := kubernetes.NewForConfig(config)
+	httpClient, err := f.HTTPClient(cluster)
 	if err != nil {
-		return nil, fmt.Errorf("cannot build kubernetes api client for %q: %v", contextName, err)
+		return nil, fmt.Errorf("getting http client: %w", err)
+	}
+
+	k8sClient, err := kubernetes.NewForConfigAndClient(restConfig, httpClient)
+	if err != nil {
+		return nil, fmt.Errorf("building kubernetes client: %w", err)
 	}
 
 	timeout := time.Now().Add(options.wait)
 
-	validator, err := validation.NewClusterValidator(cluster, cloud, list, config.Host, k8sClient)
+	validator, err := validation.NewClusterValidator(cluster, cloud, list, restConfig, k8sClient)
 	if err != nil {
 		return nil, fmt.Errorf("unexpected error creating validatior: %v", err)
 	}

View File

@@ -17,6 +17,9 @@ limitations under the License.
 package commandutils
 
 import (
+	"k8s.io/client-go/rest"
+
+	"k8s.io/kops/pkg/apis/kops"
 	"k8s.io/kops/pkg/client/simple"
 	"k8s.io/kops/util/pkg/vfs"
 )
@@ -24,4 +27,5 @@ import (
 type Factory interface {
 	KopsClient() (simple.Clientset, error)
 	VFSContext() *vfs.VFSContext
+	RESTConfig(cluster *kops.Cluster) (*rest.Config, error)
 }

View File

@@ -37,7 +37,6 @@ import (
 	"golang.org/x/crypto/ssh/agent"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/cli-runtime/pkg/genericclioptions"
 	"k8s.io/client-go/rest"
 	"k8s.io/klog/v2"
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -110,14 +109,9 @@ func RunToolboxEnroll(ctx context.Context, f commandutils.Factory, out io.Writer
 	// Enroll the node over SSH.
 	if options.Host != "" {
-		// TODO: This is the pattern we use a lot, but should we try to access it directly?
-		contextName := fullCluster.ObjectMeta.Name
-		clientGetter := genericclioptions.NewConfigFlags(true)
-		clientGetter.Context = &contextName
-
-		restConfig, err := clientGetter.ToRESTConfig()
+		restConfig, err := f.RESTConfig(fullCluster)
 		if err != nil {
-			return fmt.Errorf("cannot load kubecfg settings for %q: %w", contextName, err)
+			return err
 		}
 
 		if err := enrollHost(ctx, fullInstanceGroup, options, bootstrapData, restConfig); err != nil {

View File

@@ -25,11 +25,11 @@ import (
 	"time"
 
 	"k8s.io/apimachinery/pkg/util/errors"
-	"k8s.io/kops/pkg/client/simple"
 
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/klog/v2"
 	api "k8s.io/kops/pkg/apis/kops"
+	"k8s.io/kops/pkg/client/simple"
 	"k8s.io/kops/pkg/cloudinstances"
 	"k8s.io/kops/pkg/validation"
 	"k8s.io/kops/upup/pkg/fi"

View File

@@ -25,6 +25,7 @@ import (
 	"strings"
 
 	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/pager"
 	"k8s.io/kops/pkg/apis/kops"
 	"k8s.io/kops/upup/pkg/fi"
@@ -62,7 +63,7 @@ type clusterValidatorImpl struct {
 	cluster        *kops.Cluster
 	cloud          fi.Cloud
 	instanceGroups []*kops.InstanceGroup
-	host           string
+	restConfig     *rest.Config
 	k8sClient      kubernetes.Interface
 }
 
@@ -100,7 +101,7 @@ func hasPlaceHolderIP(host string) (string, error) {
 	return "", nil
 }
 
-func NewClusterValidator(cluster *kops.Cluster, cloud fi.Cloud, instanceGroupList *kops.InstanceGroupList, host string, k8sClient kubernetes.Interface) (ClusterValidator, error) {
+func NewClusterValidator(cluster *kops.Cluster, cloud fi.Cloud, instanceGroupList *kops.InstanceGroupList, restConfig *rest.Config, k8sClient kubernetes.Interface) (ClusterValidator, error) {
 	var instanceGroups []*kops.InstanceGroup
 
 	for i := range instanceGroupList.Items {
@@ -116,7 +117,7 @@ func NewClusterValidator(cluster *kops.Cluster, cloud fi.Cloud, instanceGroupLis
 		cluster:        cluster,
 		cloud:          cloud,
 		instanceGroups: instanceGroups,
-		host:           host,
+		restConfig:     restConfig,
 		k8sClient:      k8sClient,
 	}, nil
 }
@@ -133,7 +134,7 @@ func (v *clusterValidatorImpl) Validate() (*ValidationCluster, error) {
 		dnsProvider = kops.ExternalDNSProviderExternalDNS
 	}
 
-	hasPlaceHolderIPAddress, err := hasPlaceHolderIP(v.host)
+	hasPlaceHolderIPAddress, err := hasPlaceHolderIP(v.restConfig.Host)
 	if err != nil {
 		return nil, err
 	}

View File

@@ -26,6 +26,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/client-go/kubernetes/fake"
+	"k8s.io/client-go/rest"
 	kopsapi "k8s.io/kops/pkg/apis/kops"
 	"k8s.io/kops/pkg/cloudinstances"
 	"k8s.io/kops/upup/pkg/fi"
@@ -126,7 +127,10 @@ func testValidate(t *testing.T, groups map[string]*cloudinstances.CloudInstanceG
 	mockcloud := BuildMockCloud(t, groups, cluster, instanceGroups)
 
-	validator, err := NewClusterValidator(cluster, mockcloud, &kopsapi.InstanceGroupList{Items: instanceGroups}, "https://api.testcluster.k8s.local", fake.NewSimpleClientset(objects...))
+	restConfig := &rest.Config{
+		Host: "https://api.testcluster.k8s.local",
+	}
+	validator, err := NewClusterValidator(cluster, mockcloud, &kopsapi.InstanceGroupList{Items: instanceGroups}, restConfig, fake.NewSimpleClientset(objects...))
 	if err != nil {
 		return nil, err
 	}
@@ -156,7 +160,10 @@ func Test_ValidateCloudGroupMissing(t *testing.T) {
 	mockcloud := BuildMockCloud(t, nil, cluster, instanceGroups)
 
-	validator, err := NewClusterValidator(cluster, mockcloud, &kopsapi.InstanceGroupList{Items: instanceGroups}, "https://api.testcluster.k8s.local", fake.NewSimpleClientset())
+	restConfig := &rest.Config{
+		Host: "https://api.testcluster.k8s.local",
+	}
+	validator, err := NewClusterValidator(cluster, mockcloud, &kopsapi.InstanceGroupList{Items: instanceGroups}, restConfig, fake.NewSimpleClientset())
 	require.NoError(t, err)
 
 	v, err := validator.Validate()
 	require.NoError(t, err)