fix some go-lint warnings

Signed-off-by: ZouYu <zouy.fnst@cn.fujitsu.com>
ZouYu 2020-06-01 16:25:52 +08:00
parent 2d2678f160
commit 2fc52ec6be
54 changed files with 151 additions and 158 deletions

View File

@ -142,11 +142,11 @@ func runCompletionBash(out io.Writer, cmd *cobra.Command) error {
}
func runCompletionZsh(out io.Writer, cmd *cobra.Command) error {
zsh_head := "#compdef kops\n"
zshHead := "#compdef kops\n"
out.Write([]byte(zsh_head))
out.Write([]byte(zshHead))
zsh_initialization := `
zshInitialization := `
__kops_bash_source() {
alias shopt=':'
alias _expand=_bash_expand
@ -272,18 +272,18 @@ __kops_convert_bash_to_zsh() {
-e "s/\\\$(type${RWORD}/\$(__kops_type/g" \
<<'BASH_COMPLETION_EOF'
`
out.Write([]byte(zsh_initialization))
out.Write([]byte(zshInitialization))
buf := new(bytes.Buffer)
cmd.GenBashCompletion(buf)
out.Write(buf.Bytes())
zsh_tail := `
zshTail := `
BASH_COMPLETION_EOF
}
__kops_bash_source <(__kops_convert_bash_to_zsh)
_complete kops 2>/dev/null
`
out.Write([]byte(zsh_tail))
out.Write([]byte(zshTail))
return nil
}
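
The hunks above fix golint's var-naming warning: Go identifiers should use mixedCaps rather than underscores, so zsh_head, zsh_initialization and zsh_tail become zshHead, zshInitialization and zshTail. A minimal self-contained sketch of the resulting style (the completion body and kops wiring are stand-ins, not copied from the commit):

package main

import (
	"bytes"
	"fmt"
	"io"
	"os"
)

// writeZshCompletion shows the post-lint naming: zshHead/zshTail instead of
// zsh_head/zsh_tail. The completion body itself is elided; only the shape matters.
func writeZshCompletion(out io.Writer, body []byte) error {
	zshHead := "#compdef kops\n"
	zshTail := "\n_complete kops 2>/dev/null\n"

	buf := new(bytes.Buffer)
	buf.WriteString(zshHead)
	buf.Write(body)
	buf.WriteString(zshTail)

	_, err := out.Write(buf.Bytes())
	return err
}

func main() {
	if err := writeZshCompletion(os.Stdout, []byte("# completion body\n")); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}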

View File

@ -133,7 +133,7 @@ func RunCreateSecretCaCert(ctx context.Context, f *util.Factory, out io.Writer,
return fmt.Errorf("error loading certificate %q: %v", options.CaCertPath, err)
}
err = keyStore.StoreKeypair(fi.CertificateId_CA, cert, privateKey)
err = keyStore.StoreKeypair(fi.CertificateIDCA, cert, privateKey)
if err != nil {
return fmt.Errorf("error storing user provided keys %q %q: %v", options.CaCertPath, options.CaPrivateKeyPath, err)
}

View File

@ -127,7 +127,7 @@ func RunDeleteSecret(ctx context.Context, f *util.Factory, out io.Writer, option
if options.SecretID != "" {
var matches []*fi.KeystoreItem
for _, s := range secrets {
if s.Id == options.SecretID {
if s.ID == options.SecretID {
matches = append(matches, s)
}
}
@ -157,7 +157,7 @@ func RunDeleteSecret(ctx context.Context, f *util.Factory, out io.Writer, option
keyset := &kops.Keyset{}
keyset.Name = secrets[0].Name
keyset.Spec.Type = secrets[0].Type
err = keyStore.DeleteKeysetItem(keyset, secrets[0].Id)
err = keyStore.DeleteKeysetItem(keyset, secrets[0].ID)
}
if err != nil {
return fmt.Errorf("error deleting secret: %v", err)

View File

@ -119,7 +119,7 @@ func (c *DescribeSecretsCommand) Run(ctx context.Context, args []string) error {
for _, i := range items {
fmt.Fprintf(w, "Name:\t%s\n", i.Name)
fmt.Fprintf(w, "Type:\t%s\n", i.Type)
fmt.Fprintf(w, "Id:\t%s\n", i.Id)
fmt.Fprintf(w, "Id:\t%s\n", i.ID)
switch i.Type {
case kops.SecretTypeKeypair:

View File

@ -107,7 +107,7 @@ func listSecrets(keyStore fi.CAStore, secretStore fi.SecretStore, sshCredentialS
item := &fi.KeystoreItem{
Name: keyset.Name,
Type: keyset.Spec.Type,
Id: key.Id,
ID: key.Id,
}
items = append(items, item)
}
@ -146,7 +146,7 @@ func listSecrets(keyStore fi.CAStore, secretStore fi.SecretStore, sshCredentialS
}
item := &fi.KeystoreItem{
Name: l[i].Name,
Id: id,
ID: id,
Type: SecretTypeSSHPublicKey,
}
if l[i].Spec.PublicKey != "" {
@ -226,7 +226,7 @@ func RunGetSecrets(ctx context.Context, options *GetSecretsOptions, args []strin
return i.Name
})
t.AddColumn("ID", func(i *fi.KeystoreItem) string {
return i.Id
return i.ID
})
t.AddColumn("TYPE", func(i *fi.KeystoreItem) string {
return string(i.Type)

View File

@ -35,13 +35,13 @@ var (
kops update cluster k8s.cluster.site --yes --state=s3://kops-state-1234
`))
update_short = i18n.T("Update a cluster.")
updateShort = i18n.T("Update a cluster.")
)
func NewCmdUpdate(f *util.Factory, out io.Writer) *cobra.Command {
cmd := &cobra.Command{
Use: "update",
Short: update_short,
Short: updateShort,
Long: updateLong,
Example: updateExample,
}

View File

@ -64,7 +64,7 @@ type DNSController struct {
// DNSController is a Context
var _ Context = &DNSController{}
// scope is a group of record objects
// DNSControllerScope is a group of record objects
type DNSControllerScope struct {
// ScopeName is the string id for this scope
ScopeName string
@ -84,7 +84,7 @@ type DNSControllerScope struct {
// DNSControllerScope is a Scope
var _ Scope = &DNSControllerScope{}
// NewDnsController creates a DnsController
// NewDNSController creates a DnsController
func NewDNSController(dnsProviders []dnsprovider.Interface, zoneRules *ZoneRules, updateInterval int) (*DNSController, error) {
dnsCache, err := newDNSCache(dnsProviders)
if err != nil {
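
The comment changes in this file address a second golint rule: a doc comment on an exported identifier should begin with that identifier's name, so "// scope is a group of record objects" becomes "// DNSControllerScope is a group of record objects" and the NewDnsController comment is updated to NewDNSController. A hedged sketch of the pattern with simplified types (the constructor name below is invented for illustration):

package dns

// DNSControllerScope is a group of record objects; the comment must begin with
// the exported name or golint reports that it "should be of the form".
type DNSControllerScope struct {
	// ScopeName is the string id for this scope
	ScopeName string
}

// NewDNSControllerScope creates a DNSControllerScope for the given name
// (a simplified, hypothetical constructor, not the real NewDNSController).
func NewDNSControllerScope(name string) *DNSControllerScope {
	return &DNSControllerScope{ScopeName: name}
}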

View File

@ -18,4 +18,5 @@ limitations under the License.
dnsprovider supplies interfaces for dns service providers (e.g. Google Cloud DNS, AWS route53, etc).
Implementations exist in the providers sub-package
*/
package dnsprovider // import "k8s.io/kops/dnsprovider/pkg/dnsprovider"

View File

@ -35,9 +35,9 @@ type Factory func(config io.Reader) (Interface, error)
var providersMutex sync.Mutex
var providers = make(map[string]Factory)
// RegisterDnsProvider registers a dnsprovider.Factory by name. This
// RegisterDNSProvider registers a dnsprovider.Factory by name. This
// is expected to happen during startup.
func RegisterDnsProvider(name string, cloud Factory) {
func RegisterDNSProvider(name string, cloud Factory) {
providersMutex.Lock()
defer providersMutex.Unlock()
if _, found := providers[name]; found {
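
RegisterDnsProvider becomes RegisterDNSProvider because golint keeps initialisms such as DNS fully upper-case, and every provider's init() registration in the files below is updated to match. A small sketch of the factory-registry pattern these call sites use, with simplified stand-ins for dnsprovider.Interface and Factory, and a panic standing in for whatever error handling the real registry performs:

package dnsprovider

import (
	"fmt"
	"io"
	"sync"
)

// Interface and Factory are simplified stand-ins for the real dnsprovider types.
type Interface interface{}
type Factory func(config io.Reader) (Interface, error)

var (
	providersMutex sync.Mutex
	providers      = make(map[string]Factory)
)

// RegisterDNSProvider registers a Factory by name; registering the same name
// twice is treated as a programming error in this sketch.
func RegisterDNSProvider(name string, cloud Factory) {
	providersMutex.Lock()
	defer providersMutex.Unlock()
	if _, found := providers[name]; found {
		panic(fmt.Sprintf("dns provider %q was registered twice", name))
	}
	providers[name] = cloud
}

A provider package then registers itself from init(), as the route53, CoreDNS, Google Cloud DNS and Designate hunks that follow show.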

View File

@ -36,7 +36,7 @@ const (
var MaxBatchSize = 900
func init() {
dnsprovider.RegisterDnsProvider(ProviderName, func(config io.Reader) (dnsprovider.Interface, error) {
dnsprovider.RegisterDNSProvider(ProviderName, func(config io.Reader) (dnsprovider.Interface, error) {
return newRoute53(config)
})
}

View File

@ -44,7 +44,7 @@ type Config struct {
}
func init() {
dnsprovider.RegisterDnsProvider(ProviderName, func(config io.Reader) (dnsprovider.Interface, error) {
dnsprovider.RegisterDNSProvider(ProviderName, func(config io.Reader) (dnsprovider.Interface, error) {
return newCoreDNSProviderInterface(config)
})
}

View File

@ -41,7 +41,7 @@ const (
)
func init() {
dnsprovider.RegisterDnsProvider(ProviderName, func(config io.Reader) (dnsprovider.Interface, error) {
dnsprovider.RegisterDNSProvider(ProviderName, func(config io.Reader) (dnsprovider.Interface, error) {
return newCloudDns(config)
})
}

View File

@ -34,7 +34,7 @@ const (
)
func init() {
dnsprovider.RegisterDnsProvider(ProviderName, func(config io.Reader) (dnsprovider.Interface, error) {
dnsprovider.RegisterDNSProvider(ProviderName, func(config io.Reader) (dnsprovider.Interface, error) {
return newDesignate(config)
})
}

View File

@ -195,7 +195,7 @@ func (c *NodeupModelContext) KubeletKubeConfig() string {
// BuildPKIKubeconfig generates a kubeconfig
func (c *NodeupModelContext) BuildPKIKubeconfig(name string) (string, error) {
ca, err := c.GetCert(fi.CertificateId_CA)
ca, err := c.GetCert(fi.CertificateIDCA)
if err != nil {
return "", err
}

View File

@ -179,12 +179,12 @@ func (b *KubeAPIServerBuilder) writeAuthenticationConfig(c *fi.ModelBuilderConte
b.Cluster.Spec.KubeAPIServer.AuthenticationTokenWebhookConfigFile = fi.String(PathAuthnConfig)
{
caCertificate, err := b.NodeupModelContext.KeyStore.FindCert(fi.CertificateId_CA)
caCertificate, err := b.NodeupModelContext.KeyStore.FindCert(fi.CertificateIDCA)
if err != nil {
return fmt.Errorf("error fetching AWS IAM Authentication CA certificate from keystore: %v", err)
}
if caCertificate == nil {
return fmt.Errorf("AWS IAM Authentication CA certificate %q not found", fi.CertificateId_CA)
return fmt.Errorf("AWS IAM Authentication CA certificate %q not found", fi.CertificateIDCA)
}
cluster := kubeconfig.KubectlCluster{

View File

@ -84,7 +84,7 @@ func (b *KubeAPIServerBuilder) addHealthcheckSidecarTasks(c *fi.ModelBuilderCont
issueCert := &nodetasks.IssueCert{
Name: id,
Signer: fi.CertificateId_CA,
Signer: fi.CertificateIDCA,
Type: "client",
Subject: pkix.Name{
CommonName: id,

View File

@ -51,7 +51,7 @@ func (b *KubeControllerManagerBuilder) Build(c *fi.ModelBuilderContext) error {
// Include the CA Key
// @TODO: use a per-machine key? use KMS?
if err := b.BuildPrivateKeyTask(c, fi.CertificateId_CA, "ca.key"); err != nil {
if err := b.BuildPrivateKeyTask(c, fi.CertificateIDCA, "ca.key"); err != nil {
return err
}

View File

@ -128,7 +128,7 @@ func (b *KubectlBuilder) findKubeconfigUser() (*fi.User, *fi.Group, error) {
if user == nil {
continue
}
group, err := fi.LookupGroupById(user.Gid)
group, err := fi.LookupGroupByID(user.Gid)
if err != nil {
klog.Warningf("unable to find group %d for user %q", user.Gid, s)
continue

View File

@ -427,9 +427,9 @@ func (b *KubeletBuilder) buildKubeletConfigSpec() (*kops.KubeletConfigSpec, erro
// Merge KubeletConfig for NodeLabels
c := &kops.KubeletConfigSpec{}
if isMaster {
reflectutils.JsonMergeStruct(c, b.Cluster.Spec.MasterKubelet)
reflectutils.JSONMergeStruct(c, b.Cluster.Spec.MasterKubelet)
} else {
reflectutils.JsonMergeStruct(c, b.Cluster.Spec.Kubelet)
reflectutils.JSONMergeStruct(c, b.Cluster.Spec.Kubelet)
}
// check if we are using secure kubelet <-> api settings
@ -489,7 +489,7 @@ func (b *KubeletBuilder) buildKubeletConfigSpec() (*kops.KubeletConfigSpec, erro
}
if b.InstanceGroup.Spec.Kubelet != nil {
reflectutils.JsonMergeStruct(c, b.InstanceGroup.Spec.Kubelet)
reflectutils.JSONMergeStruct(c, b.InstanceGroup.Spec.Kubelet)
}
// Use --register-with-taints
@ -552,7 +552,7 @@ func (b *KubeletBuilder) buildMasterKubeletKubeconfig() (*nodetasks.File, error)
}
req := &pki.IssueCertRequest{
Signer: fi.CertificateId_CA,
Signer: fi.CertificateIDCA,
Type: "client",
Subject: pkix.Name{
CommonName: fmt.Sprintf("system:node:%s", nodeName),

View File

@ -50,7 +50,7 @@ func (b *CalicoBuilder) Build(c *fi.ModelBuilderContext) error {
if err := b.BuildPrivateKeyTask(c, name, key); err != nil {
return err
}
if err := b.BuildCertificateTask(c, fi.CertificateId_CA, ca); err != nil {
if err := b.BuildCertificateTask(c, fi.CertificateIDCA, ca); err != nil {
return err
}
}

View File

@ -47,7 +47,7 @@ func (b *NodeAuthorizationBuilder) Build(c *fi.ModelBuilderContext) error {
return err
}
// creates /src/kubernetes/node-authorizer/ca.pem
if err := b.BuildCertificateTask(c, fi.CertificateId_CA, filepath.Join(name, "ca.pem")); err != nil {
if err := b.BuildCertificateTask(c, fi.CertificateIDCA, filepath.Join(name, "ca.pem")); err != nil {
return err
}
}
@ -58,7 +58,7 @@ func (b *NodeAuthorizationBuilder) Build(c *fi.ModelBuilderContext) error {
if err := b.BuildCertificatePairTask(c, "node-authorizer-client", authorizerDir, "tls"); err != nil {
return err
}
if err := b.BuildCertificateTask(c, fi.CertificateId_CA, authorizerDir+"/ca.pem"); err != nil {
if err := b.BuildCertificateTask(c, fi.CertificateIDCA, authorizerDir+"/ca.pem"); err != nil {
return err
}
}

View File

@ -45,7 +45,7 @@ func (b *SecretBuilder) Build(c *fi.ModelBuilderContext) error {
}
// @step: retrieve the platform ca
if err := b.BuildCertificateTask(c, fi.CertificateId_CA, "ca.crt"); err != nil {
if err := b.BuildCertificateTask(c, fi.CertificateIDCA, "ca.crt"); err != nil {
return err
}

View File

@ -136,13 +136,13 @@ func TestValidate_RemapImage_ContainerProxy_AppliesToImagesWithTags(t *testing.T
func TestValidate_RemapImage_ContainerRegistry_MappingMultipleTimesConverges(t *testing.T) {
builder := buildAssetBuilder(t)
mirrorUrl := "proxy.example.com"
mirrorURL := "proxy.example.com"
image := "kube-apiserver:1.2.3"
expected := "proxy.example.com/kube-apiserver:1.2.3"
version, _ := util.ParseKubernetesVersion("1.10")
builder.KubernetesVersion = *version
builder.AssetsLocation.ContainerRegistry = &mirrorUrl
builder.AssetsLocation.ContainerRegistry = &mirrorURL
remapped := image
iterations := make([]map[int]int, 2)

View File

@ -231,7 +231,7 @@ func (b *Builder) Build(cluster *kops.Cluster, ig *kops.InstanceGroup) (*Data, e
func (b *Builder) buildPKIFiles(cluster *kops.Cluster, ig *kops.InstanceGroup, keyStore fi.CAStore) ([]*DataFile, error) {
var files []*DataFile
certs := []string{fi.CertificateId_CA, "kubelet"}
certs := []string{fi.CertificateIDCA, "kubelet"}
keys := []string{"kubelet"}
// Used by kube-proxy to auth to API

View File

@ -39,13 +39,13 @@ import (
const rollingUpdateTaintKey = "kops.k8s.io/scheduled-for-update"
// promptInteractive asks the user to continue, mostly copied from vendor/google.golang.org/api/examples/gmail.go.
func promptInteractive(upgradedHostId, upgradedHostName string) (stopPrompting bool, err error) {
func promptInteractive(upgradedHostID, upgradedHostName string) (stopPrompting bool, err error) {
stopPrompting = false
scanner := bufio.NewScanner(os.Stdin)
if upgradedHostName != "" {
klog.Infof("Pausing after finished %q, node %q", upgradedHostId, upgradedHostName)
klog.Infof("Pausing after finished %q, node %q", upgradedHostID, upgradedHostName)
} else {
klog.Infof("Pausing after finished %q", upgradedHostId)
klog.Infof("Pausing after finished %q", upgradedHostID)
}
fmt.Print("Continue? (Y)es, (N)o, (A)lwaysYes: [Y] ")
scanner.Scan()
@ -321,7 +321,7 @@ func (c *RollingUpdateCluster) patchTaint(ctx context.Context, node *corev1.Node
}
func (c *RollingUpdateCluster) drainTerminateAndWait(ctx context.Context, u *cloudinstances.CloudInstanceGroupMember, isBastion bool, sleepAfterTerminate time.Duration) error {
instanceId := u.ID
instanceID := u.ID
nodeName := ""
if u.Node != nil {
@ -346,7 +346,7 @@ func (c *RollingUpdateCluster) drainTerminateAndWait(ctx context.Context, u *clo
klog.Infof("Ignoring error draining node %q: %v", nodeName, err)
}
} else {
klog.Warningf("Skipping drain of instance %q, because it is not registered in kubernetes", instanceId)
klog.Warningf("Skipping drain of instance %q, because it is not registered in kubernetes", instanceID)
}
}
@ -354,7 +354,7 @@ func (c *RollingUpdateCluster) drainTerminateAndWait(ctx context.Context, u *clo
// (It often seems like GCE tries to re-use names)
if !isBastion && !c.CloudOnly {
if u.Node == nil {
klog.Warningf("no kubernetes Node associated with %s, skipping node deletion", instanceId)
klog.Warningf("no kubernetes Node associated with %s, skipping node deletion", instanceID)
} else {
klog.Infof("deleting node %q from kubernetes", nodeName)
if err := c.deleteNode(ctx, u.Node); err != nil {
@ -364,7 +364,7 @@ func (c *RollingUpdateCluster) drainTerminateAndWait(ctx context.Context, u *clo
}
if err := c.deleteInstance(u); err != nil {
klog.Errorf("error deleting instance %q, node %q: %v", instanceId, nodeName, err)
klog.Errorf("error deleting instance %q, node %q: %v", instanceID, nodeName, err)
return err
}
@ -415,11 +415,10 @@ func (c *RollingUpdateCluster) validateClusterWithTimeout(validateCount int) err
if successCount >= validateCount {
klog.Info("Cluster validated.")
return nil
} else {
klog.Infof("Cluster validated; revalidating in %s to make sure it does not flap.", c.ValidateSuccessDuration)
time.Sleep(c.ValidateSuccessDuration)
continue
}
klog.Infof("Cluster validated; revalidating in %s to make sure it does not flap.", c.ValidateSuccessDuration)
time.Sleep(c.ValidateSuccessDuration)
continue
}
if err != nil {
@ -467,9 +466,8 @@ func (c *RollingUpdateCluster) detachInstance(u *cloudinstances.CloudInstanceGro
if err := c.Cloud.DetachInstance(u); err != nil {
if nodeName != "" {
return fmt.Errorf("error detaching instance %q, node %q: %v", id, nodeName, err)
} else {
return fmt.Errorf("error detaching instance %q: %v", id, err)
}
return fmt.Errorf("error detaching instance %q: %v", id, err)
}
return nil
@ -491,9 +489,8 @@ func (c *RollingUpdateCluster) deleteInstance(u *cloudinstances.CloudInstanceGro
if err := c.Cloud.DeleteInstance(u); err != nil {
if nodeName != "" {
return fmt.Errorf("error deleting instance %q, node %q: %v", id, nodeName, err)
} else {
return fmt.Errorf("error deleting instance %q: %v", id, err)
}
return fmt.Errorf("error deleting instance %q: %v", id, err)
}
return nil
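
Besides the identifier renames, this file drops several else blocks that directly follow a return, following golint's advice to outdent the else when the if branch always returns. A before/after sketch under assumed names (the detach callback is a placeholder for the cloud call):

package main

import (
	"errors"
	"fmt"
)

// detachBefore keeps the else block; golint suggests dropping it because the
// if branch always returns.
func detachBefore(id, nodeName string, detach func() error) error {
	if err := detach(); err != nil {
		if nodeName != "" {
			return fmt.Errorf("error detaching instance %q, node %q: %v", id, nodeName, err)
		} else {
			return fmt.Errorf("error detaching instance %q: %v", id, err)
		}
	}
	return nil
}

// detachAfter is the post-lint shape used in the commit.
func detachAfter(id, nodeName string, detach func() error) error {
	if err := detach(); err != nil {
		if nodeName != "" {
			return fmt.Errorf("error detaching instance %q, node %q: %v", id, nodeName, err)
		}
		return fmt.Errorf("error detaching instance %q: %v", id, err)
	}
	return nil
}

func main() {
	fail := func() error { return errors.New("boom") }
	fmt.Println(detachBefore("i-123", "node-a", fail))
	fmt.Println(detachAfter("i-123", "", fail))
}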

View File

@ -89,7 +89,7 @@ func BuildKubecfg(cluster *kops.Cluster, keyStore fi.Keystore, secretStore fi.Se
// add the CA Cert to the kubeconfig only if we didn't specify a SSL cert for the LB
if cluster.Spec.API == nil || cluster.Spec.API.LoadBalancer == nil || cluster.Spec.API.LoadBalancer.SSLCertificate == "" {
cert, _, _, err := keyStore.FindKeypair(fi.CertificateId_CA)
cert, _, _, err := keyStore.FindKeypair(fi.CertificateIDCA)
if err != nil {
return nil, fmt.Errorf("error fetching CA keypair: %v", err)
}

View File

@ -40,7 +40,7 @@ func (b *PKIModelBuilder) Build(c *fi.ModelBuilderContext) error {
// TODO: Only create the CA via this task
defaultCA := &fitasks.Keypair{
Name: fi.String(fi.CertificateId_CA),
Name: fi.String(fi.CertificateIDCA),
Lifecycle: b.Lifecycle,
Subject: "cn=kubernetes",
Type: "ca",

View File

@ -39,13 +39,13 @@ func BuildNodeLabels(cluster *kops.Cluster, instanceGroup *kops.InstanceGroup) (
// Merge KubeletConfig for NodeLabels
c := &kops.KubeletConfigSpec{}
if isMaster {
reflectutils.JsonMergeStruct(c, cluster.Spec.MasterKubelet)
reflectutils.JSONMergeStruct(c, cluster.Spec.MasterKubelet)
} else {
reflectutils.JsonMergeStruct(c, cluster.Spec.Kubelet)
reflectutils.JSONMergeStruct(c, cluster.Spec.Kubelet)
}
if instanceGroup.Spec.Kubelet != nil {
reflectutils.JsonMergeStruct(c, instanceGroup.Spec.Kubelet)
reflectutils.JSONMergeStruct(c, instanceGroup.Spec.Kubelet)
}
nodeLabels := c.NodeLabels

View File

@ -39,7 +39,7 @@ const (
)
func init() {
dnsprovider.RegisterDnsProvider(providerName, func(config io.Reader) (dnsprovider.Interface, error) {
dnsprovider.RegisterDNSProvider(providerName, func(config io.Reader) (dnsprovider.Interface, error) {
client, err := newClient()
if err != nil {
return nil, err
@ -54,7 +54,7 @@ type TokenSource struct {
AccessToken string
}
// Token() returns oauth2.Token
// Token returns oauth2.Token
func (t *TokenSource) Token() (*oauth2.Token, error) {
token := &oauth2.Token{
AccessToken: t.AccessToken,

View File

@ -87,15 +87,15 @@ func (l StringOrSlice) Equal(r StringOrSlice) bool {
// MarshalJSON implements the json.Marshaller interface.
func (v StringOrSlice) MarshalJSON() ([]byte, error) {
encodeAsJsonArray := v.forceEncodeAsArray
encodeAsJSONArray := v.forceEncodeAsArray
if len(v.values) > 1 {
encodeAsJsonArray = true
encodeAsJSONArray = true
}
values := v.values
if values == nil {
values = []string{}
}
if encodeAsJsonArray {
if encodeAsJSONArray {
return json.Marshal(values)
} else if len(v.values) == 1 {
s := v.values[0]
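
The encodeAsJsonArray → encodeAsJSONArray rename sits inside a custom json.Marshaler that emits a bare string for a single value and a JSON array otherwise. A compact sketch of that encoding decision using a simplified stand-in type, not the real kops StringOrSlice:

package main

import (
	"encoding/json"
	"fmt"
)

// stringOrSlice is a simplified stand-in: one value encodes as a string,
// several values (or a forced flag) encode as a JSON array.
type stringOrSlice struct {
	values             []string
	forceEncodeAsArray bool
}

func (v stringOrSlice) MarshalJSON() ([]byte, error) {
	encodeAsJSONArray := v.forceEncodeAsArray || len(v.values) > 1
	values := v.values
	if values == nil {
		values = []string{}
	}
	if encodeAsJSONArray {
		return json.Marshal(values)
	}
	if len(values) == 1 {
		return json.Marshal(values[0])
	}
	return json.Marshal(values)
}

func main() {
	a, _ := json.Marshal(stringOrSlice{values: []string{"x"}})
	b, _ := json.Marshal(stringOrSlice{values: []string{"x", "y"}})
	fmt.Println(string(a), string(b)) // "x" ["x","y"]
}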

View File

@ -58,15 +58,15 @@ func TestRoundTrip(t *testing.T) {
},
}
for _, g := range grid {
actualJson, err := json.Marshal(g.Value)
actualJSON, err := json.Marshal(g.Value)
if err != nil {
t.Errorf("error encoding StringOrSlice %s to json: %v", g.Value, err)
}
klog.V(8).Infof("marshalled %s -> %q", g.Value, actualJson)
klog.V(8).Infof("marshalled %s -> %q", g.Value, actualJSON)
if g.JSON != string(actualJson) {
t.Errorf("Unexpected JSON encoding. Actual=%q, Expected=%q", string(actualJson), g.JSON)
if g.JSON != string(actualJSON) {
t.Errorf("Unexpected JSON encoding. Actual=%q, Expected=%q", string(actualJSON), g.JSON)
}
parsed := &StringOrSlice{}

View File

@ -83,7 +83,7 @@ func Uint64Value(v *uint64) uint64 {
return *v
}
func DebugAsJsonString(v interface{}) string {
func DebugAsJSONString(v interface{}) string {
data, err := json.Marshal(v)
if err != nil {
return fmt.Sprintf("error marshaling: %v", err)

View File

@ -47,7 +47,7 @@ type Source struct {
ExtractFromArchive string
}
// Builds a unique key for this source
// Key builds a unique key for this source
func (s *Source) Key() string {
var k string
if s.Parent != nil {
@ -129,7 +129,7 @@ func (a *AssetStore) Find(key string, assetPath string) (Resource, error) {
return nil, fmt.Errorf("found multiple matching assets for key: %q", key)
}
func hashFromHttpHeader(url string) (*hashing.Hash, error) {
func hashFromHTTPHeader(url string) (*hashing.Hash, error) {
klog.Infof("Doing HTTP HEAD on %q", url)
response, err := http.Head(url)
if err != nil {
@ -180,7 +180,7 @@ func (a *AssetStore) addURLs(urls []string, hash *hashing.Hash) error {
var err error
if hash == nil {
for _, url := range urls {
hash, err = hashFromHttpHeader(url)
hash, err = hashFromHTTPHeader(url)
if err != nil {
klog.Warningf("unable to get hash from %q: %v", url, err)
continue
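
hashFromHttpHeader becomes hashFromHTTPHeader. The function performs an HTTP HEAD request and derives a hash for the asset; this hunk does not show which response header kops inspects, so the header name in the sketch below is purely hypothetical:

package main

import (
	"fmt"
	"net/http"
)

// hashFromHTTPHeader does a HEAD request and returns a checksum advertised in a
// response header. The header name is hypothetical; the real kops code decides
// the hash from its own conventions.
func hashFromHTTPHeader(url string) (string, error) {
	response, err := http.Head(url)
	if err != nil {
		return "", fmt.Errorf("error doing HEAD on %q: %v", url, err)
	}
	defer response.Body.Close()

	hash := response.Header.Get("X-Checksum-Sha256")
	if hash == "" {
		return "", fmt.Errorf("no checksum header on %q", url)
	}
	return hash, nil
}

func main() {
	if h, err := hashFromHTTPHeader("https://example.com/artifact"); err != nil {
		fmt.Println("no hash:", err)
	} else {
		fmt.Println("hash:", h)
	}
}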

View File

@ -25,7 +25,7 @@ import (
"k8s.io/kops/util/pkg/vfs"
)
const CertificateId_CA = "ca"
const CertificateIDCA = "ca"
const (
// SecretNameSSHPrimary is the Name for the primary SSH key
@ -39,7 +39,7 @@ const (
type KeystoreItem struct {
Type kops.KeysetType
Name string
Id string
ID string
Data []byte
}
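
This file is the hub of most renames in the commit: the exported constant CertificateId_CA becomes CertificateIDCA and the KeystoreItem field Id becomes ID, since golint rejects underscores in Go names and treats initialisms such as ID, URL, JSON and DNS as single upper-case units; the earlier files merely follow these definitions. A minimal sketch of the post-rename shapes, with the kops-specific types reduced to strings:

package main

import "fmt"

// CertificateIDCA is the well-known name of the cluster CA keypair
// (previously CertificateId_CA); initialisms like ID stay upper-case.
const CertificateIDCA = "ca"

// KeystoreItem is a pared-down stand-in for the kops type; the relevant change
// is the field name ID (was Id).
type KeystoreItem struct {
	Type string // the real field is kops.KeysetType
	Name string
	ID   string
	Data []byte
}

func main() {
	item := KeystoreItem{Type: "Keypair", Name: "kubernetes-ca", ID: CertificateIDCA}
	fmt.Printf("%s %s %s\n", item.Type, item.Name, item.ID)
}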

View File

@ -409,8 +409,8 @@ func deleteKeysetItem(client kopsinternalversion.KeysetInterface, name string, k
return nil
}
// addSshCredential saves the specified SSH Credential to the registry, doing an update or insert
func (c *ClientsetCAStore) addSshCredential(ctx context.Context, name string, publicKey string) error {
// addSSHCredential saves the specified SSH Credential to the registry, doing an update or insert
func (c *ClientsetCAStore) addSSHCredential(ctx context.Context, name string, publicKey string) error {
create := false
client := c.clientset.SSHCredentials(c.namespace)
sshCredential, err := client.Get(ctx, name, metav1.GetOptions{})
@ -487,7 +487,7 @@ func (c *ClientsetCAStore) AddSSHPublicKey(name string, pubkey []byte) error {
//}
//id = formatFingerprint(h.Sum(nil))
return c.addSshCredential(ctx, name, string(pubkey))
return c.addSSHCredential(ctx, name, string(pubkey))
}
// FindSSHPublicKeys implements CAStore::FindSSHPublicKeys

View File

@ -636,10 +636,10 @@ func (c *ApplyClusterCmd) Run(ctx context.Context) error {
KopsModelContext: modelContext,
}
storageAclLifecycle := securityLifecycle
if storageAclLifecycle != fi.LifecycleIgnore {
storageACLLifecycle := securityLifecycle
if storageACLLifecycle != fi.LifecycleIgnore {
// This is a best-effort permissions fix
storageAclLifecycle = fi.LifecycleWarnIfInsufficientAccess
storageACLLifecycle = fi.LifecycleWarnIfInsufficientAccess
}
l.Builders = append(l.Builders,
@ -652,7 +652,7 @@ func (c *ApplyClusterCmd) Run(ctx context.Context) error {
)
l.Builders = append(l.Builders,
&gcemodel.StorageAclBuilder{GCEModelContext: gceModelContext, Cloud: cloud.(gce.GCECloud), Lifecycle: &storageAclLifecycle},
&gcemodel.StorageAclBuilder{GCEModelContext: gceModelContext, Cloud: cloud.(gce.GCECloud), Lifecycle: &storageACLLifecycle},
)
case kops.CloudProviderALI:
@ -1031,7 +1031,7 @@ func (c *ApplyClusterCmd) validateKubernetesVersion() error {
klog.Warningf("unable to parse kops version %q", kopsVersion)
} else {
tooNewVersion := kopsVersion
tooNewVersion.Minor += 1
tooNewVersion.Minor++
tooNewVersion.Pre = nil
tooNewVersion.Build = nil
if util.IsKubernetesGTE(tooNewVersion.String(), *parsed) {
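
Two small fixes here besides storageAclLifecycle → storageACLLifecycle: tooNewVersion.Minor += 1 becomes tooNewVersion.Minor++, per golint's preference for the increment statement. A tiny sketch of the version-skew bump this feeds, using a hypothetical version struct rather than the real semver type:

package main

import "fmt"

// version is a hypothetical stand-in for the semver type kops uses.
type version struct {
	Major, Minor uint64
	Pre, Build   []string
}

func main() {
	kopsVersion := version{Major: 1, Minor: 17}

	// The "too new" Kubernetes version is one minor release past this kops
	// version; golint prefers x++ over x += 1 for the bump.
	tooNewVersion := kopsVersion
	tooNewVersion.Minor++
	tooNewVersion.Pre = nil
	tooNewVersion.Build = nil

	fmt.Printf("kops %d.%d tolerates Kubernetes below %d.%d\n",
		kopsVersion.Major, kopsVersion.Minor, tooNewVersion.Major, tooNewVersion.Minor)
}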

View File

@ -35,7 +35,7 @@ import (
)
const (
// This IP is from TEST-NET-3
// PlaceholderIP is from TEST-NET-3
// https://en.wikipedia.org/wiki/Reserved_IP_addresses
PlaceholderIP = "203.0.113.123"
PlaceholderTTL = 10
@ -111,9 +111,8 @@ func validateDNS(cluster *kops.Cluster, cloud fi.Cloud) error {
if len(ns) == 0 {
if os.Getenv("DNS_IGNORE_NS_CHECK") == "" {
return fmt.Errorf("NS records not found for %q - please make sure they are correctly configured", dnsName)
} else {
klog.Warningf("Ignoring failed NS record check because DNS_IGNORE_NS_CHECK is set")
}
klog.Warningf("Ignoring failed NS record check because DNS_IGNORE_NS_CHECK is set")
} else {
var hosts []string
for _, n := range ns {

View File

@ -414,14 +414,14 @@ func (l *Loader) loadObjectMap(key string, data map[string]interface{}) (map[str
loaded := make(map[string]interface{})
for k, v := range data {
typeId := ""
typeID := ""
name := ""
// If the name & type are not specified in the values,
// we infer them from the key (first component -> typeid, last component -> name)
if vMap, ok := v.(map[string]interface{}); ok {
if s, ok := vMap[KEY_TYPE]; ok {
typeId = s.(string)
typeID = s.(string)
}
if s, ok := vMap[KEY_NAME]; ok {
name = s.(string)
@ -436,19 +436,19 @@ func (l *Loader) loadObjectMap(key string, data map[string]interface{}) (map[str
inferredName = true
}
if typeId == "" {
if typeID == "" {
firstSlash := strings.Index(k, "/")
if firstSlash != -1 {
typeId = k[:firstSlash]
typeID = k[:firstSlash]
}
if typeId == "" {
if typeID == "" {
return nil, fmt.Errorf("cannot determine type for %q", k)
}
}
t, found := l.typeMap[typeId]
t, found := l.typeMap[typeID]
if !found {
return nil, fmt.Errorf("unknown type %q (in %q)", typeId, key)
return nil, fmt.Errorf("unknown type %q (in %q)", typeID, key)
}
o := reflect.New(t)

View File

@ -66,9 +66,8 @@ func findCNIAssets(c *kopsapi.Cluster, assetBuilder *assets.AssetBuilder) (*url.
return nil, nil, fmt.Errorf("unable to parse CNI asset hash %q", cniAssetHashString)
}
return u, hash, nil
} else {
return u, nil, nil
}
return u, nil, nil
}
sv, err := util.ParseKubernetesVersion(c.Spec.KubernetesVersion)

View File

@ -94,7 +94,7 @@ func (c *populateClusterSpec) run(clientset simple.Clientset) error {
// Copy cluster & instance groups, so we can modify them freely
cluster := &kopsapi.Cluster{}
reflectutils.JsonMergeStruct(cluster, c.InputCluster)
reflectutils.JSONMergeStruct(cluster, c.InputCluster)
err := c.assignSubnets(cluster)
if err != nil {

View File

@ -66,7 +66,7 @@ func PopulateInstanceGroupSpec(cluster *kops.Cluster, input *kops.InstanceGroup,
}
ig := &kops.InstanceGroup{}
reflectutils.JsonMergeStruct(ig, input)
reflectutils.JSONMergeStruct(ig, input)
// TODO: Clean up
if ig.IsMaster() {

View File

@ -42,8 +42,8 @@ func (l *SpecBuilder) BuildCompleteSpec(clusterSpec *kopsapi.ClusterSpec) (*kops
// Master kubelet config = (base kubelet config + master kubelet config)
masterKubelet := &kopsapi.KubeletConfigSpec{}
reflectutils.JsonMergeStruct(masterKubelet, completed.Kubelet)
reflectutils.JsonMergeStruct(masterKubelet, completed.MasterKubelet)
reflectutils.JSONMergeStruct(masterKubelet, completed.Kubelet)
reflectutils.JSONMergeStruct(masterKubelet, completed.MasterKubelet)
completed.MasterKubelet = masterKubelet
klog.V(1).Infof("options: %s", fi.DebugAsJsonStringIndent(completed))

View File

@ -61,7 +61,7 @@ type TemplateFunctions struct {
tags sets.String
}
// This will define the available functions we can use in our YAML models
// AddTo defines the available functions we can use in our YAML models.
// If we are trying to get a new function implemented it MUST
// be defined here.
func (tf *TemplateFunctions) AddTo(dest template.FuncMap, secretStore fi.SecretStore) (err error) {
@ -100,22 +100,20 @@ func (tf *TemplateFunctions) AddTo(dest template.FuncMap, secretStore fi.SecretS
dest["NodeLocalDNSClusterIP"] = func() string {
if tf.cluster.Spec.KubeProxy.ProxyMode == "ipvs" {
return tf.cluster.Spec.KubeDNS.ServerIP
} else {
return "__PILLAR__CLUSTER__DNS__"
}
return "__PILLAR__CLUSTER__DNS__"
}
dest["NodeLocalDNSServerIP"] = func() string {
if tf.cluster.Spec.KubeProxy.ProxyMode == "ipvs" {
return ""
} else {
return tf.cluster.Spec.KubeDNS.ServerIP
}
return tf.cluster.Spec.KubeDNS.ServerIP
}
dest["KopsControllerArgv"] = tf.KopsControllerArgv
dest["KopsControllerConfig"] = tf.KopsControllerConfig
dest["DnsControllerArgv"] = tf.DnsControllerArgv
dest["ExternalDnsArgv"] = tf.ExternalDnsArgv
dest["DnsControllerArgv"] = tf.DNSControllerArgv
dest["ExternalDnsArgv"] = tf.ExternalDNSArgv
dest["CloudControllerConfigArgv"] = tf.CloudControllerConfigArgv
// TODO: Only for GCE?
dest["EncodeGCELabel"] = gce.EncodeGCELabel
@ -268,8 +266,8 @@ func (tf *TemplateFunctions) CloudControllerConfigArgv() ([]string, error) {
return argv, nil
}
// DnsControllerArgv returns the args to the DNS controller
func (tf *TemplateFunctions) DnsControllerArgv() ([]string, error) {
// DNSControllerArgv returns the args to the DNS controller
func (tf *TemplateFunctions) DNSControllerArgv() ([]string, error) {
var argv []string
argv = append(argv, "/usr/bin/dns-controller")
@ -409,7 +407,7 @@ func (tf *TemplateFunctions) KopsControllerArgv() ([]string, error) {
return argv, nil
}
func (tf *TemplateFunctions) ExternalDnsArgv() ([]string, error) {
func (tf *TemplateFunctions) ExternalDNSArgv() ([]string, error) {
var argv []string
cloudProvider := tf.cluster.Spec.CloudProvider
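
Along with DnsControllerArgv → DNSControllerArgv and ExternalDnsArgv → ExternalDNSArgv, two of the template helpers also lose a redundant else after return. These helpers are closures registered into a template.FuncMap; a small sketch of that mechanism with invented function names and example values (only the __PILLAR__CLUSTER__DNS__ placeholder is taken from the hunk):

package main

import (
	"os"
	"text/template"
)

func main() {
	// AddTo-style wiring: expose Go closures to text/template by name.
	funcs := template.FuncMap{
		"ClusterDNSIP": func() string {
			proxyMode := "iptables" // assumption for the sketch
			if proxyMode == "ipvs" {
				return "100.64.0.10" // example value, not from kops
			}
			return "__PILLAR__CLUSTER__DNS__" // placeholder style used by node-local-dns
		},
	}

	tmpl := template.Must(template.New("manifest").Funcs(funcs).Parse("dns ip: {{ ClusterDNSIP }}\n"))
	if err := tmpl.Execute(os.Stdout, nil); err != nil {
		panic(err)
	}
}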

View File

@ -30,7 +30,7 @@ import (
)
const (
defaultKopsBaseUrl = "https://kubeupv2.s3.amazonaws.com/kops/%s/"
defaultKopsBaseURL = "https://kubeupv2.s3.amazonaws.com/kops/%s/"
// defaultKopsMirrorBase will be detected and automatically set to pull from the defaultKopsMirrors
defaultKopsMirrorBase = "https://kubeupv2.s3.amazonaws.com/kops/%s/"
@ -54,7 +54,7 @@ var defaultKopsMirrors = []mirror{
{URL: "https://kubeupv2.s3.amazonaws.com/kops/%s/"},
}
var kopsBaseUrl *url.URL
var kopsBaseURL *url.URL
// nodeUpAsset caches the nodeup download urls/hash
var nodeUpAsset *MirroredAsset
@ -65,33 +65,33 @@ var protokubeLocation *url.URL
// protokubeHash caches the hash for protokube
var protokubeHash *hashing.Hash
// BaseUrl returns the base url for the distribution of kops - in particular for nodeup & docker images
func BaseUrl() (*url.URL, error) {
// BaseURL returns the base url for the distribution of kops - in particular for nodeup & docker images
func BaseURL() (*url.URL, error) {
// returning cached value
// Avoid repeated logging
if kopsBaseUrl != nil {
klog.V(8).Infof("Using cached kopsBaseUrl url: %q", kopsBaseUrl.String())
return copyBaseURL(kopsBaseUrl)
if kopsBaseURL != nil {
klog.V(8).Infof("Using cached kopsBaseUrl url: %q", kopsBaseURL.String())
return copyBaseURL(kopsBaseURL)
}
baseUrlString := os.Getenv("KOPS_BASE_URL")
baseURLString := os.Getenv("KOPS_BASE_URL")
var err error
if baseUrlString == "" {
baseUrlString = fmt.Sprintf(defaultKopsBaseUrl, kops.Version)
klog.V(8).Infof("Using default base url: %q", baseUrlString)
kopsBaseUrl, err = url.Parse(baseUrlString)
if baseURLString == "" {
baseURLString = fmt.Sprintf(defaultKopsBaseURL, kops.Version)
klog.V(8).Infof("Using default base url: %q", baseURLString)
kopsBaseURL, err = url.Parse(baseURLString)
if err != nil {
return nil, fmt.Errorf("unable to parse %q as a url: %v", baseUrlString, err)
return nil, fmt.Errorf("unable to parse %q as a url: %v", baseURLString, err)
}
} else {
kopsBaseUrl, err = url.Parse(baseUrlString)
kopsBaseURL, err = url.Parse(baseURLString)
if err != nil {
return nil, fmt.Errorf("unable to parse env var KOPS_BASE_URL %q as a url: %v", baseUrlString, err)
return nil, fmt.Errorf("unable to parse env var KOPS_BASE_URL %q as a url: %v", baseURLString, err)
}
klog.Warningf("Using base url from KOPS_BASE_URL env var: %q", baseUrlString)
klog.Warningf("Using base url from KOPS_BASE_URL env var: %q", baseURLString)
}
return copyBaseURL(kopsBaseUrl)
return copyBaseURL(kopsBaseURL)
}
// copyBaseURL makes a copy of the base url or the path.Joins can append stuff to this URL
@ -109,7 +109,7 @@ func SetKopsAssetsLocations(assetsBuilder *assets.AssetBuilder) error {
for _, s := range []string{
"linux/amd64/kops", "darwin/amd64/kops",
} {
_, _, err := KopsFileUrl(s, assetsBuilder)
_, _, err := KopsFileURL(s, assetsBuilder)
if err != nil {
return err
}
@ -130,7 +130,7 @@ func NodeUpAsset(assetsBuilder *assets.AssetBuilder) (*MirroredAsset, error) {
var u *url.URL
var hash *hashing.Hash
if env == "" {
u, hash, err = KopsFileUrl("linux/amd64/nodeup", assetsBuilder)
u, hash, err = KopsFileURL("linux/amd64/nodeup", assetsBuilder)
if err != nil {
return nil, err
}
@ -171,7 +171,7 @@ func ProtokubeImageSource(assetsBuilder *assets.AssetBuilder) (*url.URL, *hashin
env := os.Getenv("PROTOKUBE_IMAGE")
var err error
if env == "" {
protokubeLocation, protokubeHash, err = KopsFileUrl("images/protokube.tar.gz", assetsBuilder)
protokubeLocation, protokubeHash, err = KopsFileURL("images/protokube.tar.gz", assetsBuilder)
if err != nil {
return nil, nil, err
}
@ -192,21 +192,21 @@ func ProtokubeImageSource(assetsBuilder *assets.AssetBuilder) (*url.URL, *hashin
return protokubeLocation, protokubeHash, nil
}
// KopsFileUrl returns the base url for the distribution of kops - in particular for nodeup & docker images
func KopsFileUrl(file string, assetBuilder *assets.AssetBuilder) (*url.URL, *hashing.Hash, error) {
base, err := BaseUrl()
// KopsFileURL returns the base url for the distribution of kops - in particular for nodeup & docker images
func KopsFileURL(file string, assetBuilder *assets.AssetBuilder) (*url.URL, *hashing.Hash, error) {
base, err := BaseURL()
if err != nil {
return nil, nil, err
}
base.Path = path.Join(base.Path, file)
fileUrl, hash, err := assetBuilder.RemapFileAndSHA(base)
fileURL, hash, err := assetBuilder.RemapFileAndSHA(base)
if err != nil {
return nil, nil, err
}
return fileUrl, hash, nil
return fileURL, hash, nil
}
type MirroredAsset struct {
@ -216,9 +216,9 @@ type MirroredAsset struct {
// BuildMirroredAsset checks to see if this is a file under the standard base location, and if so constructs some mirror locations
func BuildMirroredAsset(u *url.URL, hash *hashing.Hash) *MirroredAsset {
baseUrlString := fmt.Sprintf(defaultKopsMirrorBase, kops.Version)
if !strings.HasSuffix(baseUrlString, "/") {
baseUrlString += "/"
baseURLString := fmt.Sprintf(defaultKopsMirrorBase, kops.Version)
if !strings.HasSuffix(baseURLString, "/") {
baseURLString += "/"
}
a := &MirroredAsset{
@ -229,11 +229,11 @@ func BuildMirroredAsset(u *url.URL, hash *hashing.Hash) *MirroredAsset {
a.Locations = []string{urlString}
// Look at mirrors
if strings.HasPrefix(urlString, baseUrlString) {
if strings.HasPrefix(urlString, baseURLString) {
if hash == nil {
klog.Warningf("not using mirrors for asset %s as it does not have a known hash", u.String())
} else {
suffix := strings.TrimPrefix(urlString, baseUrlString)
suffix := strings.TrimPrefix(urlString, baseURLString)
// This is under our base url - add our well-known mirrors
a.Locations = []string{}
for _, m := range defaultKopsMirrors {
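
This is the largest rename cluster: BaseUrl, KopsFileUrl, kopsBaseUrl and the baseUrlString locals become BaseURL, KopsFileURL, kopsBaseURL and baseURLString. The mechanism underneath is a package-level URL that is resolved once and can be overridden via the KOPS_BASE_URL environment variable; a condensed sketch of that caching-and-override logic (the copy-on-return step, copyBaseURL, and the mirror handling are omitted):

package main

import (
	"fmt"
	"net/url"
	"os"
)

const defaultKopsBaseURL = "https://kubeupv2.s3.amazonaws.com/kops/%s/"

var kopsBaseURL *url.URL

// baseURL resolves the distribution base URL once: KOPS_BASE_URL wins if set,
// otherwise the versioned default is used, and the result is cached.
func baseURL(version string) (*url.URL, error) {
	if kopsBaseURL != nil {
		return kopsBaseURL, nil
	}

	baseURLString := os.Getenv("KOPS_BASE_URL")
	if baseURLString == "" {
		baseURLString = fmt.Sprintf(defaultKopsBaseURL, version)
	}

	u, err := url.Parse(baseURLString)
	if err != nil {
		return nil, fmt.Errorf("unable to parse %q as a url: %v", baseURLString, err)
	}
	kopsBaseURL = u
	return kopsBaseURL, nil
}

func main() {
	u, err := baseURL("1.17.0")
	if err != nil {
		panic(err)
	}
	fmt.Println(u.String())
}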

View File

@ -159,10 +159,9 @@ func (c *Context) Render(a, e, changes Task) error {
if *lifecycle == LifecycleExistsAndValidates {
return fmt.Errorf("lifecycle set to ExistsAndValidates, but object did not match")
} else {
// Warn, but then we continue
return nil
}
// Warn, but then we continue
return nil
}
}
}

View File

@ -199,7 +199,7 @@ func (_ *Keypair) Render(c *fi.Context, a, e, changes *Keypair) error {
klog.V(2).Infof("Creating privateKey %q", name)
}
signer := fi.CertificateId_CA
signer := fi.CertificateIDCA
if e.Signer != nil {
signer = fi.StringValue(e.Signer.Name)
}

View File

@ -97,7 +97,7 @@ func (l *OptionsLoader) iterate(userConfig interface{}, current interface{}) (in
next := reflect.New(t).Interface()
// Copy the current state before applying rules; they act as defaults
reflectutils.JsonMergeStruct(next, current)
reflectutils.JSONMergeStruct(next, current)
for _, t := range l.templates {
klog.V(2).Infof("executing template %s (tags=%s)", t.Name, t.Tags)
@ -136,7 +136,7 @@ func (l *OptionsLoader) iterate(userConfig interface{}, current interface{}) (in
}
// Also copy the user-provided values after applying rules; they act as overrides now
reflectutils.JsonMergeStruct(next, userConfig)
reflectutils.JSONMergeStruct(next, userConfig)
return next, nil
}

View File

@ -153,7 +153,7 @@ func findFile(p string) (*File, error) {
actual.Mode = fi.String(fi.FileModeToString(stat.Mode() & os.ModePerm))
uid := int(stat.Sys().(*syscall.Stat_t).Uid)
owner, err := fi.LookupUserById(uid)
owner, err := fi.LookupUserByID(uid)
if err != nil {
return nil, err
}
@ -164,7 +164,7 @@ func findFile(p string) (*File, error) {
}
gid := int(stat.Sys().(*syscall.Stat_t).Gid)
group, err := fi.LookupGroupById(gid)
group, err := fi.LookupGroupByID(gid)
if err != nil {
return nil, err
}

View File

@ -91,7 +91,7 @@ func LookupUser(name string) (*User, error) {
return users[name], nil
}
func LookupUserById(uid int) (*User, error) {
func LookupUserByID(uid int) (*User, error) {
users, err := parseUsers()
if err != nil {
return nil, fmt.Errorf("error reading users: %v", err)
@ -155,7 +155,7 @@ func LookupGroup(name string) (*Group, error) {
return groups[name], nil
}
func LookupGroupById(gid int) (*Group, error) {
func LookupGroupByID(gid int) (*Group, error) {
users, err := parseGroups()
if err != nil {
return nil, fmt.Errorf("error reading groups: %v", err)

View File

@ -586,7 +586,7 @@ func (c *VFSCAStore) loadPrivateKeys(p vfs.Path) (*keyset, error) {
func (c *VFSCAStore) findPrivateKeyset(id string) (*keyset, error) {
var keys *keyset
var err error
if id == CertificateId_CA {
if id == CertificateIDCA {
c.mutex.Lock()
defer c.mutex.Unlock()

View File

@ -480,21 +480,21 @@ func (x *ConvertKubeupCluster) Upgrade(ctx context.Context) error {
return fmt.Errorf("error writing completed cluster spec: %v", err)
}
oldCACertPool, err := oldKeyStore.FindCertificatePool(fi.CertificateId_CA)
oldCACertPool, err := oldKeyStore.FindCertificatePool(fi.CertificateIDCA)
if err != nil {
return fmt.Errorf("error reading old CA certs: %v", err)
}
if oldCACertPool == nil {
return fmt.Errorf("cannot find certificate pool %q", fi.CertificateId_CA)
return fmt.Errorf("cannot find certificate pool %q", fi.CertificateIDCA)
}
for _, ca := range oldCACertPool.Secondary {
err := newKeyStore.AddCert(fi.CertificateId_CA, ca)
err := newKeyStore.AddCert(fi.CertificateIDCA, ca)
if err != nil {
return fmt.Errorf("error importing old CA certs: %v", err)
}
}
if oldCACertPool.Primary != nil {
err := newKeyStore.AddCert(fi.CertificateId_CA, oldCACertPool.Primary)
err := newKeyStore.AddCert(fi.CertificateIDCA, oldCACertPool.Primary)
if err != nil {
return fmt.Errorf("error importing old CA certs: %v", err)
}

View File

@ -424,7 +424,7 @@ func (x *ImportCluster) ImportAWSCluster(ctx context.Context) error {
if err != nil {
return err
}
err = keyStore.AddCert(fi.CertificateId_CA, caCert)
err = keyStore.AddCert(fi.CertificateIDCA, caCert)
if err != nil {
return err
}

View File

@ -102,7 +102,7 @@ func ValueAsString(value reflect.Value) string {
if !done {
klog.V(4).Infof("Unhandled kind in asString for %q: %T", path, v.Interface())
fmt.Fprint(b, values.DebugAsJsonString(intf))
fmt.Fprint(b, values.DebugAsJSONString(intf))
}
return SkipReflection

View File

@ -41,9 +41,9 @@ func IsMethodNotFound(err error) bool {
return ok
}
// JsonMergeStruct merges src into dest
// JSONMergeStruct merges src into dest
// It uses a JSON marshal & unmarshal, so only fields that are JSON-visible will be copied
func JsonMergeStruct(dest, src interface{}) {
func JSONMergeStruct(dest, src interface{}) {
// Not the most efficient approach, but simple & relatively well defined
j, err := json.Marshal(src)
if err != nil {
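
Finally, JsonMergeStruct becomes JSONMergeStruct across the spec-building code. The helper merges src into dest via a JSON round trip, so only JSON-visible fields are copied; the sketch below reconstructs that idea with explicit error returns instead of whatever error handling the real helper uses, and with a made-up config type:

package main

import (
	"encoding/json"
	"fmt"
)

// jsonMergeStruct merges src into dest by a JSON round trip: fields set in src
// overwrite the corresponding fields in dest, untouched fields keep dest's values.
func jsonMergeStruct(dest, src interface{}) error {
	j, err := json.Marshal(src)
	if err != nil {
		return fmt.Errorf("error marshaling source: %v", err)
	}
	if err := json.Unmarshal(j, dest); err != nil {
		return fmt.Errorf("error merging into destination: %v", err)
	}
	return nil
}

// kubeletConfig is a hypothetical config type standing in for the kops spec structs.
type kubeletConfig struct {
	NodeLabels   map[string]string `json:"nodeLabels,omitempty"`
	ResolverPath *string           `json:"resolvConf,omitempty"`
}

func main() {
	base := kubeletConfig{NodeLabels: map[string]string{"role": "node"}}
	override := kubeletConfig{NodeLabels: map[string]string{"role": "master"}}

	merged := kubeletConfig{}
	_ = jsonMergeStruct(&merged, base)     // defaults first
	_ = jsonMergeStruct(&merged, override) // overrides second, as the SpecBuilder hunks do
	fmt.Println(merged.NodeLabels["role"]) // master
}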