Merge pull request #14135 from hakman/go-1.19.0

Update Go to v1.19.0
Kubernetes Prow Robot authored 2022-08-17 02:40:55 -07:00; committed by GitHub
commit 7035526204
18 changed files with 92 additions and 71 deletions
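Most of the Go changes in the hunks below come from running gofmt under Go 1.19, which rewrites doc comments to the new doc comment format: `*` bullets become `-` list items, indented material becomes tab-indented blocks, and blank `//` lines are inserted around lists and blocks. As a hypothetical before/after illustration (not a comment from this PR):

package example

// Old style (what gofmt left untouched before Go 1.19):
//
// Supported modes:
// * fast (the default)
// * safe
func oldStyle() {}

// New style (what Go 1.19 gofmt rewrites the comment above into):
//
// Supported modes:
//   - fast (the default)
//   - safe
func newStyle() {}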

View File

@@ -19,7 +19,7 @@ jobs:
       - name: Set up go
         uses: actions/setup-go@84cbf8094393cdc5fe1fe1671ff2647332956b1a
         with:
-          go-version: 1.18.4
+          go-version: '1.19.0'
       - uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
         with:
@@ -36,7 +36,7 @@ jobs:
       - name: Set up go
         uses: actions/setup-go@84cbf8094393cdc5fe1fe1671ff2647332956b1a
         with:
-          go-version: 1.18.4
+          go-version: '1.19.0'
       - uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
         with:
@@ -53,7 +53,7 @@ jobs:
       - name: Set up go
         uses: actions/setup-go@84cbf8094393cdc5fe1fe1671ff2647332956b1a
         with:
-          go-version: 1.18.4
+          go-version: '1.19.0'
       - uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
         with:
@@ -70,7 +70,7 @@ jobs:
       - name: Set up go
         uses: actions/setup-go@84cbf8094393cdc5fe1fe1671ff2647332956b1a
         with:
-          go-version: 1.18.4
+          go-version: '1.19.0'
       - uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
         with:

View File

@@ -19,7 +19,7 @@ jobs:
     steps:
       - uses: actions/setup-go@84cbf8094393cdc5fe1fe1671ff2647332956b1a
         with:
-          go-version: '1.18'
+          go-version: '1.19.0'
       - uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
       - name: Update Dependencies
        id: update_deps

View File

@@ -41,8 +41,9 @@ type Channel struct {
 // CurrentSystemGeneration holds our current SystemGeneration value.
 // Version history:
-//   0 Pre-history (and the default value); versions prior to prune.
-//   1 Prune functionality introduced.
+//
+//	0 Pre-history (and the default value); versions prior to prune.
+//	1 Prune functionality introduced.
 const CurrentSystemGeneration = 1
 type ChannelVersion struct {

View File

@@ -5,7 +5,7 @@ options:
   machineType: 'N1_HIGHCPU_8'
 steps:
   # Push the images
-  - name: 'docker.io/library/golang:1.18.4-bullseye'
+  - name: 'docker.io/library/golang:1.19.0-bullseye'
     id: images
     entrypoint: make
     env:
@@ -20,7 +20,7 @@ steps:
       - dns-controller-push
       - kube-apiserver-healthcheck-push
   # Push the artifacts
-  - name: 'docker.io/library/golang:1.18.4-bullseye'
+  - name: 'docker.io/library/golang:1.19.0-bullseye'
     id: artifacts
     entrypoint: make
     env:
@@ -35,7 +35,7 @@ steps:
     args:
       - gcs-upload-and-tag
   # Build cloudbuild artifacts (for attestation)
-  - name: 'docker.io/library/golang:1.18.4-bullseye'
+  - name: 'docker.io/library/golang:1.19.0-bullseye'
     id: cloudbuild-artifacts
     entrypoint: make
     env:

View File

@@ -261,8 +261,9 @@ func (c *RootCmd) clusterNameArgsAllowNoCluster(clusterName *string) func(cmd *c
 }
 // ProcessArgs will parse the positional args. It assumes one of these formats:
-// * <no arguments at all>
-// * <clustername> (and --name not specified)
+//   - <no arguments at all>
+//   - <clustername> (and --name not specified)
+//
 // Everything else is an error.
 func (c *RootCmd) ProcessArgs(args []string) error {
 	if len(args) > 0 {
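The contract documented in that comment is small enough to sketch on its own. The following stand-alone version is illustrative only, not the kops implementation; the function name, the nameFlag parameter, and the error wording are assumptions:

package main

import (
	"errors"
	"fmt"
)

// resolveClusterName sketches the documented contract: either no positional
// arguments (the --name flag, if any, wins), or exactly one cluster name
// given that --name was not also specified.
func resolveClusterName(args []string, nameFlag string) (string, error) {
	switch {
	case len(args) == 0:
		return nameFlag, nil
	case len(args) == 1 && nameFlag == "":
		return args[0], nil
	case len(args) == 1:
		return "", errors.New("cannot specify both --name and a positional cluster name")
	default:
		return "", fmt.Errorf("expected at most one argument, got %d", len(args))
	}
}

func main() {
	name, err := resolveClusterName([]string{"example.k8s.local"}, "")
	fmt.Println(name, err) // example.k8s.local <nil>
}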

View File

@@ -99,14 +99,16 @@ type ResourceRecordSet interface {
 	Type() rrstype.RrsType
 }
-/* ResourceRecordSetsEquivalent compares two ResourceRecordSets for semantic equivalence.
-Go's equality operator doesn't work the way we want it to in this case,
-hence the need for this function.
-More specifically (from the Go spec):
-    "Two struct values are equal if their corresponding non-blank fields are equal."
-In our case, there may be some private internal member variables that may not be not equal,
-but we want the two structs to be considered equivalent anyway, if the fields exposed
-via their interfaces are equal.
+/*
+ResourceRecordSetsEquivalent compares two ResourceRecordSets for semantic equivalence.
+
+Go's equality operator doesn't work the way we want it to in this case,
+hence the need for this function.
+More specifically (from the Go spec):
+	"Two struct values are equal if their corresponding non-blank fields are equal."
+In our case, there may be some private internal member variables that may not be not equal,
+but we want the two structs to be considered equivalent anyway, if the fields exposed
+via their interfaces are equal.
 */
 func ResourceRecordSetsEquivalent(r1, r2 ResourceRecordSet) bool {
 	if r1.Name() == r2.Name() && reflect.DeepEqual(r1.Rrdatas(), r2.Rrdatas()) && r1.Ttl() == r2.Ttl() && r1.Type() == r2.Type() {
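The point of that comment is easy to make concrete: two values can differ in unexported bookkeeping fields (so a whole-struct comparison reports false) while still exposing identical data through their methods. A minimal sketch with a made-up record type, not the dnsprovider implementations:

package main

import (
	"fmt"
	"reflect"
)

type record struct {
	name    string
	rrdatas []string
	note    *string // unexported, provider-specific bookkeeping
}

func (r record) Name() string      { return r.name }
func (r record) Rrdatas() []string { return r.rrdatas }

// equivalent mirrors the idea of ResourceRecordSetsEquivalent: compare only
// what the interface exposes, not the whole struct.
func equivalent(a, b record) bool {
	return a.Name() == b.Name() && reflect.DeepEqual(a.Rrdatas(), b.Rrdatas())
}

func main() {
	cached := "resolved via cache"
	a := record{name: "www.example.com", rrdatas: []string{"192.0.2.1"}}
	b := record{name: "www.example.com", rrdatas: []string{"192.0.2.1"}, note: &cached}

	fmt.Println(reflect.DeepEqual(a, b)) // false: the unexported field differs
	fmt.Println(equivalent(a, b))        // true: the exposed fields match
}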

View File

@@ -41,14 +41,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{
 // AddToScheme adds all types of this clientset into the given scheme. This allows composition
 // of clientsets, like in:
 //
-//   import (
-//     "k8s.io/client-go/kubernetes"
-//     clientsetscheme "k8s.io/client-go/kubernetes/scheme"
-//     aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
-//   )
+//	import (
+//	  "k8s.io/client-go/kubernetes"
+//	  clientsetscheme "k8s.io/client-go/kubernetes/scheme"
+//	  aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
+//	)
 //
-//   kclientset, _ := kubernetes.NewForConfig(c)
-//   _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
+//	kclientset, _ := kubernetes.NewForConfig(c)
+//	_ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
 //
 // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
 // correctly.

View File

@@ -41,14 +41,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{
 // AddToScheme adds all types of this clientset into the given scheme. This allows composition
 // of clientsets, like in:
 //
-//   import (
-//     "k8s.io/client-go/kubernetes"
-//     clientsetscheme "k8s.io/client-go/kubernetes/scheme"
-//     aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
-//   )
+//	import (
+//	  "k8s.io/client-go/kubernetes"
+//	  clientsetscheme "k8s.io/client-go/kubernetes/scheme"
+//	  aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
+//	)
 //
-//   kclientset, _ := kubernetes.NewForConfig(c)
-//   _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
+//	kclientset, _ := kubernetes.NewForConfig(c)
+//	_ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
 //
 // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
 // correctly.

View File

@@ -17,11 +17,11 @@ limitations under the License.
 // Package featureflag implements simple feature-flagging.
 // Feature flags can become an anti-pattern if abused.
 // We should try to use them for two use-cases:
-// * `Preview` feature flags enable a piece of functionality we haven't yet fully baked. The user needs to 'opt-in'.
-//   We expect these flags to be removed at some time. Normally these will default to false.
-// * Escape-hatch feature flags turn off a default that we consider risky (e.g. pre-creating DNS records).
-//   This lets us ship a behaviour, and if we encounter unusual circumstances in the field, we can
-//   allow the user to turn the behaviour off. Normally these will default to true.
+//   - `Preview` feature flags enable a piece of functionality we haven't yet fully baked. The user needs to 'opt-in'.
+//     We expect these flags to be removed at some time. Normally these will default to false.
+//   - Escape-hatch feature flags turn off a default that we consider risky (e.g. pre-creating DNS records).
+//     This lets us ship a behaviour, and if we encounter unusual circumstances in the field, we can
+//     allow the user to turn the behaviour off. Normally these will default to true.
 package featureflag
 import (
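As a rough illustration of the two defaults described in that package comment (this is not the kops featureflag API; the type and names below are invented):

package main

import "fmt"

// flag is a toy feature flag with an explicit default.
type flag struct {
	name    string
	enabled bool
}

var (
	// Preview-style flag: off until the user opts in; expected to be removed later.
	previewNewScheduler = flag{name: "PreviewNewScheduler", enabled: false}
	// Escape-hatch-style flag: on by default, can be turned off in the field.
	preCreateDNSRecords = flag{name: "PreCreateDNSRecords", enabled: true}
)

func main() {
	fmt.Println(previewNewScheduler.name, previewNewScheduler.enabled) // PreviewNewScheduler false
	fmt.Println(preCreateDNSRecords.name, preCreateDNSRecords.enabled) // PreCreateDNSRecords true
}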

View File

@@ -1344,21 +1344,34 @@ func TestRollingUpdateDetachFails(t *testing.T) {
 }
 // Request validate (1) -->
-//                         <-- validated
+//
+//	<-- validated
+//
 // Detach instance -->
 // Request validate (2) -->
-//                         <-- validated
+//
+//	<-- validated
+//
 // Detach instance -->
 // Request validate (3) -->
-//                         <-- validated
+//
+//	<-- validated
+//
 // Request terminate 3 nodes -->
-//                              <-- 3 nodes terminated, 1 left
+//
+//	<-- 3 nodes terminated, 1 left
+//
 // Request validate (4) -->
-//                         <-- validated
+//
+//	<-- validated
+//
 // Request terminate 1 node -->
-//                             <-- 1 node terminated, 0 left
+//
+//	<-- 1 node terminated, 0 left
+//
 // Request validate (5) -->
-//                         <-- validated
+//
+//	<-- validated
 type alreadyDetachedTest struct {
 	ec2iface.EC2API
 	t *testing.T
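The sequence in that comment is a scripted conversation between the rolling-update loop, the cluster validator, and EC2. A bare-bones sketch of how such a scripted fake can be written; this is a generic illustration, not the test's actual EC2API or validator fakes:

package main

import "fmt"

// scriptedValidator answers each validation round in order and records how
// many rounds were requested, so a test can assert on the count afterwards.
type scriptedValidator struct {
	calls   int
	results []error // one entry per expected "Request validate (n)"
}

func (v *scriptedValidator) Validate() error {
	v.calls++
	if v.calls <= len(v.results) {
		return v.results[v.calls-1]
	}
	return fmt.Errorf("unexpected validation round %d", v.calls)
}

func main() {
	v := &scriptedValidator{results: []error{nil, nil, nil, nil, nil}} // five "validated" replies
	for i := 0; i < 5; i++ {
		_ = v.Validate()
	}
	fmt.Println(v.calls) // 5
}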

View File

@@ -28,8 +28,8 @@ import (
 // KubeObjectToApplyYAML returns the kubernetes object converted to YAML, with "noisy" fields removed.
 //
 // We remove:
-// * status (can't be applied, shouldn't be specified)
-// * metadata.creationTimestamp (can't be applied, shouldn't be specified)
+//   - status (can't be applied, shouldn't be specified)
+//   - metadata.creationTimestamp (can't be applied, shouldn't be specified)
 func KubeObjectToApplyYAML(data runtime.Object) (string, error) {
 	// This logic is inlined sigs.k8s.io/yaml.Marshal, but we delete some fields in the middle.
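A rough sketch of the approach that comment describes: round-trip the object through a generic map, drop the noisy fields, then marshal to YAML. It assumes only the sigs.k8s.io/yaml package and is illustrative rather than the actual inlined implementation:

package main

import (
	"encoding/json"
	"fmt"

	"sigs.k8s.io/yaml"
)

// toApplyYAML marshals any JSON-serializable object to YAML after removing
// status and metadata.creationTimestamp, mirroring the doc comment above.
func toApplyYAML(obj interface{}) (string, error) {
	j, err := json.Marshal(obj)
	if err != nil {
		return "", err
	}
	var m map[string]interface{}
	if err := json.Unmarshal(j, &m); err != nil {
		return "", err
	}
	delete(m, "status")
	if md, ok := m["metadata"].(map[string]interface{}); ok {
		delete(md, "creationTimestamp")
	}
	y, err := yaml.Marshal(m)
	if err != nil {
		return "", err
	}
	return string(y), nil
}

func main() {
	out, _ := toApplyYAML(map[string]interface{}{
		"metadata": map[string]interface{}{"name": "example", "creationTimestamp": nil},
		"status":   map[string]interface{}{"ready": true},
	})
	fmt.Print(out) // metadata:\n  name: example\n
}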

View File

@@ -64,9 +64,10 @@ func (b *FirewallModelBuilder) getOctaviaProvider() string {
 }
 // addDirectionalGroupRule - create a rule on the source group to the dest group provided a securityGroupRuleTask
-//    Example
-//    Create an Ingress rule on source allowing traffic from dest with the options in the SecurityGroupRule
-//    Create an Egress rule on source allowing traffic to dest with the options in the SecurityGroupRule
+//
+//	Example
+//	Create an Ingress rule on source allowing traffic from dest with the options in the SecurityGroupRule
+//	Create an Egress rule on source allowing traffic to dest with the options in the SecurityGroupRule
 func (b *FirewallModelBuilder) addDirectionalGroupRule(c *fi.ModelBuilderContext, source, dest *openstacktasks.SecurityGroup, sgr *openstacktasks.SecurityGroupRule) {
 	t := &openstacktasks.SecurityGroupRule{
 		Direction: sgr.Direction,

View File

@@ -40,7 +40,6 @@ import (
 // PerformAssignments is called on create, as well as an update. In fact
 // any time Run() is called in apply_cluster.go we will reach this function.
 // Please do all after-market logic here.
-//
 func PerformAssignments(c *kops.Cluster, cloud fi.Cloud) error {
 	ctx := context.TODO()

View File

@@ -69,7 +69,6 @@ func PopulateClusterSpec(clientset simple.Clientset, cluster *kopsapi.Cluster, c
 	return c.fullCluster, nil
 }
-//
 // Here be dragons
 //
 // This function has some `interesting` things going on.
@@ -78,7 +77,6 @@ func PopulateClusterSpec(clientset simple.Clientset, cluster *kopsapi.Cluster, c
 // very wrong.. but at least now my new cluster.Spec.Topology
 // struct is falling through..
 // @kris-nova
-//
 func (c *populateClusterSpec) run(clientset simple.Clientset) error {
 	if errs := validation.ValidateCluster(c.InputCluster, false); len(errs) != 0 {
 		return errs.ToAggregate()

View File

@@ -169,10 +169,11 @@ func writeLiteralList(body *hclwrite.Body, key string, literals []*terraformWrit
 // writeMap writes a map's key-value pairs to a body spready across multiple lines.
 // Example:
-//   key = {
-//     "key1" = "value1"
-//     "key2" = "value2"
-//   }
+//
+//	key = {
+//	  "key1" = "value1"
+//	  "key2" = "value2"
+//	}
 //
 // The HCL2 library does not support this natively. See https://github.com/hashicorp/hcl/issues/356
 func writeMap(body *hclwrite.Body, key string, values map[string]cty.Value) {

View File

@@ -141,16 +141,19 @@ func (t *TerraformTarget) finishHCL2() error {
 // writeLocalsOutputs creates the locals block and output blocks for all output variables
 // Example:
-//   locals {
-//     key1 = "value1"
-//     key2 = "value2"
-//   }
-//   output "key1" {
-//     value = "value1"
-//   }
-//   output "key2" {
-//     value = "value2"
-//   }
+//
+//	locals {
+//	  key1 = "value1"
+//	  key2 = "value2"
+//	}
+//
+//	output "key1" {
+//	  value = "value1"
+//	}
+//
+//	output "key2" {
+//	  value = "value2"
+//	}
 func writeLocalsOutputs(body *hclwrite.Body, outputs map[string]terraformWriter.OutputValue) error {
 	if len(outputs) == 0 {
 		return nil
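Unlike the multi-line map layout in the previous file, the locals and output shape documented above is expressible with hclwrite's stock block API. A minimal sketch of producing that output, illustrative rather than the kops writer (which operates on terraformWriter.OutputValue rather than plain cty values):

package main

import (
	"fmt"
	"sort"

	"github.com/hashicorp/hcl/v2/hclwrite"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	outputs := map[string]cty.Value{
		"key1": cty.StringVal("value1"),
		"key2": cty.StringVal("value2"),
	}
	keys := make([]string, 0, len(outputs))
	for k := range outputs {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	f := hclwrite.NewEmptyFile()
	body := f.Body()

	// locals { key1 = "value1" ... }
	locals := body.AppendNewBlock("locals", nil).Body()
	for _, k := range keys {
		locals.SetAttributeValue(k, outputs[k])
	}

	// output "key1" { value = "value1" }, then the same for key2
	for _, k := range keys {
		block := body.AppendNewBlock("output", []string{k}).Body()
		block.SetAttributeValue("value", outputs[k])
	}

	fmt.Printf("%s", f.Bytes())
}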

View File

@@ -75,8 +75,9 @@ func WithBackoff(backoff wait.Backoff) VFSOption {
 // ReadFile reads a file from a vfs URL
 // It supports additional schemes which don't (yet) have full VFS implementations:
-//   metadata: reads from instance metadata on GCE/AWS
-//   http / https: reads from HTTP
+//
+//	metadata: reads from instance metadata on GCE/AWS
+//	http / https: reads from HTTP
 func (c *VFSContext) ReadFile(location string, options ...VFSOption) ([]byte, error) {
 	ctx := context.TODO()
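Usage is just a location string plus optional options, per the signature above. A hedged example: it assumes the package-level vfs.Context value kops exposes and uses a placeholder URL, not a real endpoint:

package main

import (
	"fmt"
	"log"

	"k8s.io/kops/util/pkg/vfs"
)

func main() {
	// vfs.Context is assumed to be the package-level VFSContext; https URLs,
	// cloud object stores, and the extra schemes all go through ReadFile.
	data, err := vfs.Context.ReadFile("https://example.com/cluster-addons.yaml")
	if err != nil {
		log.Fatalf("reading file: %v", err)
	}
	fmt.Printf("read %d bytes\n", len(data))
}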

View File

@@ -40,7 +40,8 @@ import (
 // https://docs.aws.amazon.com/general/latest/gr/s3.html
 // TODO: match fips and S3 access point naming conventions
 // TODO: perhaps make region regex more specific, i.e. (us|eu|ap|cn|ca|sa), to prevent matching bucket names that match region format?
-//       but that will mean updating this list when AWS introduces new regions
+//
+//	but that will mean updating this list when AWS introduces new regions
 var s3UrlRegexp = regexp.MustCompile(`(s3([-.](?P<region>\w{2}(-gov)?-\w+-\d{1})|[-.](?P<bucket>[\w.\-\_]+)|)?|(?P<bucket>[\w.\-\_]+)[.]s3([.-](?P<region>\w{2}(-gov)?-\w+-\d{1}))?)[.]amazonaws[.]com([.]cn)?(?P<path>.*)?`)
 type S3BucketDetails struct {
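The named groups in that pattern (region, bucket, path) can be pulled out with the standard regexp API. A short, self-contained illustration reusing the same expression; the example hostname is made up:

package main

import (
	"fmt"
	"regexp"
)

var s3UrlRegexp = regexp.MustCompile(`(s3([-.](?P<region>\w{2}(-gov)?-\w+-\d{1})|[-.](?P<bucket>[\w.\-\_]+)|)?|(?P<bucket>[\w.\-\_]+)[.]s3([.-](?P<region>\w{2}(-gov)?-\w+-\d{1}))?)[.]amazonaws[.]com([.]cn)?(?P<path>.*)?`)

func main() {
	m := s3UrlRegexp.FindStringSubmatch("my-bucket.s3.us-east-1.amazonaws.com/path/to/key")
	if m == nil {
		fmt.Println("no match")
		return
	}
	// Collect named groups; the pattern reuses names (region and bucket each
	// appear twice), so keep the first non-empty capture for each name.
	groups := map[string]string{}
	for i, name := range s3UrlRegexp.SubexpNames() {
		if name != "" && m[i] != "" && groups[name] == "" {
			groups[name] = m[i]
		}
	}
	fmt.Println(groups["bucket"], groups["region"], groups["path"])
	// my-bucket us-east-1 /path/to/key
}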