mirror of https://github.com/kubernetes/kops.git
commit 7035526204
@@ -19,7 +19,7 @@ jobs:
 - name: Set up go
 uses: actions/setup-go@84cbf8094393cdc5fe1fe1671ff2647332956b1a
 with:
-go-version: 1.18.4
+go-version: '1.19.0'

 - uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
 with:
@@ -36,7 +36,7 @@ jobs:
 - name: Set up go
 uses: actions/setup-go@84cbf8094393cdc5fe1fe1671ff2647332956b1a
 with:
-go-version: 1.18.4
+go-version: '1.19.0'

 - uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
 with:
@@ -53,7 +53,7 @@ jobs:
 - name: Set up go
 uses: actions/setup-go@84cbf8094393cdc5fe1fe1671ff2647332956b1a
 with:
-go-version: 1.18.4
+go-version: '1.19.0'

 - uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
 with:
@@ -70,7 +70,7 @@ jobs:
 - name: Set up go
 uses: actions/setup-go@84cbf8094393cdc5fe1fe1671ff2647332956b1a
 with:
-go-version: 1.18.4
+go-version: '1.19.0'

 - uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
 with:

@@ -19,7 +19,7 @@ jobs:
 steps:
 - uses: actions/setup-go@84cbf8094393cdc5fe1fe1671ff2647332956b1a
 with:
-go-version: '1.18'
+go-version: '1.19.0'
 - uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
 - name: Update Dependencies
 id: update_deps

@@ -41,8 +41,9 @@ type Channel struct {

 // CurrentSystemGeneration holds our current SystemGeneration value.
 // Version history:
-// 0  Pre-history (and the default value); versions prior to prune.
-// 1  Prune functionality introduced.
+//
+// 0  Pre-history (and the default value); versions prior to prune.
+// 1  Prune functionality introduced.
 const CurrentSystemGeneration = 1

 type ChannelVersion struct {

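This hunk has the shape of the doc-comment rewrites gofmt applies from Go 1.19 onward: indented lines inside a doc comment become a preformatted block and are set off by a blank // line. A minimal illustration of the resulting style, using hypothetical names that are not part of this commit:

package example

// Generation records the configuration format revision.
// Version history:
//
//	0  initial format
//	1  adds pruning
//
// Readers should treat an unknown generation as newer than their own.
const Generation = 1
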
@@ -5,7 +5,7 @@ options:
 machineType: 'N1_HIGHCPU_8'
 steps:
 # Push the images
-- name: 'docker.io/library/golang:1.18.4-bullseye'
+- name: 'docker.io/library/golang:1.19.0-bullseye'
 id: images
 entrypoint: make
 env:
@@ -20,7 +20,7 @@ steps:
 - dns-controller-push
 - kube-apiserver-healthcheck-push
 # Push the artifacts
-- name: 'docker.io/library/golang:1.18.4-bullseye'
+- name: 'docker.io/library/golang:1.19.0-bullseye'
 id: artifacts
 entrypoint: make
 env:
@@ -35,7 +35,7 @@ steps:
 args:
 - gcs-upload-and-tag
 # Build cloudbuild artifacts (for attestation)
-- name: 'docker.io/library/golang:1.18.4-bullseye'
+- name: 'docker.io/library/golang:1.19.0-bullseye'
 id: cloudbuild-artifacts
 entrypoint: make
 env:

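Together with the workflow hunks above, these cloudbuild steps pin the build to the go1.19.0 toolchain and the golang:1.19.0-bullseye image. Purely as an illustrative sketch (this test is not part of the commit and the package name is made up), the same expectation can also be asserted from code:

package build_test

import (
	"runtime"
	"strings"
	"testing"
)

// TestGoToolchain fails when the test binary was not built with a go1.19
// toolchain, mirroring the version pinned in the CI configuration.
func TestGoToolchain(t *testing.T) {
	const wantPrefix = "go1.19"
	if v := runtime.Version(); !strings.HasPrefix(v, wantPrefix) {
		t.Fatalf("built with %s, want a %s toolchain", v, wantPrefix)
	}
}
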
@@ -261,8 +261,9 @@ func (c *RootCmd) clusterNameArgsAllowNoCluster(clusterName *string) func(cmd *c
 }

 // ProcessArgs will parse the positional args. It assumes one of these formats:
-// * <no arguments at all>
-// * <clustername> (and --name not specified)
+// - <no arguments at all>
+// - <clustername> (and --name not specified)
+//
 // Everything else is an error.
 func (c *RootCmd) ProcessArgs(args []string) error {
 if len(args) > 0 {

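The list rewrite here follows the Go 1.19 doc-comment syntax: items use "-" markers and the list is closed with a blank // line before the next paragraph. A small illustration with made-up names, not taken from kops:

package example

import "errors"

// Load reads a manifest from one of these locations:
//   - a local file path
//   - an https:// URL
//
// Anything else is rejected.
func Load(location string) ([]byte, error) {
	if location == "" {
		return nil, errors.New("location is required")
	}
	// Fetching is out of scope for this sketch.
	return nil, nil
}
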
@@ -99,14 +99,16 @@ type ResourceRecordSet interface {
 Type() rrstype.RrsType
 }

-/* ResourceRecordSetsEquivalent compares two ResourceRecordSets for semantic equivalence.
-Go's equality operator doesn't work the way we want it to in this case,
-hence the need for this function.
-More specifically (from the Go spec):
-"Two struct values are equal if their corresponding non-blank fields are equal."
-In our case, there may be some private internal member variables that may not be not equal,
-but we want the two structs to be considered equivalent anyway, if the fields exposed
-via their interfaces are equal.
+/*
+ResourceRecordSetsEquivalent compares two ResourceRecordSets for semantic equivalence.
+
+Go's equality operator doesn't work the way we want it to in this case,
+hence the need for this function.
+More specifically (from the Go spec):
+"Two struct values are equal if their corresponding non-blank fields are equal."
+In our case, there may be some private internal member variables that may not be not equal,
+but we want the two structs to be considered equivalent anyway, if the fields exposed
+via their interfaces are equal.
 */
 func ResourceRecordSetsEquivalent(r1, r2 ResourceRecordSet) bool {
 if r1.Name() == r2.Name() && reflect.DeepEqual(r1.Rrdatas(), r2.Rrdatas()) && r1.Ttl() == r2.Ttl() && r1.Type() == r2.Type() {

@@ -41,14 +41,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{
 // AddToScheme adds all types of this clientset into the given scheme. This allows composition
 // of clientsets, like in:
 //
-// import (
-// "k8s.io/client-go/kubernetes"
-// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
-// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
-// )
+// import (
+// "k8s.io/client-go/kubernetes"
+// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
+// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
+// )
 //
-// kclientset, _ := kubernetes.NewForConfig(c)
-// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
+// kclientset, _ := kubernetes.NewForConfig(c)
+// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
 //
 // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
 // correctly.

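The generated doc comment above describes how clientset schemes compose; this hunk only reflows its embedded example into the Go 1.19 code-block style. Written out as a compilable sketch (the rest.Config plumbing is an assumption, not something shown in the diff):

package example

import (
	"k8s.io/client-go/kubernetes"
	clientsetscheme "k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"
	aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
)

// newClient registers the kube-aggregator types into client-go's shared scheme,
// so RawExtensions referencing them serialize correctly, then builds a clientset.
func newClient(c *rest.Config) (*kubernetes.Clientset, error) {
	_ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
	return kubernetes.NewForConfig(c)
}
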
@@ -41,14 +41,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{
 // AddToScheme adds all types of this clientset into the given scheme. This allows composition
 // of clientsets, like in:
 //
-// import (
-// "k8s.io/client-go/kubernetes"
-// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
-// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
-// )
+// import (
+// "k8s.io/client-go/kubernetes"
+// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
+// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
+// )
 //
-// kclientset, _ := kubernetes.NewForConfig(c)
-// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
+// kclientset, _ := kubernetes.NewForConfig(c)
+// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
 //
 // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
 // correctly.

@@ -17,11 +17,11 @@ limitations under the License.
 // Package featureflag implements simple feature-flagging.
 // Feature flags can become an anti-pattern if abused.
 // We should try to use them for two use-cases:
-// * `Preview` feature flags enable a piece of functionality we haven't yet fully baked. The user needs to 'opt-in'.
-// We expect these flags to be removed at some time. Normally these will default to false.
-// * Escape-hatch feature flags turn off a default that we consider risky (e.g. pre-creating DNS records).
-// This lets us ship a behaviour, and if we encounter unusual circumstances in the field, we can
-// allow the user to turn the behaviour off. Normally these will default to true.
+// - `Preview` feature flags enable a piece of functionality we haven't yet fully baked. The user needs to 'opt-in'.
+// We expect these flags to be removed at some time. Normally these will default to false.
+// - Escape-hatch feature flags turn off a default that we consider risky (e.g. pre-creating DNS records).
+// This lets us ship a behaviour, and if we encounter unusual circumstances in the field, we can
+// allow the user to turn the behaviour off. Normally these will default to true.
 package featureflag

 import (

@@ -1344,21 +1344,34 @@ func TestRollingUpdateDetachFails(t *testing.T) {
 }

 // Request validate (1) -->
-// <-- validated
+//
+// <-- validated
+//
 // Detach instance -->
 // Request validate (2) -->
-// <-- validated
+//
+// <-- validated
+//
 // Detach instance -->
 // Request validate (3) -->
-// <-- validated
+//
+// <-- validated
+//
 // Request terminate 3 nodes -->
-// <-- 3 nodes terminated, 1 left
+//
+// <-- 3 nodes terminated, 1 left
+//
 // Request validate (4) -->
-// <-- validated
+//
+// <-- validated
+//
 // Request terminate 1 node -->
-// <-- 1 node terminated, 0 left
+//
+// <-- 1 node terminated, 0 left
+//
 // Request validate (5) -->
-// <-- validated
+//
+// <-- validated
 type alreadyDetachedTest struct {
 ec2iface.EC2API
 t *testing.T

@@ -28,8 +28,8 @@ import (
 // KubeObjectToApplyYAML returns the kubernetes object converted to YAML, with "noisy" fields removed.
 //
 // We remove:
-// * status (can't be applied, shouldn't be specified)
-// * metadata.creationTimestamp (can't be applied, shouldn't be specified)
+// - status (can't be applied, shouldn't be specified)
+// - metadata.creationTimestamp (can't be applied, shouldn't be specified)
 func KubeObjectToApplyYAML(data runtime.Object) (string, error) {
 // This logic is inlined sigs.k8s.io/yaml.Marshal, but we delete some fields in the middle.

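The doc comment spells out the approach: marshal the object, drop status and metadata.creationTimestamp, then emit YAML. A rough standalone sketch of that idea, not the kops implementation (which, as the inline comment notes, inlines sigs.k8s.io/yaml.Marshal):

package example

import (
	"encoding/json"

	"sigs.k8s.io/yaml"
)

// toApplyYAML is a simplified take on "strip the noisy fields, then emit YAML".
func toApplyYAML(obj interface{}) (string, error) {
	j, err := json.Marshal(obj)
	if err != nil {
		return "", err
	}
	var m map[string]interface{}
	if err := json.Unmarshal(j, &m); err != nil {
		return "", err
	}
	delete(m, "status")
	if meta, ok := m["metadata"].(map[string]interface{}); ok {
		delete(meta, "creationTimestamp")
	}
	y, err := yaml.Marshal(m)
	if err != nil {
		return "", err
	}
	return string(y), nil
}
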
@@ -64,9 +64,10 @@ func (b *FirewallModelBuilder) getOctaviaProvider() string {
 }

 // addDirectionalGroupRule - create a rule on the source group to the dest group provided a securityGroupRuleTask
-// Example
-// Create an Ingress rule on source allowing traffic from dest with the options in the SecurityGroupRule
-// Create an Egress rule on source allowing traffic to dest with the options in the SecurityGroupRule
+//
+// Example
+// Create an Ingress rule on source allowing traffic from dest with the options in the SecurityGroupRule
+// Create an Egress rule on source allowing traffic to dest with the options in the SecurityGroupRule
 func (b *FirewallModelBuilder) addDirectionalGroupRule(c *fi.ModelBuilderContext, source, dest *openstacktasks.SecurityGroup, sgr *openstacktasks.SecurityGroupRule) {
 t := &openstacktasks.SecurityGroupRule{
 Direction: sgr.Direction,

@@ -40,7 +40,6 @@ import (
 // PerformAssignments is called on create, as well as an update. In fact
 // any time Run() is called in apply_cluster.go we will reach this function.
 // Please do all after-market logic here.
-//
 func PerformAssignments(c *kops.Cluster, cloud fi.Cloud) error {
 ctx := context.TODO()

@@ -69,7 +69,6 @@ func PopulateClusterSpec(clientset simple.Clientset, cluster *kopsapi.Cluster, c
 return c.fullCluster, nil
 }

-//
 // Here be dragons
 //
 // This function has some `interesting` things going on.
@@ -78,7 +77,6 @@ func PopulateClusterSpec(clientset simple.Clientset, cluster *kopsapi.Cluster, c
 // very wrong.. but at least now my new cluster.Spec.Topology
 // struct is falling through..
 // @kris-nova
-//
 func (c *populateClusterSpec) run(clientset simple.Clientset) error {
 if errs := validation.ValidateCluster(c.InputCluster, false); len(errs) != 0 {
 return errs.ToAggregate()

@@ -169,10 +169,11 @@ func writeLiteralList(body *hclwrite.Body, key string, literals []*terraformWrit

 // writeMap writes a map's key-value pairs to a body spready across multiple lines.
 // Example:
-// key = {
-// "key1" = "value1"
-// "key2" = "value2"
-// }
+//
+// key = {
+// "key1" = "value1"
+// "key2" = "value2"
+// }
 //
 // The HCL2 library does not support this natively. See https://github.com/hashicorp/hcl/issues/356
 func writeMap(body *hclwrite.Body, key string, values map[string]cty.Value) {

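For context on why writeMap exists: hclwrite will happily render a whole object value for an attribute, but kops wants each key on its own line, which the comment notes the library does not support natively. A standalone sketch of the library-default path (not kops code):

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2/hclwrite"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	f := hclwrite.NewEmptyFile()
	// Let hclwrite render the object expression itself; writeMap instead emits
	// one "key = value" pair per line to get the layout shown in the comment.
	f.Body().SetAttributeValue("key", cty.ObjectVal(map[string]cty.Value{
		"key1": cty.StringVal("value1"),
		"key2": cty.StringVal("value2"),
	}))
	fmt.Printf("%s", f.Bytes())
}
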
@@ -141,16 +141,19 @@ func (t *TerraformTarget) finishHCL2() error {

 // writeLocalsOutputs creates the locals block and output blocks for all output variables
 // Example:
-// locals {
-// key1 = "value1"
-// key2 = "value2"
-// }
-// output "key1" {
-// value = "value1"
-// }
-// output "key2" {
-// value = "value2"
-// }
+//
+// locals {
+// key1 = "value1"
+// key2 = "value2"
+// }
+//
+// output "key1" {
+// value = "value1"
+// }
+//
+// output "key2" {
+// value = "value2"
+// }
 func writeLocalsOutputs(body *hclwrite.Body, outputs map[string]terraformWriter.OutputValue) error {
 if len(outputs) == 0 {
 return nil

@@ -75,8 +75,9 @@ func WithBackoff(backoff wait.Backoff) VFSOption {

 // ReadFile reads a file from a vfs URL
 // It supports additional schemes which don't (yet) have full VFS implementations:
-// metadata: reads from instance metadata on GCE/AWS
-// http / https: reads from HTTP
+//
+// metadata: reads from instance metadata on GCE/AWS
+// http / https: reads from HTTP
 func (c *VFSContext) ReadFile(location string, options ...VFSOption) ([]byte, error) {
 ctx := context.TODO()

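Given the extra schemes the comment lists, calling the reader is straightforward; a hypothetical usage sketch (the import path, the shared vfs.Context value, and the URL are assumptions, not taken from the diff):

package main

import (
	"fmt"

	"k8s.io/kops/util/pkg/vfs"
)

func main() {
	// "metadata:" and "http(s):" locations are handled specially by ReadFile,
	// as described in the doc comment above.
	data, err := vfs.Context.ReadFile("https://example.com/cluster.yaml")
	if err != nil {
		fmt.Println("read failed:", err)
		return
	}
	fmt.Printf("read %d bytes\n", len(data))
}
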
@@ -40,7 +40,8 @@ import (
 // https://docs.aws.amazon.com/general/latest/gr/s3.html
 // TODO: match fips and S3 access point naming conventions
 // TODO: perhaps make region regex more specific, i.e. (us|eu|ap|cn|ca|sa), to prevent matching bucket names that match region format?
-// but that will mean updating this list when AWS introduces new regions
+//
+// but that will mean updating this list when AWS introduces new regions
 var s3UrlRegexp = regexp.MustCompile(`(s3([-.](?P<region>\w{2}(-gov)?-\w+-\d{1})|[-.](?P<bucket>[\w.\-\_]+)|)?|(?P<bucket>[\w.\-\_]+)[.]s3([.-](?P<region>\w{2}(-gov)?-\w+-\d{1}))?)[.]amazonaws[.]com([.]cn)?(?P<path>.*)?`)

 type S3BucketDetails struct {