Update vendor

Justin Santa Barbara 2020-04-08 07:42:14 -04:00
parent 7c9c1639fd
commit b77693c324
345 changed files with 29152 additions and 9937 deletions

File diff suppressed because it is too large


@ -1,4 +1,4 @@
# The following Ragel file was autogenerated with unicode2ragel.rb
# from: http://www.unicode.org/Public/9.0.0/ucd/auxiliary/GraphemeBreakProperty.txt
#
# It defines ["Prepend", "CR", "LF", "Control", "Extend", "Regional_Indicator", "SpacingMark", "L", "V", "T", "LV", "LVT", "E_Base", "E_Modifier", "ZWJ", "Glue_After_Zwj", "E_Base_GAZ"].
@ -8,8 +8,8 @@
%%{
machine GraphemeCluster;
Prepend =
0xD8 0x80..0x85 #Cf [6] ARABIC NUMBER SIGN..ARABIC NUMBER ...
| 0xDB 0x9D #Cf ARABIC END OF AYAH
| 0xDC 0x8F #Cf SYRIAC ABBREVIATION MARK
@ -19,15 +19,15 @@
| 0xF0 0x91 0x87 0x82..0x83 #Lo [2] SHARADA SIGN JIHVAMULIYA..SHARA...
;
CR =
0x0D #Cc <control-000D>
;
LF =
0x0A #Cc <control-000A>
;
Control =
0x00..0x09 #Cc [10] <control-0000>..<control-0009>
| 0x0B..0x0C #Cc [2] <control-000B>..<control-000C>
| 0x0E..0x1F #Cc [18] <control-000E>..<control-001F>
@ -62,7 +62,7 @@
| 0xF3 0xA0 0xBF 0x00..0xBF #
;
Extend =
0xCC 0x80..0xFF #Mn [112] COMBINING GRAVE ACCENT..COMBINING ...
| 0xCD 0x00..0xAF #
| 0xD2 0x83..0x87 #Mn [5] COMBINING CYRILLIC TITLO..COMBININ...
@ -392,11 +392,11 @@
| 0xF3 0xA0 0x87 0x00..0xAF #
;
Regional_Indicator =
0xF0 0x9F 0x87 0xA6..0xBF #So [26] REGIONAL INDICATOR SYMBOL LETTE...
;
SpacingMark =
0xE0 0xA4 0x83 #Mc DEVANAGARI SIGN VISARGA
| 0xE0 0xA4 0xBB #Mc DEVANAGARI VOWEL SIGN OOE
| 0xE0 0xA4 0xBE..0xFF #Mc [3] DEVANAGARI VOWEL SIGN AA..DEVANAGA...
@ -549,26 +549,26 @@
| 0xF0 0x9D 0x85 0xAD #Mc MUSICAL SYMBOL COMBINING AUGMENTAT...
;
L =
0xE1 0x84 0x80..0xFF #Lo [96] HANGUL CHOSEONG KIYEOK..HANGUL CHO...
| 0xE1 0x85 0x00..0x9F #
| 0xEA 0xA5 0xA0..0xBC #Lo [29] HANGUL CHOSEONG TIKEUT-MIEUM..HANG...
;
V =
0xE1 0x85 0xA0..0xFF #Lo [72] HANGUL JUNGSEONG FILLER..HANGUL JU...
| 0xE1 0x86 0x00..0xA7 #
| 0xED 0x9E 0xB0..0xFF #Lo [23] HANGUL JUNGSEONG O-YEO..HANGUL JUN...
| 0xED 0x9F 0x00..0x86 #
;
T =
0xE1 0x86 0xA8..0xFF #Lo [88] HANGUL JONGSEONG KIYEOK..HANGUL JO...
| 0xE1 0x87 0x00..0xBF #
| 0xED 0x9F 0x8B..0xBB #Lo [49] HANGUL JONGSEONG NIEUN-RIEUL..HANG...
;
LV =
0xEA 0xB0 0x80 #Lo HANGUL SYLLABLE GA
| 0xEA 0xB0 0x9C #Lo HANGUL SYLLABLE GAE
| 0xEA 0xB0 0xB8 #Lo HANGUL SYLLABLE GYA
@ -970,7 +970,7 @@
| 0xED 0x9E 0x88 #Lo HANGUL SYLLABLE HI
;
LVT =
0xEA 0xB0 0x81..0x9B #Lo [27] HANGUL SYLLABLE GAG..HANGUL SYLLAB...
| 0xEA 0xB0 0x9D..0xB7 #Lo [27] HANGUL SYLLABLE GAEG..HANGUL SYLLA...
| 0xEA 0xB0 0xB9..0xFF #Lo [27] HANGUL SYLLABLE GYAG..HANGUL SYLLA...
@ -1531,7 +1531,7 @@
| 0xED 0x9E 0x89..0xA3 #Lo [27] HANGUL SYLLABLE HIG..HANGUL SYLLAB...
;
E_Base =
0xE2 0x98 0x9D #So WHITE UP POINTING INDEX
| 0xE2 0x9B 0xB9 #So PERSON WITH BALL
| 0xE2 0x9C 0x8A..0x8D #So [4] RAISED FIST..WRITING HAND
@ -1562,21 +1562,21 @@
| 0xF0 0x9F 0xA4 0xBC..0xBE #So [3] WRESTLERS..HANDBALL
;
E_Modifier =
0xF0 0x9F 0x8F 0xBB..0xBF #Sk [5] EMOJI MODIFIER FITZPATRICK TYPE...
;
ZWJ =
0xE2 0x80 0x8D #Cf ZERO WIDTH JOINER
;
Glue_After_Zwj =
0xE2 0x9D 0xA4 #So HEAVY BLACK HEART
| 0xF0 0x9F 0x92 0x8B #So KISS MARK
| 0xF0 0x9F 0x97 0xA8 #So LEFT SPEECH BUBBLE
;
E_Base_GAZ =
0xF0 0x9F 0x91 0xA6..0xA9 #So [4] BOY..WOMAN
;


@ -8,7 +8,7 @@
# -o, --output Write output to file
#
# Updated by: Marty Schoch <marty.schoch@gmail.com>
#
# This script uses the unicode spec to generate a Ragel state machine
# that recognizes unicode alphanumeric characters. It generates 5
# character classes: uupper, ulower, ualpha, udigit, and ualnum.
@ -54,7 +54,7 @@ cli_opts = OptionParser.new do |opts|
exit
end
opts.on("-u", "--url URL", "URL to process") do |o|
@chart_url = o
end
opts.on("-m", "--machine MACHINE_NAME", "Machine name") do |o|
machine_name = o
@ -78,7 +78,7 @@ end
# Downloads the document at url and yields every alpha line's hex
# range and description.
def each_alpha( url, property )
open( url ) do |file|
file.each_line do |line|
next if line =~ /^#/;
@ -120,7 +120,7 @@ end
# 0x00 - 0x7f -> 0zzzzzzz[7]
# 0x80 - 0x7ff -> 110yyyyy[5] 10zzzzzz[6]
# 0x800 - 0xffff -> 1110xxxx[4] 10yyyyyy[6] 10zzzzzz[6]
# 0x010000 - 0x10ffff -> 11110www[3] 10xxxxxx[6] 10yyyyyy[6] 10zzzzzz[6]
UTF8_BOUNDARIES = [0x7f, 0x7ff, 0xffff, 0x10ffff]
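The table above fixes the UTF-8 byte-length boundaries the script encodes against. A minimal Go sketch (illustrative only, not part of the vendored script) showing the same boundaries:

package main

import "fmt"

func main() {
	// One byte up to 0x7F, two up to 0x7FF, three up to 0xFFFF,
	// four up to 0x10FFFF, matching UTF8_BOUNDARIES above.
	for _, r := range []rune{0x41, 0x7FF, 0xFFFF, 0x10FFFF} {
		fmt.Printf("U+%06X -> %d byte(s): % X\n", r, len(string(r)), []byte(string(r)))
	}
}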
@ -216,7 +216,7 @@ def build_range( start, stop )
# Unshared prefix, end of run
return ["0x#{a}..0x#{b} "] if left.zero?
###
# Unshared prefix, not end of run
# Range can be 0x123456..0x56789A
@ -239,13 +239,13 @@ def build_range( start, stop )
###
# Don't generate last range if it is covered by first range
ret << build_range(b + "00" * left, stop) unless b == "FF"
ret.flatten!
end
def to_utf8( range )
utf8_ranges( range ).map do |r|
begin_enc = to_utf8_enc(r.begin)
end_enc = to_utf8_enc(r.end)
build_range begin_enc, end_enc
@ -290,7 +290,7 @@ def generate_machine( name, property )
codes = (@encoding == :ucs4) ? to_ucs4(range) : to_utf8(range)
#raise "Invalid encoding of range #{range}: #{codes.inspect}" unless
# is_valid? range, desc, codes
range_width = codes.map { |a| a.size }.max
@ -315,7 +315,7 @@ def generate_machine( name, property )
end
@output.puts <<EOF
# The following Ragel file was autogenerated with #{$0}
# from: #{@chart_url}
#
# It defines #{properties}.
@ -325,7 +325,7 @@ end
%%{
machine #{machine_name};
EOF
properties.each { |x| generate_machine( x, x ) }

vendor/github.com/aws/aws-sdk-go/service/pricing/BUILD.bazel

@ -1,24 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"api.go",
"doc.go",
"errors.go",
"service.go",
],
importmap = "k8s.io/kops/vendor/github.com/aws/aws-sdk-go/service/pricing",
importpath = "github.com/aws/aws-sdk-go/service/pricing",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/aws/aws-sdk-go/aws:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/aws/awsutil:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/aws/client:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/aws/client/metadata:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/aws/request:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/aws/signer/v4:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/private/protocol:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc:go_default_library",
],
)

File diff suppressed because it is too large

vendor/github.com/aws/aws-sdk-go/service/pricing/doc.go

@ -1,51 +0,0 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
// Package pricing provides the client and types for making API
// requests to AWS Price List Service.
//
// AWS Price List Service API (AWS Price List Service) is a centralized and
// convenient way to programmatically query Amazon Web Services for services,
// products, and pricing information. The AWS Price List Service uses standardized
// product attributes such as Location, Storage Class, and Operating System,
// and provides prices at the SKU level. You can use the AWS Price List Service
// to build cost control and scenario planning tools, reconcile billing data,
// forecast future spend for budgeting purposes, and provide cost-benefit analyses
// that compare your internal workloads with AWS.
//
// Use GetServices without a service code to retrieve the service codes for
// all AWS services, then GetServices with a service code to retrieve the attribute
// names for that service. After you have the service code and attribute names,
// you can use GetAttributeValues to see what values are available for an attribute.
// With the service code and an attribute name and value, you can use GetProducts
// to find specific products that you're interested in, such as an AmazonEC2
// instance, with a Provisioned IOPS volumeType.
//
// Service Endpoint
//
// AWS Price List Service API provides the following two endpoints:
//
// * https://api.pricing.us-east-1.amazonaws.com
//
// * https://api.pricing.ap-south-1.amazonaws.com
//
// See https://docs.aws.amazon.com/goto/WebAPI/pricing-2017-10-15 for more information on this service.
//
// See pricing package documentation for more information.
// https://docs.aws.amazon.com/sdk-for-go/api/service/pricing/
//
// Using the Client
//
// To contact AWS Price List Service with the SDK use the New function to create
// a new service client. With that client you can make API requests to the service.
// These clients are safe to use concurrently.
//
// See the SDK's documentation for more information on how to use the SDK.
// https://docs.aws.amazon.com/sdk-for-go/api/
//
// See aws.Config documentation for more information on configuring SDK clients.
// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
//
// See the AWS Price List Service client Pricing for more
// information on creating client for this service.
// https://docs.aws.amazon.com/sdk-for-go/api/service/pricing/#New
package pricing
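A hedged sketch of the workflow described above (a service code plus a TERM_MATCH filter passed to GetProducts). GetProducts, GetProductsInput, Filter, and PriceList live in the generated api.go, whose diff is suppressed above, so those names are assumed from the v1 SDK rather than confirmed by this diff:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/pricing"
)

func main() {
	// The pricing API is only served from us-east-1 and ap-south-1 (see above).
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := pricing.New(sess)

	// GetProducts/GetProductsInput/Filter are assumed from the generated api.go.
	out, err := svc.GetProducts(&pricing.GetProductsInput{
		ServiceCode: aws.String("AmazonEC2"),
		Filters: []*pricing.Filter{{
			Type:  aws.String("TERM_MATCH"),
			Field: aws.String("volumeType"),
			Value: aws.String("Provisioned IOPS"),
		}},
	})
	if err != nil {
		panic(err)
	}
	fmt.Printf("matched %d products\n", len(out.PriceList))
}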

vendor/github.com/aws/aws-sdk-go/service/pricing/errors.go

@ -1,49 +0,0 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
package pricing
import (
"github.com/aws/aws-sdk-go/private/protocol"
)
const (
// ErrCodeExpiredNextTokenException for service response error code
// "ExpiredNextTokenException".
//
// The pagination token expired. Try again without a pagination token.
ErrCodeExpiredNextTokenException = "ExpiredNextTokenException"
// ErrCodeInternalErrorException for service response error code
// "InternalErrorException".
//
// An error on the server occurred during the processing of your request. Try
// again later.
ErrCodeInternalErrorException = "InternalErrorException"
// ErrCodeInvalidNextTokenException for service response error code
// "InvalidNextTokenException".
//
// The pagination token is invalid. Try again without a pagination token.
ErrCodeInvalidNextTokenException = "InvalidNextTokenException"
// ErrCodeInvalidParameterException for service response error code
// "InvalidParameterException".
//
// One or more parameters had an invalid value.
ErrCodeInvalidParameterException = "InvalidParameterException"
// ErrCodeNotFoundException for service response error code
// "NotFoundException".
//
// The requested resource can't be found.
ErrCodeNotFoundException = "NotFoundException"
)
var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{
"ExpiredNextTokenException": newErrorExpiredNextTokenException,
"InternalErrorException": newErrorInternalErrorException,
"InvalidNextTokenException": newErrorInvalidNextTokenException,
"InvalidParameterException": newErrorInvalidParameterException,
"NotFoundException": newErrorNotFoundException,
}
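A short sketch of matching these generated error codes; the awserr type assertion is the standard aws-sdk-go v1 pattern and is assumed here rather than shown in this diff:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/pricing"
)

// classify maps a pricing API error onto a handling hint using the
// generated ErrCode constants above.
func classify(err error) string {
	if aerr, ok := err.(awserr.Error); ok {
		switch aerr.Code() {
		case pricing.ErrCodeExpiredNextTokenException,
			pricing.ErrCodeInvalidNextTokenException:
			return "retry without the pagination token"
		case pricing.ErrCodeInvalidParameterException:
			return "fix the request parameters"
		case pricing.ErrCodeNotFoundException:
			return "requested resource not found"
		case pricing.ErrCodeInternalErrorException:
			return "server-side error; retry later"
		}
	}
	return "unhandled error"
}

func main() {
	fmt.Println(classify(nil)) // no AWS error information -> unhandled
}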

vendor/github.com/aws/aws-sdk-go/service/pricing/service.go

@ -1,106 +0,0 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
package pricing
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/client/metadata"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/aws/signer/v4"
"github.com/aws/aws-sdk-go/private/protocol"
"github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
)
// Pricing provides the API operation methods for making requests to
// AWS Price List Service. See this package's package overview docs
// for details on the service.
//
// Pricing methods are safe to use concurrently. It is not safe to
// mutate any of the struct's properties, though.
type Pricing struct {
*client.Client
}
// Used for custom client initialization logic
var initClient func(*client.Client)
// Used for custom request initialization logic
var initRequest func(*request.Request)
// Service information constants
const (
ServiceName = "api.pricing" // Name of service.
EndpointsID = ServiceName // ID to lookup a service endpoint with.
ServiceID = "Pricing" // ServiceID is a unique identifier of a specific service.
)
// New creates a new instance of the Pricing client with a session.
// If additional configuration is needed for the client instance use the optional
// aws.Config parameter to add your extra config.
//
// Example:
// mySession := session.Must(session.NewSession())
//
// // Create a Pricing client from just a session.
// svc := pricing.New(mySession)
//
// // Create a Pricing client with additional configuration
// svc := pricing.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func New(p client.ConfigProvider, cfgs ...*aws.Config) *Pricing {
c := p.ClientConfig(EndpointsID, cfgs...)
if c.SigningNameDerived || len(c.SigningName) == 0 {
c.SigningName = "pricing"
}
return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName)
}
// newClient creates, initializes and returns a new service client instance.
func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Pricing {
svc := &Pricing{
Client: client.New(
cfg,
metadata.ClientInfo{
ServiceName: ServiceName,
ServiceID: ServiceID,
SigningName: signingName,
SigningRegion: signingRegion,
PartitionID: partitionID,
Endpoint: endpoint,
APIVersion: "2017-10-15",
JSONVersion: "1.1",
TargetPrefix: "AWSPriceListService",
},
handlers,
),
}
// Handlers
svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler)
svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler)
svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler)
svc.Handlers.UnmarshalError.PushBackNamed(
protocol.NewUnmarshalErrorHandler(jsonrpc.NewUnmarshalTypedError(exceptionFromCode)).NamedHandler(),
)
// Run custom client initialization if present
if initClient != nil {
initClient(svc.Client)
}
return svc
}
// newRequest creates a new request for a Pricing operation and runs any
// custom request initialization.
func (c *Pricing) newRequest(op *request.Operation, params, data interface{}) *request.Request {
req := c.NewRequest(op, params, data)
// Run custom request initialization if present
if initRequest != nil {
initRequest(req)
}
return req
}


@ -91,7 +91,7 @@ io_mode = "async"
service "http" "web_proxy" {
listen_addr = "127.0.0.1:8080"
process "main" {
command = ["/usr/local/bin/awesome-app", "server"]
}


@ -8,7 +8,7 @@
# -o, --output Write output to file
#
# Updated by: Marty Schoch <marty.schoch@gmail.com>
#
# This script uses the unicode spec to generate a Ragel state machine
# that recognizes unicode alphanumeric characters. It generates 5
# character classes: uupper, ulower, ualpha, udigit, and ualnum.
@ -54,7 +54,7 @@ cli_opts = OptionParser.new do |opts|
exit
end
opts.on("-u", "--url URL", "URL to process") do |o|
@chart_url = o
end
opts.on("-m", "--machine MACHINE_NAME", "Machine name") do |o|
machine_name = o
@ -78,7 +78,7 @@ end
# Downloads the document at url and yields every alpha line's hex
# range and description.
def each_alpha( url, property )
open( url ) do |file|
file.each_line do |line|
next if line =~ /^#/;
@ -120,7 +120,7 @@ end
# 0x00 - 0x7f -> 0zzzzzzz[7]
# 0x80 - 0x7ff -> 110yyyyy[5] 10zzzzzz[6]
# 0x800 - 0xffff -> 1110xxxx[4] 10yyyyyy[6] 10zzzzzz[6]
# 0x010000 - 0x10ffff -> 11110www[3] 10xxxxxx[6] 10yyyyyy[6] 10zzzzzz[6]
UTF8_BOUNDARIES = [0x7f, 0x7ff, 0xffff, 0x10ffff]
@ -216,7 +216,7 @@ def build_range( start, stop )
# Unshared prefix, end of run
return ["0x#{a}..0x#{b} "] if left.zero?
###
# Unshared prefix, not end of run
# Range can be 0x123456..0x56789A
@ -239,13 +239,13 @@ def build_range( start, stop )
###
# Don't generate last range if it is covered by first range
ret << build_range(b + "00" * left, stop) unless b == "FF"
ret.flatten!
end
def to_utf8( range )
utf8_ranges( range ).map do |r|
begin_enc = to_utf8_enc(r.begin)
end_enc = to_utf8_enc(r.end)
build_range begin_enc, end_enc
@ -290,7 +290,7 @@ def generate_machine( name, property )
codes = (@encoding == :ucs4) ? to_ucs4(range) : to_utf8(range)
#raise "Invalid encoding of range #{range}: #{codes.inspect}" unless
# is_valid? range, desc, codes
range_width = codes.map { |a| a.size }.max
@ -315,7 +315,7 @@ def generate_machine( name, property )
end
@output.puts <<EOF
# The following Ragel file was autogenerated with #{$0}
# from: #{@chart_url}
#
# It defines #{properties}.
@ -325,7 +325,7 @@ end
%%{
machine #{machine_name};
EOF
properties.each { |x| generate_machine( x, x ) }


@ -1,4 +1,4 @@
# The following Ragel file was autogenerated with unicode2ragel.rb
# from: http://www.unicode.org/Public/9.0.0/ucd/DerivedCoreProperties.txt
#
# It defines ["ID_Start", "ID_Continue"].
@ -8,8 +8,8 @@
%%{
machine UnicodeDerived;
ID_Start =
0x41..0x5A #L& [26] LATIN CAPITAL LETTER A..LATIN CAPI...
| 0x61..0x7A #L& [26] LATIN SMALL LETTER A..LATIN SMALL ...
| 0xC2 0xAA #Lo FEMININE ORDINAL INDICATOR
@ -807,7 +807,7 @@
| 0xF0 0xAF 0xA8 0x00..0x9D #
;
ID_Continue =
0x30..0x39 #Nd [10] DIGIT ZERO..DIGIT NINE
| 0x41..0x5A #L& [26] LATIN CAPITAL LETTER A..LATIN CAPI...
| 0x5F #Pc LOW LINE

vendor/github.com/liggitt/tabwriter/.travis.yml generated vendored Normal file

@ -0,0 +1,11 @@
language: go
go:
- "1.8"
- "1.9"
- "1.10"
- "1.11"
- "1.12"
- master
script: go test -v ./...

vendor/github.com/liggitt/tabwriter/BUILD.bazel generated vendored Normal file

@ -0,0 +1,9 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["tabwriter.go"],
importmap = "k8s.io/kops/vendor/github.com/liggitt/tabwriter",
importpath = "github.com/liggitt/tabwriter",
visibility = ["//visibility:public"],
)

vendor/github.com/liggitt/tabwriter/LICENSE generated vendored Normal file

@ -0,0 +1,27 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

vendor/github.com/liggitt/tabwriter/README.md generated vendored Normal file

@ -0,0 +1,7 @@
This repo is a drop-in replacement for the golang [text/tabwriter](https://golang.org/pkg/text/tabwriter/) package.
It is based on that package at [cf2c2ea8](https://github.com/golang/go/tree/cf2c2ea89d09d486bb018b1817c5874388038c3a/src/text/tabwriter) and inherits its license.
The following additional features are supported (a brief usage sketch follows this list):
* `RememberWidths` flag allows remembering maximum widths seen per column even after Flush() is called.
* `RememberedWidths() []int` and `SetRememberedWidths([]int) *Writer` allow obtaining and transferring remembered column widths between writers.
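A minimal sketch of the remembered-width workflow, using only the constructor, flag, and methods defined in tabwriter.go below (the sample column values are illustrative):

package main

import (
	"fmt"
	"os"

	"github.com/liggitt/tabwriter"
)

func main() {
	// minwidth=0, tabwidth=8, padding=2, padchar=' ';
	// RememberWidths keeps per-column maximums across Flush calls.
	w := tabwriter.NewWriter(os.Stdout, 0, 8, 2, ' ', tabwriter.RememberWidths)

	fmt.Fprintln(w, "NAME\tSTATUS\tAGE")
	fmt.Fprintln(w, "node-1\tReady\t12d")
	w.Flush()

	// Rows written after the Flush still align with the columns above.
	fmt.Fprintln(w, "node-2\tNotReady\t3h")
	w.Flush()

	// Remembered widths can be copied to another writer.
	w2 := tabwriter.NewWriter(os.Stdout, 0, 8, 2, ' ', tabwriter.RememberWidths)
	w2.SetRememberedWidths(w.RememberedWidths())
}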

vendor/github.com/liggitt/tabwriter/tabwriter.go generated vendored Normal file

@ -0,0 +1,637 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package tabwriter implements a write filter (tabwriter.Writer) that
// translates tabbed columns in input into properly aligned text.
//
// It is a drop-in replacement for the golang text/tabwriter package (https://golang.org/pkg/text/tabwriter),
// based on that package at https://github.com/golang/go/tree/cf2c2ea89d09d486bb018b1817c5874388038c3a
// with support for additional features.
//
// The package is using the Elastic Tabstops algorithm described at
// http://nickgravgaard.com/elastictabstops/index.html.
package tabwriter
import (
"io"
"unicode/utf8"
)
// ----------------------------------------------------------------------------
// Filter implementation
// A cell represents a segment of text terminated by tabs or line breaks.
// The text itself is stored in a separate buffer; cell only describes the
// segment's size in bytes, its width in runes, and whether it's an htab
// ('\t') terminated cell.
//
type cell struct {
size int // cell size in bytes
width int // cell width in runes
htab bool // true if the cell is terminated by an htab ('\t')
}
// A Writer is a filter that inserts padding around tab-delimited
// columns in its input to align them in the output.
//
// The Writer treats incoming bytes as UTF-8-encoded text consisting
// of cells terminated by horizontal ('\t') or vertical ('\v') tabs,
// and newline ('\n') or formfeed ('\f') characters; both newline and
// formfeed act as line breaks.
//
// Tab-terminated cells in contiguous lines constitute a column. The
// Writer inserts padding as needed to make all cells in a column have
// the same width, effectively aligning the columns. It assumes that
// all characters have the same width, except for tabs for which a
// tabwidth must be specified. Column cells must be tab-terminated, not
// tab-separated: non-tab terminated trailing text at the end of a line
// forms a cell but that cell is not part of an aligned column.
// For instance, in this example (where | stands for a horizontal tab):
//
// aaaa|bbb|d
// aa |b |dd
// a |
// aa |cccc|eee
//
// the b and c are in distinct columns (the b column is not contiguous
// all the way). The d and e are not in a column at all (there's no
// terminating tab, nor would the column be contiguous).
//
// The Writer assumes that all Unicode code points have the same width;
// this may not be true in some fonts or if the string contains combining
// characters.
//
// If DiscardEmptyColumns is set, empty columns that are terminated
// entirely by vertical (or "soft") tabs are discarded. Columns
// terminated by horizontal (or "hard") tabs are not affected by
// this flag.
//
// If a Writer is configured to filter HTML, HTML tags and entities
// are passed through. The widths of tags and entities are
// assumed to be zero (tags) and one (entities) for formatting purposes.
//
// A segment of text may be escaped by bracketing it with Escape
// characters. The tabwriter passes escaped text segments through
// unchanged. In particular, it does not interpret any tabs or line
// breaks within the segment. If the StripEscape flag is set, the
// Escape characters are stripped from the output; otherwise they
// are passed through as well. For the purpose of formatting, the
// width of the escaped text is always computed excluding the Escape
// characters.
//
// The formfeed character acts like a newline but it also terminates
// all columns in the current line (effectively calling Flush). Tab-
// terminated cells in the next line start new columns. Unless found
// inside an HTML tag or inside an escaped text segment, formfeed
// characters appear as newlines in the output.
//
// The Writer must buffer input internally, because proper spacing
// of one line may depend on the cells in future lines. Clients must
// call Flush when done calling Write.
//
type Writer struct {
// configuration
output io.Writer
minwidth int
tabwidth int
padding int
padbytes [8]byte
flags uint
// current state
buf []byte // collected text excluding tabs or line breaks
pos int // buffer position up to which cell.width of incomplete cell has been computed
cell cell // current incomplete cell; cell.width is up to buf[pos] excluding ignored sections
endChar byte // terminating char of escaped sequence (Escape for escapes, '>', ';' for HTML tags/entities, or 0)
lines [][]cell // list of lines; each line is a list of cells
widths []int // list of column widths in runes - re-used during formatting
maxwidths []int // list of max column widths in runes
}
// addLine adds a new line.
// flushed is a hint indicating whether the underlying writer was just flushed.
// If so, the previous line is not likely to be a good indicator of the new line's cells.
func (b *Writer) addLine(flushed bool) {
// Grow slice instead of appending,
// as that gives us an opportunity
// to re-use an existing []cell.
if n := len(b.lines) + 1; n <= cap(b.lines) {
b.lines = b.lines[:n]
b.lines[n-1] = b.lines[n-1][:0]
} else {
b.lines = append(b.lines, nil)
}
if !flushed {
// The previous line is probably a good indicator
// of how many cells the current line will have.
// If the current line's capacity is smaller than that,
// abandon it and make a new one.
if n := len(b.lines); n >= 2 {
if prev := len(b.lines[n-2]); prev > cap(b.lines[n-1]) {
b.lines[n-1] = make([]cell, 0, prev)
}
}
}
}
// Reset the current state.
func (b *Writer) reset() {
b.buf = b.buf[:0]
b.pos = 0
b.cell = cell{}
b.endChar = 0
b.lines = b.lines[0:0]
b.widths = b.widths[0:0]
b.addLine(true)
}
// Internal representation (current state):
//
// - all text written is appended to buf; tabs and line breaks are stripped away
// - at any given time there is a (possibly empty) incomplete cell at the end
// (the cell starts after a tab or line break)
// - cell.size is the number of bytes belonging to the cell so far
// - cell.width is text width in runes of that cell from the start of the cell to
// position pos; html tags and entities are excluded from this width if html
// filtering is enabled
// - the sizes and widths of processed text are kept in the lines list
// which contains a list of cells for each line
// - the widths list is a temporary list with current widths used during
// formatting; it is kept in Writer because it's re-used
//
// |<---------- size ---------->|
// | |
// |<- width ->|<- ignored ->| |
// | | | |
// [---processed---tab------------<tag>...</tag>...]
// ^ ^ ^
// | | |
// buf start of incomplete cell pos
// Formatting can be controlled with these flags.
const (
// Ignore html tags and treat entities (starting with '&'
// and ending in ';') as single characters (width = 1).
FilterHTML uint = 1 << iota
// Strip Escape characters bracketing escaped text segments
// instead of passing them through unchanged with the text.
StripEscape
// Force right-alignment of cell content.
// Default is left-alignment.
AlignRight
// Handle empty columns as if they were not present in
// the input in the first place.
DiscardEmptyColumns
// Always use tabs for indentation columns (i.e., padding of
// leading empty cells on the left) independent of padchar.
TabIndent
// Print a vertical bar ('|') between columns (after formatting).
// Discarded columns appear as zero-width columns ("||").
Debug
// Remember maximum widths seen per column even after Flush() is called.
RememberWidths
)
// A Writer must be initialized with a call to Init. The first parameter (output)
// specifies the filter output. The remaining parameters control the formatting:
//
// minwidth minimal cell width including any padding
// tabwidth width of tab characters (equivalent number of spaces)
// padding padding added to a cell before computing its width
// padchar ASCII char used for padding
// if padchar == '\t', the Writer will assume that the
// width of a '\t' in the formatted output is tabwidth,
// and cells are left-aligned independent of align_left
// (for correct-looking results, tabwidth must correspond
// to the tab width in the viewer displaying the result)
// flags formatting control
//
func (b *Writer) Init(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *Writer {
if minwidth < 0 || tabwidth < 0 || padding < 0 {
panic("negative minwidth, tabwidth, or padding")
}
b.output = output
b.minwidth = minwidth
b.tabwidth = tabwidth
b.padding = padding
for i := range b.padbytes {
b.padbytes[i] = padchar
}
if padchar == '\t' {
// tab padding enforces left-alignment
flags &^= AlignRight
}
b.flags = flags
b.reset()
return b
}
// debugging support (keep code around)
func (b *Writer) dump() {
pos := 0
for i, line := range b.lines {
print("(", i, ") ")
for _, c := range line {
print("[", string(b.buf[pos:pos+c.size]), "]")
pos += c.size
}
print("\n")
}
print("\n")
}
// local error wrapper so we can distinguish errors we want to return
// as errors from genuine panics (which we don't want to return as errors)
type osError struct {
err error
}
func (b *Writer) write0(buf []byte) {
n, err := b.output.Write(buf)
if n != len(buf) && err == nil {
err = io.ErrShortWrite
}
if err != nil {
panic(osError{err})
}
}
func (b *Writer) writeN(src []byte, n int) {
for n > len(src) {
b.write0(src)
n -= len(src)
}
b.write0(src[0:n])
}
var (
newline = []byte{'\n'}
tabs = []byte("\t\t\t\t\t\t\t\t")
)
func (b *Writer) writePadding(textw, cellw int, useTabs bool) {
if b.padbytes[0] == '\t' || useTabs {
// padding is done with tabs
if b.tabwidth == 0 {
return // tabs have no width - can't do any padding
}
// make cellw the smallest multiple of b.tabwidth
cellw = (cellw + b.tabwidth - 1) / b.tabwidth * b.tabwidth
n := cellw - textw // amount of padding
if n < 0 {
panic("internal error")
}
b.writeN(tabs, (n+b.tabwidth-1)/b.tabwidth)
return
}
// padding is done with non-tab characters
b.writeN(b.padbytes[0:], cellw-textw)
}
var vbar = []byte{'|'}
func (b *Writer) writeLines(pos0 int, line0, line1 int) (pos int) {
pos = pos0
for i := line0; i < line1; i++ {
line := b.lines[i]
// if TabIndent is set, use tabs to pad leading empty cells
useTabs := b.flags&TabIndent != 0
for j, c := range line {
if j > 0 && b.flags&Debug != 0 {
// indicate column break
b.write0(vbar)
}
if c.size == 0 {
// empty cell
if j < len(b.widths) {
b.writePadding(c.width, b.widths[j], useTabs)
}
} else {
// non-empty cell
useTabs = false
if b.flags&AlignRight == 0 { // align left
b.write0(b.buf[pos : pos+c.size])
pos += c.size
if j < len(b.widths) {
b.writePadding(c.width, b.widths[j], false)
}
} else { // align right
if j < len(b.widths) {
b.writePadding(c.width, b.widths[j], false)
}
b.write0(b.buf[pos : pos+c.size])
pos += c.size
}
}
}
if i+1 == len(b.lines) {
// last buffered line - we don't have a newline, so just write
// any outstanding buffered data
b.write0(b.buf[pos : pos+b.cell.size])
pos += b.cell.size
} else {
// not the last line - write newline
b.write0(newline)
}
}
return
}
// Format the text between line0 and line1 (excluding line1); pos
// is the buffer position corresponding to the beginning of line0.
// Returns the buffer position corresponding to the beginning of line1.
//
func (b *Writer) format(pos0 int, line0, line1 int) (pos int) {
pos = pos0
column := len(b.widths)
for this := line0; this < line1; this++ {
line := b.lines[this]
if column >= len(line)-1 {
continue
}
// cell exists in this column => this line
// has more cells than the previous line
// (the last cell per line is ignored because cells are
// tab-terminated; the last cell per line describes the
// text before the newline/formfeed and does not belong
// to a column)
// print unprinted lines until beginning of block
pos = b.writeLines(pos, line0, this)
line0 = this
// column block begin
width := b.minwidth // minimal column width
discardable := true // true if all cells in this column are empty and "soft"
for ; this < line1; this++ {
line = b.lines[this]
if column >= len(line)-1 {
break
}
// cell exists in this column
c := line[column]
// update width
if w := c.width + b.padding; w > width {
width = w
}
// update discardable
if c.width > 0 || c.htab {
discardable = false
}
}
// column block end
// discard empty columns if necessary
if discardable && b.flags&DiscardEmptyColumns != 0 {
width = 0
}
if b.flags&RememberWidths != 0 {
if len(b.maxwidths) < len(b.widths) {
b.maxwidths = append(b.maxwidths, b.widths[len(b.maxwidths):]...)
}
switch {
case len(b.maxwidths) == len(b.widths):
b.maxwidths = append(b.maxwidths, width)
case b.maxwidths[len(b.widths)] > width:
width = b.maxwidths[len(b.widths)]
case b.maxwidths[len(b.widths)] < width:
b.maxwidths[len(b.widths)] = width
}
}
// format and print all columns to the right of this column
// (we know the widths of this column and all columns to the left)
b.widths = append(b.widths, width) // push width
pos = b.format(pos, line0, this)
b.widths = b.widths[0 : len(b.widths)-1] // pop width
line0 = this
}
// print unprinted lines until end
return b.writeLines(pos, line0, line1)
}
// Append text to current cell.
func (b *Writer) append(text []byte) {
b.buf = append(b.buf, text...)
b.cell.size += len(text)
}
// Update the cell width.
func (b *Writer) updateWidth() {
b.cell.width += utf8.RuneCount(b.buf[b.pos:])
b.pos = len(b.buf)
}
// To escape a text segment, bracket it with Escape characters.
// For instance, the tab in this string "Ignore this tab: \xff\t\xff"
// does not terminate a cell and constitutes a single character of
// width one for formatting purposes.
//
// The value 0xff was chosen because it cannot appear in a valid UTF-8 sequence.
//
const Escape = '\xff'
// Start escaped mode.
func (b *Writer) startEscape(ch byte) {
switch ch {
case Escape:
b.endChar = Escape
case '<':
b.endChar = '>'
case '&':
b.endChar = ';'
}
}
// Terminate escaped mode. If the escaped text was an HTML tag, its width
// is assumed to be zero for formatting purposes; if it was an HTML entity,
// its width is assumed to be one. In all other cases, the width is the
// unicode width of the text.
//
func (b *Writer) endEscape() {
switch b.endChar {
case Escape:
b.updateWidth()
if b.flags&StripEscape == 0 {
b.cell.width -= 2 // don't count the Escape chars
}
case '>': // tag of zero width
case ';':
b.cell.width++ // entity, count as one rune
}
b.pos = len(b.buf)
b.endChar = 0
}
// Terminate the current cell by adding it to the list of cells of the
// current line. Returns the number of cells in that line.
//
func (b *Writer) terminateCell(htab bool) int {
b.cell.htab = htab
line := &b.lines[len(b.lines)-1]
*line = append(*line, b.cell)
b.cell = cell{}
return len(*line)
}
func handlePanic(err *error, op string) {
if e := recover(); e != nil {
if nerr, ok := e.(osError); ok {
*err = nerr.err
return
}
panic("tabwriter: panic during " + op)
}
}
// RememberedWidths returns a copy of the remembered per-column maximum widths.
// Requires use of the RememberWidths flag, and is not threadsafe.
func (b *Writer) RememberedWidths() []int {
retval := make([]int, len(b.maxwidths))
copy(retval, b.maxwidths)
return retval
}
// SetRememberedWidths sets the remembered per-column maximum widths.
// Requires use of the RememberWidths flag, and is not threadsafe.
func (b *Writer) SetRememberedWidths(widths []int) *Writer {
b.maxwidths = make([]int, len(widths))
copy(b.maxwidths, widths)
return b
}
// Flush should be called after the last call to Write to ensure
// that any data buffered in the Writer is written to output. Any
// incomplete escape sequence at the end is considered
// complete for formatting purposes.
func (b *Writer) Flush() error {
return b.flush()
}
func (b *Writer) flush() (err error) {
defer b.reset() // even in the presence of errors
defer handlePanic(&err, "Flush")
// add current cell if not empty
if b.cell.size > 0 {
if b.endChar != 0 {
// inside escape - terminate it even if incomplete
b.endEscape()
}
b.terminateCell(false)
}
// format contents of buffer
b.format(0, 0, len(b.lines))
return nil
}
var hbar = []byte("---\n")
// Write writes buf to the writer b.
// The only errors returned are ones encountered
// while writing to the underlying output stream.
//
func (b *Writer) Write(buf []byte) (n int, err error) {
defer handlePanic(&err, "Write")
// split text into cells
n = 0
for i, ch := range buf {
if b.endChar == 0 {
// outside escape
switch ch {
case '\t', '\v', '\n', '\f':
// end of cell
b.append(buf[n:i])
b.updateWidth()
n = i + 1 // ch consumed
ncells := b.terminateCell(ch == '\t')
if ch == '\n' || ch == '\f' {
// terminate line
b.addLine(ch == '\f')
if ch == '\f' || ncells == 1 {
// A '\f' always forces a flush. Otherwise, if the previous
// line has only one cell, it has no impact on the formatting
// of the following lines (the last cell per line is ignored
// by format()), so we can flush the Writer contents.
if err = b.Flush(); err != nil {
return
}
if ch == '\f' && b.flags&Debug != 0 {
// indicate section break
b.write0(hbar)
}
}
}
case Escape:
// start of escaped sequence
b.append(buf[n:i])
b.updateWidth()
n = i
if b.flags&StripEscape != 0 {
n++ // strip Escape
}
b.startEscape(Escape)
case '<', '&':
// possibly an html tag/entity
if b.flags&FilterHTML != 0 {
// begin of tag/entity
b.append(buf[n:i])
b.updateWidth()
n = i
b.startEscape(ch)
}
}
} else {
// inside escape
if ch == b.endChar {
// end of tag/entity
j := i + 1
if ch == Escape && b.flags&StripEscape != 0 {
j = i // strip Escape
}
b.append(buf[n:j])
n = i + 1 // ch consumed
b.endEscape()
}
}
}
// append leftover text
b.append(buf[n:])
n = len(buf)
return
}
// NewWriter allocates and initializes a new tabwriter.Writer.
// The parameters are the same as for the Init function.
//
func NewWriter(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *Writer {
return new(Writer).Init(output, minwidth, tabwidth, padding, padchar, flags)
}

vendor/golang.org/x/crypto/blake2b/BUILD.bazel generated vendored Normal file

@ -0,0 +1,25 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"blake2b.go",
"blake2bAVX2_amd64.go",
"blake2bAVX2_amd64.s",
"blake2b_amd64.go",
"blake2b_amd64.s",
"blake2b_generic.go",
"blake2b_ref.go",
"blake2x.go",
"register.go",
],
importmap = "k8s.io/kops/vendor/golang.org/x/crypto/blake2b",
importpath = "golang.org/x/crypto/blake2b",
visibility = ["//visibility:public"],
deps = select({
"@io_bazel_rules_go//go/platform:amd64": [
"//vendor/golang.org/x/sys/cpu:go_default_library",
],
"//conditions:default": [],
}),
)

vendor/golang.org/x/crypto/blake2b/blake2b.go generated vendored Normal file

@ -0,0 +1,289 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package blake2b implements the BLAKE2b hash algorithm defined by RFC 7693
// and the extendable output function (XOF) BLAKE2Xb.
//
// For a detailed specification of BLAKE2b see https://blake2.net/blake2.pdf
// and for BLAKE2Xb see https://blake2.net/blake2x.pdf
//
// If you aren't sure which function you need, use BLAKE2b (Sum512 or New512).
// If you need a secret-key MAC (message authentication code), use the New512
// function with a non-nil key.
//
// BLAKE2X is a construction to compute hash values larger than 64 bytes. It
// can produce hash values between 0 and 4 GiB.
package blake2b
import (
"encoding/binary"
"errors"
"hash"
)
const (
// The blocksize of BLAKE2b in bytes.
BlockSize = 128
// The hash size of BLAKE2b-512 in bytes.
Size = 64
// The hash size of BLAKE2b-384 in bytes.
Size384 = 48
// The hash size of BLAKE2b-256 in bytes.
Size256 = 32
)
var (
useAVX2 bool
useAVX bool
useSSE4 bool
)
var (
errKeySize = errors.New("blake2b: invalid key size")
errHashSize = errors.New("blake2b: invalid hash size")
)
var iv = [8]uint64{
0x6a09e667f3bcc908, 0xbb67ae8584caa73b, 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1,
0x510e527fade682d1, 0x9b05688c2b3e6c1f, 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179,
}
// Sum512 returns the BLAKE2b-512 checksum of the data.
func Sum512(data []byte) [Size]byte {
var sum [Size]byte
checkSum(&sum, Size, data)
return sum
}
// Sum384 returns the BLAKE2b-384 checksum of the data.
func Sum384(data []byte) [Size384]byte {
var sum [Size]byte
var sum384 [Size384]byte
checkSum(&sum, Size384, data)
copy(sum384[:], sum[:Size384])
return sum384
}
// Sum256 returns the BLAKE2b-256 checksum of the data.
func Sum256(data []byte) [Size256]byte {
var sum [Size]byte
var sum256 [Size256]byte
checkSum(&sum, Size256, data)
copy(sum256[:], sum[:Size256])
return sum256
}
// New512 returns a new hash.Hash computing the BLAKE2b-512 checksum. A non-nil
// key turns the hash into a MAC. The key must be between zero and 64 bytes long.
func New512(key []byte) (hash.Hash, error) { return newDigest(Size, key) }
// New384 returns a new hash.Hash computing the BLAKE2b-384 checksum. A non-nil
// key turns the hash into a MAC. The key must be between zero and 64 bytes long.
func New384(key []byte) (hash.Hash, error) { return newDigest(Size384, key) }
// New256 returns a new hash.Hash computing the BLAKE2b-256 checksum. A non-nil
// key turns the hash into a MAC. The key must be between zero and 64 bytes long.
func New256(key []byte) (hash.Hash, error) { return newDigest(Size256, key) }
// New returns a new hash.Hash computing the BLAKE2b checksum with a custom length.
// A non-nil key turns the hash into a MAC. The key must be between zero and 64 bytes long.
// The hash size can be a value between 1 and 64 but it is highly recommended to use
// values equal to or greater than:
// - 32 if BLAKE2b is used as a hash function (The key is zero bytes long).
// - 16 if BLAKE2b is used as a MAC function (The key is at least 16 bytes long).
// When the key is nil, the returned hash.Hash implements BinaryMarshaler
// and BinaryUnmarshaler for state (de)serialization as documented by hash.Hash.
func New(size int, key []byte) (hash.Hash, error) { return newDigest(size, key) }
func newDigest(hashSize int, key []byte) (*digest, error) {
if hashSize < 1 || hashSize > Size {
return nil, errHashSize
}
if len(key) > Size {
return nil, errKeySize
}
d := &digest{
size: hashSize,
keyLen: len(key),
}
copy(d.key[:], key)
d.Reset()
return d, nil
}
func checkSum(sum *[Size]byte, hashSize int, data []byte) {
h := iv
h[0] ^= uint64(hashSize) | (1 << 16) | (1 << 24)
var c [2]uint64
if length := len(data); length > BlockSize {
n := length &^ (BlockSize - 1)
if length == n {
n -= BlockSize
}
hashBlocks(&h, &c, 0, data[:n])
data = data[n:]
}
var block [BlockSize]byte
offset := copy(block[:], data)
remaining := uint64(BlockSize - offset)
if c[0] < remaining {
c[1]--
}
c[0] -= remaining
hashBlocks(&h, &c, 0xFFFFFFFFFFFFFFFF, block[:])
for i, v := range h[:(hashSize+7)/8] {
binary.LittleEndian.PutUint64(sum[8*i:], v)
}
}
type digest struct {
h [8]uint64
c [2]uint64
size int
block [BlockSize]byte
offset int
key [BlockSize]byte
keyLen int
}
const (
magic = "b2b"
marshaledSize = len(magic) + 8*8 + 2*8 + 1 + BlockSize + 1
)
func (d *digest) MarshalBinary() ([]byte, error) {
if d.keyLen != 0 {
return nil, errors.New("crypto/blake2b: cannot marshal MACs")
}
b := make([]byte, 0, marshaledSize)
b = append(b, magic...)
for i := 0; i < 8; i++ {
b = appendUint64(b, d.h[i])
}
b = appendUint64(b, d.c[0])
b = appendUint64(b, d.c[1])
// Maximum value for size is 64
b = append(b, byte(d.size))
b = append(b, d.block[:]...)
b = append(b, byte(d.offset))
return b, nil
}
func (d *digest) UnmarshalBinary(b []byte) error {
if len(b) < len(magic) || string(b[:len(magic)]) != magic {
return errors.New("crypto/blake2b: invalid hash state identifier")
}
if len(b) != marshaledSize {
return errors.New("crypto/blake2b: invalid hash state size")
}
b = b[len(magic):]
for i := 0; i < 8; i++ {
b, d.h[i] = consumeUint64(b)
}
b, d.c[0] = consumeUint64(b)
b, d.c[1] = consumeUint64(b)
d.size = int(b[0])
b = b[1:]
copy(d.block[:], b[:BlockSize])
b = b[BlockSize:]
d.offset = int(b[0])
return nil
}
func (d *digest) BlockSize() int { return BlockSize }
func (d *digest) Size() int { return d.size }
func (d *digest) Reset() {
d.h = iv
d.h[0] ^= uint64(d.size) | (uint64(d.keyLen) << 8) | (1 << 16) | (1 << 24)
d.offset, d.c[0], d.c[1] = 0, 0, 0
if d.keyLen > 0 {
d.block = d.key
d.offset = BlockSize
}
}
func (d *digest) Write(p []byte) (n int, err error) {
n = len(p)
if d.offset > 0 {
remaining := BlockSize - d.offset
if n <= remaining {
d.offset += copy(d.block[d.offset:], p)
return
}
copy(d.block[d.offset:], p[:remaining])
hashBlocks(&d.h, &d.c, 0, d.block[:])
d.offset = 0
p = p[remaining:]
}
if length := len(p); length > BlockSize {
nn := length &^ (BlockSize - 1)
if length == nn {
nn -= BlockSize
}
hashBlocks(&d.h, &d.c, 0, p[:nn])
p = p[nn:]
}
if len(p) > 0 {
d.offset += copy(d.block[:], p)
}
return
}
func (d *digest) Sum(sum []byte) []byte {
var hash [Size]byte
d.finalize(&hash)
return append(sum, hash[:d.size]...)
}
func (d *digest) finalize(hash *[Size]byte) {
var block [BlockSize]byte
copy(block[:], d.block[:d.offset])
remaining := uint64(BlockSize - d.offset)
c := d.c
if c[0] < remaining {
c[1]--
}
c[0] -= remaining
h := d.h
hashBlocks(&h, &c, 0xFFFFFFFFFFFFFFFF, block[:])
for i, v := range h {
binary.LittleEndian.PutUint64(hash[8*i:], v)
}
}
func appendUint64(b []byte, x uint64) []byte {
var a [8]byte
binary.BigEndian.PutUint64(a[:], x)
return append(b, a[:]...)
}
func appendUint32(b []byte, x uint32) []byte {
var a [4]byte
binary.BigEndian.PutUint32(a[:], x)
return append(b, a[:]...)
}
func consumeUint64(b []byte) ([]byte, uint64) {
x := binary.BigEndian.Uint64(b)
return b[8:], x
}
func consumeUint32(b []byte) ([]byte, uint32) {
x := binary.BigEndian.Uint32(b)
return b[4:], x
}
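A brief usage sketch of the API defined above: unkeyed hashing via Sum512, and keyed use of New512 as a MAC:

package main

import (
	"fmt"

	"golang.org/x/crypto/blake2b"
)

func main() {
	// Unkeyed hash: Sum512 returns a [Size]byte (64-byte) digest.
	digest := blake2b.Sum512([]byte("hello world"))
	fmt.Printf("%x\n", digest)

	// Keyed MAC: New512 with a non-nil key (between 1 and 64 bytes).
	mac, err := blake2b.New512([]byte("my secret key"))
	if err != nil {
		panic(err)
	}
	mac.Write([]byte("hello world"))
	fmt.Printf("%x\n", mac.Sum(nil))
}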

vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go generated vendored Normal file

@ -0,0 +1,37 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.7,amd64,!gccgo,!appengine
package blake2b
import "golang.org/x/sys/cpu"
func init() {
useAVX2 = cpu.X86.HasAVX2
useAVX = cpu.X86.HasAVX
useSSE4 = cpu.X86.HasSSE41
}
//go:noescape
func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)
//go:noescape
func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)
//go:noescape
func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)
func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) {
switch {
case useAVX2:
hashBlocksAVX2(h, c, flag, blocks)
case useAVX:
hashBlocksAVX(h, c, flag, blocks)
case useSSE4:
hashBlocksSSE4(h, c, flag, blocks)
default:
hashBlocksGeneric(h, c, flag, blocks)
}
}

vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s generated vendored Normal file

@ -0,0 +1,750 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.7,amd64,!gccgo,!appengine
#include "textflag.h"
DATA ·AVX2_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908
DATA ·AVX2_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b
DATA ·AVX2_iv0<>+0x10(SB)/8, $0x3c6ef372fe94f82b
DATA ·AVX2_iv0<>+0x18(SB)/8, $0xa54ff53a5f1d36f1
GLOBL ·AVX2_iv0<>(SB), (NOPTR+RODATA), $32
DATA ·AVX2_iv1<>+0x00(SB)/8, $0x510e527fade682d1
DATA ·AVX2_iv1<>+0x08(SB)/8, $0x9b05688c2b3e6c1f
DATA ·AVX2_iv1<>+0x10(SB)/8, $0x1f83d9abfb41bd6b
DATA ·AVX2_iv1<>+0x18(SB)/8, $0x5be0cd19137e2179
GLOBL ·AVX2_iv1<>(SB), (NOPTR+RODATA), $32
DATA ·AVX2_c40<>+0x00(SB)/8, $0x0201000706050403
DATA ·AVX2_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b
DATA ·AVX2_c40<>+0x10(SB)/8, $0x0201000706050403
DATA ·AVX2_c40<>+0x18(SB)/8, $0x0a09080f0e0d0c0b
GLOBL ·AVX2_c40<>(SB), (NOPTR+RODATA), $32
DATA ·AVX2_c48<>+0x00(SB)/8, $0x0100070605040302
DATA ·AVX2_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a
DATA ·AVX2_c48<>+0x10(SB)/8, $0x0100070605040302
DATA ·AVX2_c48<>+0x18(SB)/8, $0x09080f0e0d0c0b0a
GLOBL ·AVX2_c48<>(SB), (NOPTR+RODATA), $32
DATA ·AVX_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908
DATA ·AVX_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b
GLOBL ·AVX_iv0<>(SB), (NOPTR+RODATA), $16
DATA ·AVX_iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b
DATA ·AVX_iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1
GLOBL ·AVX_iv1<>(SB), (NOPTR+RODATA), $16
DATA ·AVX_iv2<>+0x00(SB)/8, $0x510e527fade682d1
DATA ·AVX_iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f
GLOBL ·AVX_iv2<>(SB), (NOPTR+RODATA), $16
DATA ·AVX_iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b
DATA ·AVX_iv3<>+0x08(SB)/8, $0x5be0cd19137e2179
GLOBL ·AVX_iv3<>(SB), (NOPTR+RODATA), $16
DATA ·AVX_c40<>+0x00(SB)/8, $0x0201000706050403
DATA ·AVX_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b
GLOBL ·AVX_c40<>(SB), (NOPTR+RODATA), $16
DATA ·AVX_c48<>+0x00(SB)/8, $0x0100070605040302
DATA ·AVX_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a
GLOBL ·AVX_c48<>(SB), (NOPTR+RODATA), $16
#define VPERMQ_0x39_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x39
#define VPERMQ_0x93_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x93
#define VPERMQ_0x4E_Y2_Y2 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xd2; BYTE $0x4e
#define VPERMQ_0x93_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x93
#define VPERMQ_0x39_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x39
#define ROUND_AVX2(m0, m1, m2, m3, t, c40, c48) \
VPADDQ m0, Y0, Y0; \
VPADDQ Y1, Y0, Y0; \
VPXOR Y0, Y3, Y3; \
VPSHUFD $-79, Y3, Y3; \
VPADDQ Y3, Y2, Y2; \
VPXOR Y2, Y1, Y1; \
VPSHUFB c40, Y1, Y1; \
VPADDQ m1, Y0, Y0; \
VPADDQ Y1, Y0, Y0; \
VPXOR Y0, Y3, Y3; \
VPSHUFB c48, Y3, Y3; \
VPADDQ Y3, Y2, Y2; \
VPXOR Y2, Y1, Y1; \
VPADDQ Y1, Y1, t; \
VPSRLQ $63, Y1, Y1; \
VPXOR t, Y1, Y1; \
VPERMQ_0x39_Y1_Y1; \
VPERMQ_0x4E_Y2_Y2; \
VPERMQ_0x93_Y3_Y3; \
VPADDQ m2, Y0, Y0; \
VPADDQ Y1, Y0, Y0; \
VPXOR Y0, Y3, Y3; \
VPSHUFD $-79, Y3, Y3; \
VPADDQ Y3, Y2, Y2; \
VPXOR Y2, Y1, Y1; \
VPSHUFB c40, Y1, Y1; \
VPADDQ m3, Y0, Y0; \
VPADDQ Y1, Y0, Y0; \
VPXOR Y0, Y3, Y3; \
VPSHUFB c48, Y3, Y3; \
VPADDQ Y3, Y2, Y2; \
VPXOR Y2, Y1, Y1; \
VPADDQ Y1, Y1, t; \
VPSRLQ $63, Y1, Y1; \
VPXOR t, Y1, Y1; \
VPERMQ_0x39_Y3_Y3; \
VPERMQ_0x4E_Y2_Y2; \
VPERMQ_0x93_Y1_Y1
#define VMOVQ_SI_X11_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x1E
#define VMOVQ_SI_X12_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x26
#define VMOVQ_SI_X13_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x2E
#define VMOVQ_SI_X14_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x36
#define VMOVQ_SI_X15_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x3E
#define VMOVQ_SI_X11(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x5E; BYTE $n
#define VMOVQ_SI_X12(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x66; BYTE $n
#define VMOVQ_SI_X13(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x6E; BYTE $n
#define VMOVQ_SI_X14(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x76; BYTE $n
#define VMOVQ_SI_X15(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x7E; BYTE $n
#define VPINSRQ_1_SI_X11_0 BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x1E; BYTE $0x01
#define VPINSRQ_1_SI_X12_0 BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x26; BYTE $0x01
#define VPINSRQ_1_SI_X13_0 BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x2E; BYTE $0x01
#define VPINSRQ_1_SI_X14_0 BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x36; BYTE $0x01
#define VPINSRQ_1_SI_X15_0 BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x3E; BYTE $0x01
#define VPINSRQ_1_SI_X11(n) BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x5E; BYTE $n; BYTE $0x01
#define VPINSRQ_1_SI_X12(n) BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x66; BYTE $n; BYTE $0x01
#define VPINSRQ_1_SI_X13(n) BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x6E; BYTE $n; BYTE $0x01
#define VPINSRQ_1_SI_X14(n) BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x76; BYTE $n; BYTE $0x01
#define VPINSRQ_1_SI_X15(n) BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x7E; BYTE $n; BYTE $0x01
#define VMOVQ_R8_X15 BYTE $0xC4; BYTE $0x41; BYTE $0xF9; BYTE $0x6E; BYTE $0xF8
#define VPINSRQ_1_R9_X15 BYTE $0xC4; BYTE $0x43; BYTE $0x81; BYTE $0x22; BYTE $0xF9; BYTE $0x01
// load msg: Y12 = (i0, i1, i2, i3)
// i0, i1, i2, i3 must not be 0
#define LOAD_MSG_AVX2_Y12(i0, i1, i2, i3) \
VMOVQ_SI_X12(i0*8); \
VMOVQ_SI_X11(i2*8); \
VPINSRQ_1_SI_X12(i1*8); \
VPINSRQ_1_SI_X11(i3*8); \
VINSERTI128 $1, X11, Y12, Y12
// load msg: Y13 = (i0, i1, i2, i3)
// i0, i1, i2, i3 must not be 0
#define LOAD_MSG_AVX2_Y13(i0, i1, i2, i3) \
VMOVQ_SI_X13(i0*8); \
VMOVQ_SI_X11(i2*8); \
VPINSRQ_1_SI_X13(i1*8); \
VPINSRQ_1_SI_X11(i3*8); \
VINSERTI128 $1, X11, Y13, Y13
// load msg: Y14 = (i0, i1, i2, i3)
// i0, i1, i2, i3 must not be 0
#define LOAD_MSG_AVX2_Y14(i0, i1, i2, i3) \
VMOVQ_SI_X14(i0*8); \
VMOVQ_SI_X11(i2*8); \
VPINSRQ_1_SI_X14(i1*8); \
VPINSRQ_1_SI_X11(i3*8); \
VINSERTI128 $1, X11, Y14, Y14
// load msg: Y15 = (i0, i1, i2, i3)
// i0, i1, i2, i3 must not be 0
#define LOAD_MSG_AVX2_Y15(i0, i1, i2, i3) \
VMOVQ_SI_X15(i0*8); \
VMOVQ_SI_X11(i2*8); \
VPINSRQ_1_SI_X15(i1*8); \
VPINSRQ_1_SI_X11(i3*8); \
VINSERTI128 $1, X11, Y15, Y15
#define LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15() \
VMOVQ_SI_X12_0; \
VMOVQ_SI_X11(4*8); \
VPINSRQ_1_SI_X12(2*8); \
VPINSRQ_1_SI_X11(6*8); \
VINSERTI128 $1, X11, Y12, Y12; \
LOAD_MSG_AVX2_Y13(1, 3, 5, 7); \
LOAD_MSG_AVX2_Y14(8, 10, 12, 14); \
LOAD_MSG_AVX2_Y15(9, 11, 13, 15)
#define LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3() \
LOAD_MSG_AVX2_Y12(14, 4, 9, 13); \
LOAD_MSG_AVX2_Y13(10, 8, 15, 6); \
VMOVQ_SI_X11(11*8); \
VPSHUFD $0x4E, 0*8(SI), X14; \
VPINSRQ_1_SI_X11(5*8); \
VINSERTI128 $1, X11, Y14, Y14; \
LOAD_MSG_AVX2_Y15(12, 2, 7, 3)
#define LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4() \
VMOVQ_SI_X11(5*8); \
VMOVDQU 11*8(SI), X12; \
VPINSRQ_1_SI_X11(15*8); \
VINSERTI128 $1, X11, Y12, Y12; \
VMOVQ_SI_X13(8*8); \
VMOVQ_SI_X11(2*8); \
VPINSRQ_1_SI_X13_0; \
VPINSRQ_1_SI_X11(13*8); \
VINSERTI128 $1, X11, Y13, Y13; \
LOAD_MSG_AVX2_Y14(10, 3, 7, 9); \
LOAD_MSG_AVX2_Y15(14, 6, 1, 4)
#define LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8() \
LOAD_MSG_AVX2_Y12(7, 3, 13, 11); \
LOAD_MSG_AVX2_Y13(9, 1, 12, 14); \
LOAD_MSG_AVX2_Y14(2, 5, 4, 15); \
VMOVQ_SI_X15(6*8); \
VMOVQ_SI_X11_0; \
VPINSRQ_1_SI_X15(10*8); \
VPINSRQ_1_SI_X11(8*8); \
VINSERTI128 $1, X11, Y15, Y15
#define LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13() \
LOAD_MSG_AVX2_Y12(9, 5, 2, 10); \
VMOVQ_SI_X13_0; \
VMOVQ_SI_X11(4*8); \
VPINSRQ_1_SI_X13(7*8); \
VPINSRQ_1_SI_X11(15*8); \
VINSERTI128 $1, X11, Y13, Y13; \
LOAD_MSG_AVX2_Y14(14, 11, 6, 3); \
LOAD_MSG_AVX2_Y15(1, 12, 8, 13)
#define LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9() \
VMOVQ_SI_X12(2*8); \
VMOVQ_SI_X11_0; \
VPINSRQ_1_SI_X12(6*8); \
VPINSRQ_1_SI_X11(8*8); \
VINSERTI128 $1, X11, Y12, Y12; \
LOAD_MSG_AVX2_Y13(12, 10, 11, 3); \
LOAD_MSG_AVX2_Y14(4, 7, 15, 1); \
LOAD_MSG_AVX2_Y15(13, 5, 14, 9)
#define LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11() \
LOAD_MSG_AVX2_Y12(12, 1, 14, 4); \
LOAD_MSG_AVX2_Y13(5, 15, 13, 10); \
VMOVQ_SI_X14_0; \
VPSHUFD $0x4E, 8*8(SI), X11; \
VPINSRQ_1_SI_X14(6*8); \
VINSERTI128 $1, X11, Y14, Y14; \
LOAD_MSG_AVX2_Y15(7, 3, 2, 11)
#define LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10() \
LOAD_MSG_AVX2_Y12(13, 7, 12, 3); \
LOAD_MSG_AVX2_Y13(11, 14, 1, 9); \
LOAD_MSG_AVX2_Y14(5, 15, 8, 2); \
VMOVQ_SI_X15_0; \
VMOVQ_SI_X11(6*8); \
VPINSRQ_1_SI_X15(4*8); \
VPINSRQ_1_SI_X11(10*8); \
VINSERTI128 $1, X11, Y15, Y15
#define LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5() \
VMOVQ_SI_X12(6*8); \
VMOVQ_SI_X11(11*8); \
VPINSRQ_1_SI_X12(14*8); \
VPINSRQ_1_SI_X11_0; \
VINSERTI128 $1, X11, Y12, Y12; \
LOAD_MSG_AVX2_Y13(15, 9, 3, 8); \
VMOVQ_SI_X11(1*8); \
VMOVDQU 12*8(SI), X14; \
VPINSRQ_1_SI_X11(10*8); \
VINSERTI128 $1, X11, Y14, Y14; \
VMOVQ_SI_X15(2*8); \
VMOVDQU 4*8(SI), X11; \
VPINSRQ_1_SI_X15(7*8); \
VINSERTI128 $1, X11, Y15, Y15
#define LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0() \
LOAD_MSG_AVX2_Y12(10, 8, 7, 1); \
VMOVQ_SI_X13(2*8); \
VPSHUFD $0x4E, 5*8(SI), X11; \
VPINSRQ_1_SI_X13(4*8); \
VINSERTI128 $1, X11, Y13, Y13; \
LOAD_MSG_AVX2_Y14(15, 9, 3, 13); \
VMOVQ_SI_X15(11*8); \
VMOVQ_SI_X11(12*8); \
VPINSRQ_1_SI_X15(14*8); \
VPINSRQ_1_SI_X11_0; \
VINSERTI128 $1, X11, Y15, Y15
// func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)
TEXT ·hashBlocksAVX2(SB), 4, $320-48 // frame size = 288 + 32 byte alignment
MOVQ h+0(FP), AX
MOVQ c+8(FP), BX
MOVQ flag+16(FP), CX
MOVQ blocks_base+24(FP), SI
MOVQ blocks_len+32(FP), DI
MOVQ SP, DX
MOVQ SP, R9
ADDQ $31, R9
ANDQ $~31, R9
MOVQ R9, SP
MOVQ CX, 16(SP)
XORQ CX, CX
MOVQ CX, 24(SP)
VMOVDQU ·AVX2_c40<>(SB), Y4
VMOVDQU ·AVX2_c48<>(SB), Y5
VMOVDQU 0(AX), Y8
VMOVDQU 32(AX), Y9
VMOVDQU ·AVX2_iv0<>(SB), Y6
VMOVDQU ·AVX2_iv1<>(SB), Y7
MOVQ 0(BX), R8
MOVQ 8(BX), R9
MOVQ R9, 8(SP)
loop:
ADDQ $128, R8
MOVQ R8, 0(SP)
CMPQ R8, $128
JGE noinc
INCQ R9
MOVQ R9, 8(SP)
noinc:
VMOVDQA Y8, Y0
VMOVDQA Y9, Y1
VMOVDQA Y6, Y2
VPXOR 0(SP), Y7, Y3
LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15()
VMOVDQA Y12, 32(SP)
VMOVDQA Y13, 64(SP)
VMOVDQA Y14, 96(SP)
VMOVDQA Y15, 128(SP)
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3()
VMOVDQA Y12, 160(SP)
VMOVDQA Y13, 192(SP)
VMOVDQA Y14, 224(SP)
VMOVDQA Y15, 256(SP)
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4()
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8()
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13()
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9()
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11()
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10()
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5()
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0()
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
ROUND_AVX2(32(SP), 64(SP), 96(SP), 128(SP), Y10, Y4, Y5)
ROUND_AVX2(160(SP), 192(SP), 224(SP), 256(SP), Y10, Y4, Y5)
VPXOR Y0, Y8, Y8
VPXOR Y1, Y9, Y9
VPXOR Y2, Y8, Y8
VPXOR Y3, Y9, Y9
LEAQ 128(SI), SI
SUBQ $128, DI
JNE loop
MOVQ R8, 0(BX)
MOVQ R9, 8(BX)
VMOVDQU Y8, 0(AX)
VMOVDQU Y9, 32(AX)
VZEROUPPER
MOVQ DX, SP
RET
#define VPUNPCKLQDQ_X2_X2_X15 BYTE $0xC5; BYTE $0x69; BYTE $0x6C; BYTE $0xFA
#define VPUNPCKLQDQ_X3_X3_X15 BYTE $0xC5; BYTE $0x61; BYTE $0x6C; BYTE $0xFB
#define VPUNPCKLQDQ_X7_X7_X15 BYTE $0xC5; BYTE $0x41; BYTE $0x6C; BYTE $0xFF
#define VPUNPCKLQDQ_X13_X13_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x11; BYTE $0x6C; BYTE $0xFD
#define VPUNPCKLQDQ_X14_X14_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x09; BYTE $0x6C; BYTE $0xFE
#define VPUNPCKHQDQ_X15_X2_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x69; BYTE $0x6D; BYTE $0xD7
#define VPUNPCKHQDQ_X15_X3_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xDF
#define VPUNPCKHQDQ_X15_X6_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x49; BYTE $0x6D; BYTE $0xF7
#define VPUNPCKHQDQ_X15_X7_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xFF
#define VPUNPCKHQDQ_X15_X3_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xD7
#define VPUNPCKHQDQ_X15_X7_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xF7
#define VPUNPCKHQDQ_X15_X13_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xDF
#define VPUNPCKHQDQ_X15_X13_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xFF
#define SHUFFLE_AVX() \
VMOVDQA X6, X13; \
VMOVDQA X2, X14; \
VMOVDQA X4, X6; \
VPUNPCKLQDQ_X13_X13_X15; \
VMOVDQA X5, X4; \
VMOVDQA X6, X5; \
VPUNPCKHQDQ_X15_X7_X6; \
VPUNPCKLQDQ_X7_X7_X15; \
VPUNPCKHQDQ_X15_X13_X7; \
VPUNPCKLQDQ_X3_X3_X15; \
VPUNPCKHQDQ_X15_X2_X2; \
VPUNPCKLQDQ_X14_X14_X15; \
VPUNPCKHQDQ_X15_X3_X3
#define SHUFFLE_AVX_INV() \
VMOVDQA X2, X13; \
VMOVDQA X4, X14; \
VPUNPCKLQDQ_X2_X2_X15; \
VMOVDQA X5, X4; \
VPUNPCKHQDQ_X15_X3_X2; \
VMOVDQA X14, X5; \
VPUNPCKLQDQ_X3_X3_X15; \
VMOVDQA X6, X14; \
VPUNPCKHQDQ_X15_X13_X3; \
VPUNPCKLQDQ_X7_X7_X15; \
VPUNPCKHQDQ_X15_X6_X6; \
VPUNPCKLQDQ_X14_X14_X15; \
VPUNPCKHQDQ_X15_X7_X7
#define HALF_ROUND_AVX(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \
VPADDQ m0, v0, v0; \
VPADDQ v2, v0, v0; \
VPADDQ m1, v1, v1; \
VPADDQ v3, v1, v1; \
VPXOR v0, v6, v6; \
VPXOR v1, v7, v7; \
VPSHUFD $-79, v6, v6; \
VPSHUFD $-79, v7, v7; \
VPADDQ v6, v4, v4; \
VPADDQ v7, v5, v5; \
VPXOR v4, v2, v2; \
VPXOR v5, v3, v3; \
VPSHUFB c40, v2, v2; \
VPSHUFB c40, v3, v3; \
VPADDQ m2, v0, v0; \
VPADDQ v2, v0, v0; \
VPADDQ m3, v1, v1; \
VPADDQ v3, v1, v1; \
VPXOR v0, v6, v6; \
VPXOR v1, v7, v7; \
VPSHUFB c48, v6, v6; \
VPSHUFB c48, v7, v7; \
VPADDQ v6, v4, v4; \
VPADDQ v7, v5, v5; \
VPXOR v4, v2, v2; \
VPXOR v5, v3, v3; \
VPADDQ v2, v2, t0; \
VPSRLQ $63, v2, v2; \
VPXOR t0, v2, v2; \
VPADDQ v3, v3, t0; \
VPSRLQ $63, v3, v3; \
VPXOR t0, v3, v3
// load msg: X12 = (i0, i1), X13 = (i2, i3), X14 = (i4, i5), X15 = (i6, i7)
// i0, i1, i2, i3, i4, i5, i6, i7 must not be 0
#define LOAD_MSG_AVX(i0, i1, i2, i3, i4, i5, i6, i7) \
VMOVQ_SI_X12(i0*8); \
VMOVQ_SI_X13(i2*8); \
VMOVQ_SI_X14(i4*8); \
VMOVQ_SI_X15(i6*8); \
VPINSRQ_1_SI_X12(i1*8); \
VPINSRQ_1_SI_X13(i3*8); \
VPINSRQ_1_SI_X14(i5*8); \
VPINSRQ_1_SI_X15(i7*8)
// load msg: X12 = (0, 2), X13 = (4, 6), X14 = (1, 3), X15 = (5, 7)
#define LOAD_MSG_AVX_0_2_4_6_1_3_5_7() \
VMOVQ_SI_X12_0; \
VMOVQ_SI_X13(4*8); \
VMOVQ_SI_X14(1*8); \
VMOVQ_SI_X15(5*8); \
VPINSRQ_1_SI_X12(2*8); \
VPINSRQ_1_SI_X13(6*8); \
VPINSRQ_1_SI_X14(3*8); \
VPINSRQ_1_SI_X15(7*8)
// load msg: X12 = (1, 0), X13 = (11, 5), X14 = (12, 2), X15 = (7, 3)
#define LOAD_MSG_AVX_1_0_11_5_12_2_7_3() \
VPSHUFD $0x4E, 0*8(SI), X12; \
VMOVQ_SI_X13(11*8); \
VMOVQ_SI_X14(12*8); \
VMOVQ_SI_X15(7*8); \
VPINSRQ_1_SI_X13(5*8); \
VPINSRQ_1_SI_X14(2*8); \
VPINSRQ_1_SI_X15(3*8)
// load msg: X12 = (11, 12), X13 = (5, 15), X14 = (8, 0), X15 = (2, 13)
#define LOAD_MSG_AVX_11_12_5_15_8_0_2_13() \
VMOVDQU 11*8(SI), X12; \
VMOVQ_SI_X13(5*8); \
VMOVQ_SI_X14(8*8); \
VMOVQ_SI_X15(2*8); \
VPINSRQ_1_SI_X13(15*8); \
VPINSRQ_1_SI_X14_0; \
VPINSRQ_1_SI_X15(13*8)
// load msg: X12 = (2, 5), X13 = (4, 15), X14 = (6, 10), X15 = (0, 8)
#define LOAD_MSG_AVX_2_5_4_15_6_10_0_8() \
VMOVQ_SI_X12(2*8); \
VMOVQ_SI_X13(4*8); \
VMOVQ_SI_X14(6*8); \
VMOVQ_SI_X15_0; \
VPINSRQ_1_SI_X12(5*8); \
VPINSRQ_1_SI_X13(15*8); \
VPINSRQ_1_SI_X14(10*8); \
VPINSRQ_1_SI_X15(8*8)
// load msg: X12 = (9, 5), X13 = (2, 10), X14 = (0, 7), X15 = (4, 15)
#define LOAD_MSG_AVX_9_5_2_10_0_7_4_15() \
VMOVQ_SI_X12(9*8); \
VMOVQ_SI_X13(2*8); \
VMOVQ_SI_X14_0; \
VMOVQ_SI_X15(4*8); \
VPINSRQ_1_SI_X12(5*8); \
VPINSRQ_1_SI_X13(10*8); \
VPINSRQ_1_SI_X14(7*8); \
VPINSRQ_1_SI_X15(15*8)
// load msg: X12 = (2, 6), X13 = (0, 8), X14 = (12, 10), X15 = (11, 3)
#define LOAD_MSG_AVX_2_6_0_8_12_10_11_3() \
VMOVQ_SI_X12(2*8); \
VMOVQ_SI_X13_0; \
VMOVQ_SI_X14(12*8); \
VMOVQ_SI_X15(11*8); \
VPINSRQ_1_SI_X12(6*8); \
VPINSRQ_1_SI_X13(8*8); \
VPINSRQ_1_SI_X14(10*8); \
VPINSRQ_1_SI_X15(3*8)
// load msg: X12 = (0, 6), X13 = (9, 8), X14 = (7, 3), X15 = (2, 11)
#define LOAD_MSG_AVX_0_6_9_8_7_3_2_11() \
MOVQ 0*8(SI), X12; \
VPSHUFD $0x4E, 8*8(SI), X13; \
MOVQ 7*8(SI), X14; \
MOVQ 2*8(SI), X15; \
VPINSRQ_1_SI_X12(6*8); \
VPINSRQ_1_SI_X14(3*8); \
VPINSRQ_1_SI_X15(11*8)
// load msg: X12 = (6, 14), X13 = (11, 0), X14 = (15, 9), X15 = (3, 8)
#define LOAD_MSG_AVX_6_14_11_0_15_9_3_8() \
MOVQ 6*8(SI), X12; \
MOVQ 11*8(SI), X13; \
MOVQ 15*8(SI), X14; \
MOVQ 3*8(SI), X15; \
VPINSRQ_1_SI_X12(14*8); \
VPINSRQ_1_SI_X13_0; \
VPINSRQ_1_SI_X14(9*8); \
VPINSRQ_1_SI_X15(8*8)
// load msg: X12 = (5, 15), X13 = (8, 2), X14 = (0, 4), X15 = (6, 10)
#define LOAD_MSG_AVX_5_15_8_2_0_4_6_10() \
MOVQ 5*8(SI), X12; \
MOVQ 8*8(SI), X13; \
MOVQ 0*8(SI), X14; \
MOVQ 6*8(SI), X15; \
VPINSRQ_1_SI_X12(15*8); \
VPINSRQ_1_SI_X13(2*8); \
VPINSRQ_1_SI_X14(4*8); \
VPINSRQ_1_SI_X15(10*8)
// load msg: X12 = (12, 13), X13 = (1, 10), X14 = (2, 7), X15 = (4, 5)
#define LOAD_MSG_AVX_12_13_1_10_2_7_4_5() \
VMOVDQU 12*8(SI), X12; \
MOVQ 1*8(SI), X13; \
MOVQ 2*8(SI), X14; \
VPINSRQ_1_SI_X13(10*8); \
VPINSRQ_1_SI_X14(7*8); \
VMOVDQU 4*8(SI), X15
// load msg: X12 = (15, 9), X13 = (3, 13), X14 = (11, 14), X15 = (12, 0)
#define LOAD_MSG_AVX_15_9_3_13_11_14_12_0() \
MOVQ 15*8(SI), X12; \
MOVQ 3*8(SI), X13; \
MOVQ 11*8(SI), X14; \
MOVQ 12*8(SI), X15; \
VPINSRQ_1_SI_X12(9*8); \
VPINSRQ_1_SI_X13(13*8); \
VPINSRQ_1_SI_X14(14*8); \
VPINSRQ_1_SI_X15_0
// func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)
TEXT ·hashBlocksAVX(SB), 4, $288-48 // frame size = 272 + 16 byte alignment
MOVQ h+0(FP), AX
MOVQ c+8(FP), BX
MOVQ flag+16(FP), CX
MOVQ blocks_base+24(FP), SI
MOVQ blocks_len+32(FP), DI
MOVQ SP, BP
MOVQ SP, R9
ADDQ $15, R9
ANDQ $~15, R9
MOVQ R9, SP
VMOVDQU ·AVX_c40<>(SB), X0
VMOVDQU ·AVX_c48<>(SB), X1
VMOVDQA X0, X8
VMOVDQA X1, X9
VMOVDQU ·AVX_iv3<>(SB), X0
VMOVDQA X0, 0(SP)
XORQ CX, 0(SP) // 0(SP) = ·AVX_iv3 ^ (CX || 0)
VMOVDQU 0(AX), X10
VMOVDQU 16(AX), X11
VMOVDQU 32(AX), X2
VMOVDQU 48(AX), X3
MOVQ 0(BX), R8
MOVQ 8(BX), R9
loop:
ADDQ $128, R8
CMPQ R8, $128
JGE noinc
INCQ R9
noinc:
VMOVQ_R8_X15
VPINSRQ_1_R9_X15
VMOVDQA X10, X0
VMOVDQA X11, X1
VMOVDQU ·AVX_iv0<>(SB), X4
VMOVDQU ·AVX_iv1<>(SB), X5
VMOVDQU ·AVX_iv2<>(SB), X6
VPXOR X15, X6, X6
VMOVDQA 0(SP), X7
LOAD_MSG_AVX_0_2_4_6_1_3_5_7()
VMOVDQA X12, 16(SP)
VMOVDQA X13, 32(SP)
VMOVDQA X14, 48(SP)
VMOVDQA X15, 64(SP)
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX()
LOAD_MSG_AVX(8, 10, 12, 14, 9, 11, 13, 15)
VMOVDQA X12, 80(SP)
VMOVDQA X13, 96(SP)
VMOVDQA X14, 112(SP)
VMOVDQA X15, 128(SP)
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX_INV()
LOAD_MSG_AVX(14, 4, 9, 13, 10, 8, 15, 6)
VMOVDQA X12, 144(SP)
VMOVDQA X13, 160(SP)
VMOVDQA X14, 176(SP)
VMOVDQA X15, 192(SP)
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX()
LOAD_MSG_AVX_1_0_11_5_12_2_7_3()
VMOVDQA X12, 208(SP)
VMOVDQA X13, 224(SP)
VMOVDQA X14, 240(SP)
VMOVDQA X15, 256(SP)
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX_INV()
LOAD_MSG_AVX_11_12_5_15_8_0_2_13()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX()
LOAD_MSG_AVX(10, 3, 7, 9, 14, 6, 1, 4)
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX_INV()
LOAD_MSG_AVX(7, 3, 13, 11, 9, 1, 12, 14)
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX()
LOAD_MSG_AVX_2_5_4_15_6_10_0_8()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX_INV()
LOAD_MSG_AVX_9_5_2_10_0_7_4_15()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX()
LOAD_MSG_AVX(14, 11, 6, 3, 1, 12, 8, 13)
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX_INV()
LOAD_MSG_AVX_2_6_0_8_12_10_11_3()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX()
LOAD_MSG_AVX(4, 7, 15, 1, 13, 5, 14, 9)
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX_INV()
LOAD_MSG_AVX(12, 1, 14, 4, 5, 15, 13, 10)
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX()
LOAD_MSG_AVX_0_6_9_8_7_3_2_11()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX_INV()
LOAD_MSG_AVX(13, 7, 12, 3, 11, 14, 1, 9)
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX()
LOAD_MSG_AVX_5_15_8_2_0_4_6_10()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX_INV()
LOAD_MSG_AVX_6_14_11_0_15_9_3_8()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX()
LOAD_MSG_AVX_12_13_1_10_2_7_4_5()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX_INV()
LOAD_MSG_AVX(10, 8, 7, 1, 2, 4, 6, 5)
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX()
LOAD_MSG_AVX_15_9_3_13_11_14_12_0()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX_INV()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 16(SP), 32(SP), 48(SP), 64(SP), X15, X8, X9)
SHUFFLE_AVX()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 80(SP), 96(SP), 112(SP), 128(SP), X15, X8, X9)
SHUFFLE_AVX_INV()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 144(SP), 160(SP), 176(SP), 192(SP), X15, X8, X9)
SHUFFLE_AVX()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 208(SP), 224(SP), 240(SP), 256(SP), X15, X8, X9)
SHUFFLE_AVX_INV()
VMOVDQU 32(AX), X14
VMOVDQU 48(AX), X15
VPXOR X0, X10, X10
VPXOR X1, X11, X11
VPXOR X2, X14, X14
VPXOR X3, X15, X15
VPXOR X4, X10, X10
VPXOR X5, X11, X11
VPXOR X6, X14, X2
VPXOR X7, X15, X3
VMOVDQU X2, 32(AX)
VMOVDQU X3, 48(AX)
LEAQ 128(SI), SI
SUBQ $128, DI
JNE loop
VMOVDQU X10, 0(AX)
VMOVDQU X11, 16(AX)
MOVQ R8, 0(BX)
MOVQ R9, 8(BX)
VZEROUPPER
MOVQ BP, SP
RET

vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go generated vendored Normal file

@@ -0,0 +1,24 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !go1.7,amd64,!gccgo,!appengine
package blake2b
import "golang.org/x/sys/cpu"
func init() {
useSSE4 = cpu.X86.HasSSE41
}
//go:noescape
func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)
func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) {
if useSSE4 {
hashBlocksSSE4(h, c, flag, blocks)
} else {
hashBlocksGeneric(h, c, flag, blocks)
}
}
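For context, hashBlocks above is the internal dispatch point behind the package's public constructors. A minimal sketch of typical use, assuming the public blake2b API (Sum512, New512), which is defined elsewhere in this package rather than in this file:

package main

import (
	"fmt"

	"golang.org/x/crypto/blake2b"
)

func main() {
	// One-shot digest.
	sum := blake2b.Sum512([]byte("hello"))
	fmt.Printf("%x\n", sum)

	// Keyed (MAC) use via the streaming hash.Hash interface; the key
	// may be up to 64 bytes for BLAKE2b-512.
	h, err := blake2b.New512([]byte("secret key"))
	if err != nil {
		panic(err)
	}
	h.Write([]byte("hello"))
	fmt.Printf("%x\n", h.Sum(nil))
}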

vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s generated vendored Normal file

@@ -0,0 +1,281 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build amd64,!gccgo,!appengine
#include "textflag.h"
DATA ·iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908
DATA ·iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b
GLOBL ·iv0<>(SB), (NOPTR+RODATA), $16
DATA ·iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b
DATA ·iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1
GLOBL ·iv1<>(SB), (NOPTR+RODATA), $16
DATA ·iv2<>+0x00(SB)/8, $0x510e527fade682d1
DATA ·iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f
GLOBL ·iv2<>(SB), (NOPTR+RODATA), $16
DATA ·iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b
DATA ·iv3<>+0x08(SB)/8, $0x5be0cd19137e2179
GLOBL ·iv3<>(SB), (NOPTR+RODATA), $16
DATA ·c40<>+0x00(SB)/8, $0x0201000706050403
DATA ·c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b
GLOBL ·c40<>(SB), (NOPTR+RODATA), $16
DATA ·c48<>+0x00(SB)/8, $0x0100070605040302
DATA ·c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a
GLOBL ·c48<>(SB), (NOPTR+RODATA), $16
#define SHUFFLE(v2, v3, v4, v5, v6, v7, t1, t2) \
MOVO v4, t1; \
MOVO v5, v4; \
MOVO t1, v5; \
MOVO v6, t1; \
PUNPCKLQDQ v6, t2; \
PUNPCKHQDQ v7, v6; \
PUNPCKHQDQ t2, v6; \
PUNPCKLQDQ v7, t2; \
MOVO t1, v7; \
MOVO v2, t1; \
PUNPCKHQDQ t2, v7; \
PUNPCKLQDQ v3, t2; \
PUNPCKHQDQ t2, v2; \
PUNPCKLQDQ t1, t2; \
PUNPCKHQDQ t2, v3
#define SHUFFLE_INV(v2, v3, v4, v5, v6, v7, t1, t2) \
MOVO v4, t1; \
MOVO v5, v4; \
MOVO t1, v5; \
MOVO v2, t1; \
PUNPCKLQDQ v2, t2; \
PUNPCKHQDQ v3, v2; \
PUNPCKHQDQ t2, v2; \
PUNPCKLQDQ v3, t2; \
MOVO t1, v3; \
MOVO v6, t1; \
PUNPCKHQDQ t2, v3; \
PUNPCKLQDQ v7, t2; \
PUNPCKHQDQ t2, v6; \
PUNPCKLQDQ t1, t2; \
PUNPCKHQDQ t2, v7
#define HALF_ROUND(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \
PADDQ m0, v0; \
PADDQ m1, v1; \
PADDQ v2, v0; \
PADDQ v3, v1; \
PXOR v0, v6; \
PXOR v1, v7; \
PSHUFD $0xB1, v6, v6; \
PSHUFD $0xB1, v7, v7; \
PADDQ v6, v4; \
PADDQ v7, v5; \
PXOR v4, v2; \
PXOR v5, v3; \
PSHUFB c40, v2; \
PSHUFB c40, v3; \
PADDQ m2, v0; \
PADDQ m3, v1; \
PADDQ v2, v0; \
PADDQ v3, v1; \
PXOR v0, v6; \
PXOR v1, v7; \
PSHUFB c48, v6; \
PSHUFB c48, v7; \
PADDQ v6, v4; \
PADDQ v7, v5; \
PXOR v4, v2; \
PXOR v5, v3; \
MOVOU v2, t0; \
PADDQ v2, t0; \
PSRLQ $63, v2; \
PXOR t0, v2; \
MOVOU v3, t0; \
PADDQ v3, t0; \
PSRLQ $63, v3; \
PXOR t0, v3
#define LOAD_MSG(m0, m1, m2, m3, src, i0, i1, i2, i3, i4, i5, i6, i7) \
MOVQ i0*8(src), m0; \
PINSRQ $1, i1*8(src), m0; \
MOVQ i2*8(src), m1; \
PINSRQ $1, i3*8(src), m1; \
MOVQ i4*8(src), m2; \
PINSRQ $1, i5*8(src), m2; \
MOVQ i6*8(src), m3; \
PINSRQ $1, i7*8(src), m3
// func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)
TEXT ·hashBlocksSSE4(SB), 4, $288-48 // frame size = 272 + 16 byte alignment
MOVQ h+0(FP), AX
MOVQ c+8(FP), BX
MOVQ flag+16(FP), CX
MOVQ blocks_base+24(FP), SI
MOVQ blocks_len+32(FP), DI
MOVQ SP, BP
MOVQ SP, R9
ADDQ $15, R9
ANDQ $~15, R9
MOVQ R9, SP
MOVOU ·iv3<>(SB), X0
MOVO X0, 0(SP)
XORQ CX, 0(SP) // 0(SP) = ·iv3 ^ (CX || 0)
MOVOU ·c40<>(SB), X13
MOVOU ·c48<>(SB), X14
MOVOU 0(AX), X12
MOVOU 16(AX), X15
MOVQ 0(BX), R8
MOVQ 8(BX), R9
loop:
ADDQ $128, R8
CMPQ R8, $128
JGE noinc
INCQ R9
noinc:
MOVQ R8, X8
PINSRQ $1, R9, X8
MOVO X12, X0
MOVO X15, X1
MOVOU 32(AX), X2
MOVOU 48(AX), X3
MOVOU ·iv0<>(SB), X4
MOVOU ·iv1<>(SB), X5
MOVOU ·iv2<>(SB), X6
PXOR X8, X6
MOVO 0(SP), X7
LOAD_MSG(X8, X9, X10, X11, SI, 0, 2, 4, 6, 1, 3, 5, 7)
MOVO X8, 16(SP)
MOVO X9, 32(SP)
MOVO X10, 48(SP)
MOVO X11, 64(SP)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
LOAD_MSG(X8, X9, X10, X11, SI, 8, 10, 12, 14, 9, 11, 13, 15)
MOVO X8, 80(SP)
MOVO X9, 96(SP)
MOVO X10, 112(SP)
MOVO X11, 128(SP)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
LOAD_MSG(X8, X9, X10, X11, SI, 14, 4, 9, 13, 10, 8, 15, 6)
MOVO X8, 144(SP)
MOVO X9, 160(SP)
MOVO X10, 176(SP)
MOVO X11, 192(SP)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
LOAD_MSG(X8, X9, X10, X11, SI, 1, 0, 11, 5, 12, 2, 7, 3)
MOVO X8, 208(SP)
MOVO X9, 224(SP)
MOVO X10, 240(SP)
MOVO X11, 256(SP)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
LOAD_MSG(X8, X9, X10, X11, SI, 11, 12, 5, 15, 8, 0, 2, 13)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
LOAD_MSG(X8, X9, X10, X11, SI, 10, 3, 7, 9, 14, 6, 1, 4)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
LOAD_MSG(X8, X9, X10, X11, SI, 7, 3, 13, 11, 9, 1, 12, 14)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
LOAD_MSG(X8, X9, X10, X11, SI, 2, 5, 4, 15, 6, 10, 0, 8)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
LOAD_MSG(X8, X9, X10, X11, SI, 9, 5, 2, 10, 0, 7, 4, 15)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
LOAD_MSG(X8, X9, X10, X11, SI, 14, 11, 6, 3, 1, 12, 8, 13)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
LOAD_MSG(X8, X9, X10, X11, SI, 2, 6, 0, 8, 12, 10, 11, 3)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
LOAD_MSG(X8, X9, X10, X11, SI, 4, 7, 15, 1, 13, 5, 14, 9)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
LOAD_MSG(X8, X9, X10, X11, SI, 12, 1, 14, 4, 5, 15, 13, 10)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
LOAD_MSG(X8, X9, X10, X11, SI, 0, 6, 9, 8, 7, 3, 2, 11)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
LOAD_MSG(X8, X9, X10, X11, SI, 13, 7, 12, 3, 11, 14, 1, 9)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
LOAD_MSG(X8, X9, X10, X11, SI, 5, 15, 8, 2, 0, 4, 6, 10)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
LOAD_MSG(X8, X9, X10, X11, SI, 6, 14, 11, 0, 15, 9, 3, 8)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
LOAD_MSG(X8, X9, X10, X11, SI, 12, 13, 1, 10, 2, 7, 4, 5)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
LOAD_MSG(X8, X9, X10, X11, SI, 10, 8, 7, 1, 2, 4, 6, 5)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
LOAD_MSG(X8, X9, X10, X11, SI, 15, 9, 3, 13, 11, 14, 12, 0)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 16(SP), 32(SP), 48(SP), 64(SP), X11, X13, X14)
SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 80(SP), 96(SP), 112(SP), 128(SP), X11, X13, X14)
SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 144(SP), 160(SP), 176(SP), 192(SP), X11, X13, X14)
SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 208(SP), 224(SP), 240(SP), 256(SP), X11, X13, X14)
SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
MOVOU 32(AX), X10
MOVOU 48(AX), X11
PXOR X0, X12
PXOR X1, X15
PXOR X2, X10
PXOR X3, X11
PXOR X4, X12
PXOR X5, X15
PXOR X6, X10
PXOR X7, X11
MOVOU X10, 32(AX)
MOVOU X11, 48(AX)
LEAQ 128(SI), SI
SUBQ $128, DI
JNE loop
MOVOU X12, 0(AX)
MOVOU X15, 16(AX)
MOVQ R8, 0(BX)
MOVQ R9, 8(BX)
MOVQ BP, SP
RET

vendor/golang.org/x/crypto/blake2b/blake2b_generic.go generated vendored Normal file

@@ -0,0 +1,182 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package blake2b
import (
"encoding/binary"
"math/bits"
)
// The precomputed message schedule for BLAKE2b.
// There are 12 16-byte arrays, one for each round; the entries are
// calculated from the sigma constants (see the sketch after this file).
var precomputed = [12][16]byte{
{0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15},
{14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3},
{11, 12, 5, 15, 8, 0, 2, 13, 10, 3, 7, 9, 14, 6, 1, 4},
{7, 3, 13, 11, 9, 1, 12, 14, 2, 5, 4, 15, 6, 10, 0, 8},
{9, 5, 2, 10, 0, 7, 4, 15, 14, 11, 6, 3, 1, 12, 8, 13},
{2, 6, 0, 8, 12, 10, 11, 3, 4, 7, 15, 1, 13, 5, 14, 9},
{12, 1, 14, 4, 5, 15, 13, 10, 0, 6, 9, 8, 7, 3, 2, 11},
{13, 7, 12, 3, 11, 14, 1, 9, 5, 15, 8, 2, 0, 4, 6, 10},
{6, 14, 11, 0, 15, 9, 3, 8, 12, 13, 1, 10, 2, 7, 4, 5},
{10, 8, 7, 1, 2, 4, 6, 5, 15, 9, 3, 13, 11, 14, 12, 0},
{0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15}, // equal to the first
{14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3}, // equal to the second
}
func hashBlocksGeneric(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) {
var m [16]uint64
c0, c1 := c[0], c[1]
for i := 0; i < len(blocks); {
c0 += BlockSize
if c0 < BlockSize {
c1++
}
v0, v1, v2, v3, v4, v5, v6, v7 := h[0], h[1], h[2], h[3], h[4], h[5], h[6], h[7]
v8, v9, v10, v11, v12, v13, v14, v15 := iv[0], iv[1], iv[2], iv[3], iv[4], iv[5], iv[6], iv[7]
v12 ^= c0
v13 ^= c1
v14 ^= flag
for j := range m {
m[j] = binary.LittleEndian.Uint64(blocks[i:])
i += 8
}
for j := range precomputed {
s := &(precomputed[j])
v0 += m[s[0]]
v0 += v4
v12 ^= v0
v12 = bits.RotateLeft64(v12, -32)
v8 += v12
v4 ^= v8
v4 = bits.RotateLeft64(v4, -24)
v1 += m[s[1]]
v1 += v5
v13 ^= v1
v13 = bits.RotateLeft64(v13, -32)
v9 += v13
v5 ^= v9
v5 = bits.RotateLeft64(v5, -24)
v2 += m[s[2]]
v2 += v6
v14 ^= v2
v14 = bits.RotateLeft64(v14, -32)
v10 += v14
v6 ^= v10
v6 = bits.RotateLeft64(v6, -24)
v3 += m[s[3]]
v3 += v7
v15 ^= v3
v15 = bits.RotateLeft64(v15, -32)
v11 += v15
v7 ^= v11
v7 = bits.RotateLeft64(v7, -24)
v0 += m[s[4]]
v0 += v4
v12 ^= v0
v12 = bits.RotateLeft64(v12, -16)
v8 += v12
v4 ^= v8
v4 = bits.RotateLeft64(v4, -63)
v1 += m[s[5]]
v1 += v5
v13 ^= v1
v13 = bits.RotateLeft64(v13, -16)
v9 += v13
v5 ^= v9
v5 = bits.RotateLeft64(v5, -63)
v2 += m[s[6]]
v2 += v6
v14 ^= v2
v14 = bits.RotateLeft64(v14, -16)
v10 += v14
v6 ^= v10
v6 = bits.RotateLeft64(v6, -63)
v3 += m[s[7]]
v3 += v7
v15 ^= v3
v15 = bits.RotateLeft64(v15, -16)
v11 += v15
v7 ^= v11
v7 = bits.RotateLeft64(v7, -63)
v0 += m[s[8]]
v0 += v5
v15 ^= v0
v15 = bits.RotateLeft64(v15, -32)
v10 += v15
v5 ^= v10
v5 = bits.RotateLeft64(v5, -24)
v1 += m[s[9]]
v1 += v6
v12 ^= v1
v12 = bits.RotateLeft64(v12, -32)
v11 += v12
v6 ^= v11
v6 = bits.RotateLeft64(v6, -24)
v2 += m[s[10]]
v2 += v7
v13 ^= v2
v13 = bits.RotateLeft64(v13, -32)
v8 += v13
v7 ^= v8
v7 = bits.RotateLeft64(v7, -24)
v3 += m[s[11]]
v3 += v4
v14 ^= v3
v14 = bits.RotateLeft64(v14, -32)
v9 += v14
v4 ^= v9
v4 = bits.RotateLeft64(v4, -24)
v0 += m[s[12]]
v0 += v5
v15 ^= v0
v15 = bits.RotateLeft64(v15, -16)
v10 += v15
v5 ^= v10
v5 = bits.RotateLeft64(v5, -63)
v1 += m[s[13]]
v1 += v6
v12 ^= v1
v12 = bits.RotateLeft64(v12, -16)
v11 += v12
v6 ^= v11
v6 = bits.RotateLeft64(v6, -63)
v2 += m[s[14]]
v2 += v7
v13 ^= v2
v13 = bits.RotateLeft64(v13, -16)
v8 += v13
v7 ^= v8
v7 = bits.RotateLeft64(v7, -63)
v3 += m[s[15]]
v3 += v4
v14 ^= v3
v14 = bits.RotateLeft64(v14, -16)
v9 += v14
v4 ^= v9
v4 = bits.RotateLeft64(v4, -63)
}
h[0] ^= v0 ^ v8
h[1] ^= v1 ^ v9
h[2] ^= v2 ^ v10
h[3] ^= v3 ^ v11
h[4] ^= v4 ^ v12
h[5] ^= v5 ^ v13
h[6] ^= v6 ^ v14
h[7] ^= v7 ^ v15
}
c[0], c[1] = c0, c1
}
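A minimal sketch (not part of the vendored file) of the derivation mentioned in the precomputed comment above: each precomputed row regroups one sigma row from RFC 7693 so that, within each half-round, the first message word of every G call comes before the second. sigma is truncated to its first two rows here for brevity.

package main

import "fmt"

// sigma holds the first two rows of the BLAKE2b message schedule from
// RFC 7693; the remaining eight rows are elided for brevity.
var sigma = [][16]byte{
	{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
	{14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3},
}

// permute regroups one sigma row into the precomputed order: within
// each half, the first message word of each of the four G calls comes
// first, followed by the second message word of each call.
func permute(s [16]byte) (p [16]byte) {
	for half := 0; half < 2; half++ {
		for i := 0; i < 4; i++ {
			p[half*8+i] = s[half*8+2*i]     // first addend of the i-th G call
			p[half*8+4+i] = s[half*8+2*i+1] // second addend
		}
	}
	return p
}

func main() {
	fmt.Println(permute(sigma[1])) // [14 4 9 13 10 8 15 6 1 0 11 5 12 2 7 3]
}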

vendor/golang.org/x/crypto/blake2b/blake2b_ref.go generated vendored Normal file

@@ -0,0 +1,11 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !amd64 appengine gccgo
package blake2b
func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) {
hashBlocksGeneric(h, c, flag, blocks)
}

vendor/golang.org/x/crypto/blake2b/blake2x.go generated vendored Normal file

@@ -0,0 +1,177 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package blake2b
import (
"encoding/binary"
"errors"
"io"
)
// XOF defines the interface to hash functions that
// support arbitrary-length output.
type XOF interface {
// Write absorbs more data into the hash's state. It panics if called
// after Read.
io.Writer
// Read reads more output from the hash. It returns io.EOF if the limit
// has been reached.
io.Reader
// Clone returns a copy of the XOF in its current state.
Clone() XOF
// Reset resets the XOF to its initial state.
Reset()
}
// OutputLengthUnknown can be used as the size argument to NewXOF to indicate
// the length of the output is not known in advance.
const OutputLengthUnknown = 0
// magicUnknownOutputLength is a magic value for the output size that indicates
// an unknown number of output bytes.
const magicUnknownOutputLength = (1 << 32) - 1
// maxOutputLength is the absolute maximum number of bytes to produce when the
// number of output bytes is unknown.
const maxOutputLength = (1 << 32) * 64
// NewXOF creates a new variable-output-length hash. The hash either produces a
// known number of bytes (1 <= size < 2**32-1), or an unknown number of bytes
// (size == OutputLengthUnknown). In the latter case, an absolute limit of
// 256GiB applies.
//
// A non-nil key turns the hash into a MAC. The key must be between
// zero and 32 bytes long.
func NewXOF(size uint32, key []byte) (XOF, error) {
if len(key) > Size {
return nil, errKeySize
}
if size == magicUnknownOutputLength {
// 2^32-1 indicates an unknown number of bytes and thus isn't a
// valid length.
return nil, errors.New("blake2b: XOF length too large")
}
if size == OutputLengthUnknown {
size = magicUnknownOutputLength
}
x := &xof{
d: digest{
size: Size,
keyLen: len(key),
},
length: size,
}
copy(x.d.key[:], key)
x.Reset()
return x, nil
}
type xof struct {
d digest
length uint32
remaining uint64
cfg, root, block [Size]byte
offset int
nodeOffset uint32
readMode bool
}
func (x *xof) Write(p []byte) (n int, err error) {
if x.readMode {
panic("blake2b: write to XOF after read")
}
return x.d.Write(p)
}
func (x *xof) Clone() XOF {
clone := *x
return &clone
}
func (x *xof) Reset() {
x.cfg[0] = byte(Size)
binary.LittleEndian.PutUint32(x.cfg[4:], uint32(Size)) // leaf length
binary.LittleEndian.PutUint32(x.cfg[12:], x.length) // XOF length
x.cfg[17] = byte(Size) // inner hash size
x.d.Reset()
x.d.h[1] ^= uint64(x.length) << 32
x.remaining = uint64(x.length)
if x.remaining == magicUnknownOutputLength {
x.remaining = maxOutputLength
}
x.offset, x.nodeOffset = 0, 0
x.readMode = false
}
func (x *xof) Read(p []byte) (n int, err error) {
if !x.readMode {
x.d.finalize(&x.root)
x.readMode = true
}
if x.remaining == 0 {
return 0, io.EOF
}
n = len(p)
if uint64(n) > x.remaining {
n = int(x.remaining)
p = p[:n]
}
if x.offset > 0 {
blockRemaining := Size - x.offset
if n < blockRemaining {
x.offset += copy(p, x.block[x.offset:])
x.remaining -= uint64(n)
return
}
copy(p, x.block[x.offset:])
p = p[blockRemaining:]
x.offset = 0
x.remaining -= uint64(blockRemaining)
}
for len(p) >= Size {
binary.LittleEndian.PutUint32(x.cfg[8:], x.nodeOffset)
x.nodeOffset++
x.d.initConfig(&x.cfg)
x.d.Write(x.root[:])
x.d.finalize(&x.block)
copy(p, x.block[:])
p = p[Size:]
x.remaining -= uint64(Size)
}
if todo := len(p); todo > 0 {
if x.remaining < uint64(Size) {
x.cfg[0] = byte(x.remaining)
}
binary.LittleEndian.PutUint32(x.cfg[8:], x.nodeOffset)
x.nodeOffset++
x.d.initConfig(&x.cfg)
x.d.Write(x.root[:])
x.d.finalize(&x.block)
x.offset = copy(p, x.block[:todo])
x.remaining -= uint64(todo)
}
return
}
func (d *digest) initConfig(cfg *[Size]byte) {
d.offset, d.c[0], d.c[1] = 0, 0, 0
for i := range d.h {
d.h[i] = iv[i] ^ binary.LittleEndian.Uint64(cfg[i*8:])
}
}
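A minimal sketch of the XOF in use: Write absorbs input, Read squeezes output until the requested length (or the 256GiB limit, when OutputLengthUnknown was requested) is exhausted.

package main

import (
	"fmt"
	"io"

	"golang.org/x/crypto/blake2b"
)

func main() {
	// Request 128 bytes of output up front; blake2b.OutputLengthUnknown
	// would allow reading up to the 256GiB limit instead.
	xof, err := blake2b.NewXOF(128, nil)
	if err != nil {
		panic(err)
	}
	io.WriteString(xof, "hello")
	out := make([]byte, 128)
	if _, err := io.ReadFull(xof, out); err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", out[:16])
}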

vendor/golang.org/x/crypto/blake2b/register.go generated vendored Normal file

@@ -0,0 +1,32 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.9
package blake2b
import (
"crypto"
"hash"
)
func init() {
newHash256 := func() hash.Hash {
h, _ := New256(nil)
return h
}
newHash384 := func() hash.Hash {
h, _ := New384(nil)
return h
}
newHash512 := func() hash.Hash {
h, _ := New512(nil)
return h
}
crypto.RegisterHash(crypto.BLAKE2b_256, newHash256)
crypto.RegisterHash(crypto.BLAKE2b_384, newHash384)
crypto.RegisterHash(crypto.BLAKE2b_512, newHash512)
}
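With this file linked in (go1.9+, per the build tag), BLAKE2b becomes reachable through the standard library's crypto.Hash registry. A minimal sketch:

package main

import (
	"crypto"
	"fmt"

	_ "golang.org/x/crypto/blake2b" // blank import runs init, registering BLAKE2b
)

func main() {
	if crypto.BLAKE2b_256.Available() {
		h := crypto.BLAKE2b_256.New()
		h.Write([]byte("hello"))
		fmt.Printf("%x\n", h.Sum(nil))
	}
}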

vendor/golang.org/x/crypto/blowfish/BUILD.bazel generated vendored Normal file

@@ -0,0 +1,13 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"block.go",
"cipher.go",
"const.go",
],
importmap = "k8s.io/kops/vendor/golang.org/x/crypto/blowfish",
importpath = "golang.org/x/crypto/blowfish",
visibility = ["//visibility:public"],
)

vendor/golang.org/x/crypto/blowfish/block.go generated vendored Normal file

@@ -0,0 +1,159 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package blowfish
// getNextWord returns the next big-endian uint32 value from the byte slice
// at the given position in a circular manner, updating the position.
func getNextWord(b []byte, pos *int) uint32 {
var w uint32
j := *pos
for i := 0; i < 4; i++ {
w = w<<8 | uint32(b[j])
j++
if j >= len(b) {
j = 0
}
}
*pos = j
return w
}
// ExpandKey performs a key expansion on the given *Cipher. Specifically, it
// performs the Blowfish algorithm's key schedule which sets up the *Cipher's
// pi and substitution tables for calls to Encrypt. This is used, primarily,
// by the bcrypt package to reuse the Blowfish key schedule during its
// set up. It's unlikely that you need to use this directly.
func ExpandKey(key []byte, c *Cipher) {
j := 0
for i := 0; i < 18; i++ {
// Using inlined getNextWord for performance.
var d uint32
for k := 0; k < 4; k++ {
d = d<<8 | uint32(key[j])
j++
if j >= len(key) {
j = 0
}
}
c.p[i] ^= d
}
var l, r uint32
for i := 0; i < 18; i += 2 {
l, r = encryptBlock(l, r, c)
c.p[i], c.p[i+1] = l, r
}
for i := 0; i < 256; i += 2 {
l, r = encryptBlock(l, r, c)
c.s0[i], c.s0[i+1] = l, r
}
for i := 0; i < 256; i += 2 {
l, r = encryptBlock(l, r, c)
c.s1[i], c.s1[i+1] = l, r
}
for i := 0; i < 256; i += 2 {
l, r = encryptBlock(l, r, c)
c.s2[i], c.s2[i+1] = l, r
}
for i := 0; i < 256; i += 2 {
l, r = encryptBlock(l, r, c)
c.s3[i], c.s3[i+1] = l, r
}
}
// This is similar to ExpandKey, but folds the salt during the key
// schedule. While ExpandKey is essentially expandKeyWithSalt with an all-zero
// salt passed in, reusing ExpandKey turns out to be a place of inefficiency
// and specializing it here is useful.
func expandKeyWithSalt(key []byte, salt []byte, c *Cipher) {
j := 0
for i := 0; i < 18; i++ {
c.p[i] ^= getNextWord(key, &j)
}
j = 0
var l, r uint32
for i := 0; i < 18; i += 2 {
l ^= getNextWord(salt, &j)
r ^= getNextWord(salt, &j)
l, r = encryptBlock(l, r, c)
c.p[i], c.p[i+1] = l, r
}
for i := 0; i < 256; i += 2 {
l ^= getNextWord(salt, &j)
r ^= getNextWord(salt, &j)
l, r = encryptBlock(l, r, c)
c.s0[i], c.s0[i+1] = l, r
}
for i := 0; i < 256; i += 2 {
l ^= getNextWord(salt, &j)
r ^= getNextWord(salt, &j)
l, r = encryptBlock(l, r, c)
c.s1[i], c.s1[i+1] = l, r
}
for i := 0; i < 256; i += 2 {
l ^= getNextWord(salt, &j)
r ^= getNextWord(salt, &j)
l, r = encryptBlock(l, r, c)
c.s2[i], c.s2[i+1] = l, r
}
for i := 0; i < 256; i += 2 {
l ^= getNextWord(salt, &j)
r ^= getNextWord(salt, &j)
l, r = encryptBlock(l, r, c)
c.s3[i], c.s3[i+1] = l, r
}
}
func encryptBlock(l, r uint32, c *Cipher) (uint32, uint32) {
xl, xr := l, r
xl ^= c.p[0]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[1]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[2]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[3]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[4]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[5]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[6]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[7]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[8]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[9]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[10]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[11]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[12]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[13]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[14]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[15]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[16]
xr ^= c.p[17]
return xr, xl
}
func decryptBlock(l, r uint32, c *Cipher) (uint32, uint32) {
xl, xr := l, r
xl ^= c.p[17]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[16]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[15]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[14]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[13]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[12]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[11]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[10]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[9]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[8]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[7]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[6]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[5]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[4]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[3]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[2]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[1]
xr ^= c.p[0]
return xr, xl
}

vendor/golang.org/x/crypto/blowfish/cipher.go generated vendored Normal file

@@ -0,0 +1,99 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package blowfish implements Bruce Schneier's Blowfish encryption algorithm.
//
// Blowfish is a legacy cipher and its short block size makes it vulnerable to
// birthday bound attacks (see https://sweet32.info). It should only be used
// where compatibility with legacy systems, not security, is the goal.
//
// Deprecated: any new system should use AES (from crypto/aes, if necessary in
// an AEAD mode like crypto/cipher.NewGCM) or XChaCha20-Poly1305 (from
// golang.org/x/crypto/chacha20poly1305).
package blowfish // import "golang.org/x/crypto/blowfish"
// The code is a port of Bruce Schneier's C implementation.
// See https://www.schneier.com/blowfish.html.
import "strconv"
// The Blowfish block size in bytes.
const BlockSize = 8
// A Cipher is an instance of Blowfish encryption using a particular key.
type Cipher struct {
p [18]uint32
s0, s1, s2, s3 [256]uint32
}
type KeySizeError int
func (k KeySizeError) Error() string {
return "crypto/blowfish: invalid key size " + strconv.Itoa(int(k))
}
// NewCipher creates and returns a Cipher.
// The key argument should be the Blowfish key, from 1 to 56 bytes.
func NewCipher(key []byte) (*Cipher, error) {
var result Cipher
if k := len(key); k < 1 || k > 56 {
return nil, KeySizeError(k)
}
initCipher(&result)
ExpandKey(key, &result)
return &result, nil
}
// NewSaltedCipher creates and returns a Cipher that folds a salt into its key
// schedule. For most purposes, NewCipher, instead of NewSaltedCipher, is
// sufficient and desirable. For bcrypt compatibility, the key can be over 56
// bytes.
func NewSaltedCipher(key, salt []byte) (*Cipher, error) {
if len(salt) == 0 {
return NewCipher(key)
}
var result Cipher
if k := len(key); k < 1 {
return nil, KeySizeError(k)
}
initCipher(&result)
expandKeyWithSalt(key, salt, &result)
return &result, nil
}
// BlockSize returns the Blowfish block size, 8 bytes.
// It is necessary to satisfy the Block interface in the
// package "crypto/cipher".
func (c *Cipher) BlockSize() int { return BlockSize }
// Encrypt encrypts the 8-byte buffer src using the key k
// and stores the result in dst.
// Note that for amounts of data larger than a block,
// it is not safe to just call Encrypt on successive blocks;
// instead, use an encryption mode like CBC (see crypto/cipher/cbc.go).
func (c *Cipher) Encrypt(dst, src []byte) {
l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
l, r = encryptBlock(l, r, c)
dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l)
dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r)
}
// Decrypt decrypts the 8-byte buffer src using the key k
// and stores the result in dst.
func (c *Cipher) Decrypt(dst, src []byte) {
l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
l, r = decryptBlock(l, r, c)
dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l)
dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r)
}
func initCipher(c *Cipher) {
copy(c.p[0:], p[0:])
copy(c.s0[0:], s0[0:])
copy(c.s1[0:], s1[0:])
copy(c.s2[0:], s2[0:])
copy(c.s3[0:], s3[0:])
}
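A minimal sketch of the CBC usage that the Encrypt comment recommends; per the package documentation this is for legacy compatibility only, and real code would pad the plaintext (e.g. PKCS#7) to a multiple of the 8-byte block size:

package main

import (
	"crypto/cipher"
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/blowfish"
)

func main() {
	c, err := blowfish.NewCipher([]byte("example key")) // 1 to 56 bytes
	if err != nil {
		panic(err)
	}
	iv := make([]byte, blowfish.BlockSize)
	if _, err := rand.Read(iv); err != nil {
		panic(err)
	}
	plaintext := []byte("16 byte message!") // already a multiple of BlockSize
	ciphertext := make([]byte, len(plaintext))
	cipher.NewCBCEncrypter(c, iv).CryptBlocks(ciphertext, plaintext)
	fmt.Printf("%x\n", ciphertext)
}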

vendor/golang.org/x/crypto/blowfish/const.go generated vendored Normal file

@@ -0,0 +1,199 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// The startup permutation array and substitution boxes.
// They are the hexadecimal digits of PI; see:
// https://www.schneier.com/code/constants.txt.
package blowfish
var s0 = [256]uint32{
0xd1310ba6, 0x98dfb5ac, 0x2ffd72db, 0xd01adfb7, 0xb8e1afed, 0x6a267e96,
0xba7c9045, 0xf12c7f99, 0x24a19947, 0xb3916cf7, 0x0801f2e2, 0x858efc16,
0x636920d8, 0x71574e69, 0xa458fea3, 0xf4933d7e, 0x0d95748f, 0x728eb658,
0x718bcd58, 0x82154aee, 0x7b54a41d, 0xc25a59b5, 0x9c30d539, 0x2af26013,
0xc5d1b023, 0x286085f0, 0xca417918, 0xb8db38ef, 0x8e79dcb0, 0x603a180e,
0x6c9e0e8b, 0xb01e8a3e, 0xd71577c1, 0xbd314b27, 0x78af2fda, 0x55605c60,
0xe65525f3, 0xaa55ab94, 0x57489862, 0x63e81440, 0x55ca396a, 0x2aab10b6,
0xb4cc5c34, 0x1141e8ce, 0xa15486af, 0x7c72e993, 0xb3ee1411, 0x636fbc2a,
0x2ba9c55d, 0x741831f6, 0xce5c3e16, 0x9b87931e, 0xafd6ba33, 0x6c24cf5c,
0x7a325381, 0x28958677, 0x3b8f4898, 0x6b4bb9af, 0xc4bfe81b, 0x66282193,
0x61d809cc, 0xfb21a991, 0x487cac60, 0x5dec8032, 0xef845d5d, 0xe98575b1,
0xdc262302, 0xeb651b88, 0x23893e81, 0xd396acc5, 0x0f6d6ff3, 0x83f44239,
0x2e0b4482, 0xa4842004, 0x69c8f04a, 0x9e1f9b5e, 0x21c66842, 0xf6e96c9a,
0x670c9c61, 0xabd388f0, 0x6a51a0d2, 0xd8542f68, 0x960fa728, 0xab5133a3,
0x6eef0b6c, 0x137a3be4, 0xba3bf050, 0x7efb2a98, 0xa1f1651d, 0x39af0176,
0x66ca593e, 0x82430e88, 0x8cee8619, 0x456f9fb4, 0x7d84a5c3, 0x3b8b5ebe,
0xe06f75d8, 0x85c12073, 0x401a449f, 0x56c16aa6, 0x4ed3aa62, 0x363f7706,
0x1bfedf72, 0x429b023d, 0x37d0d724, 0xd00a1248, 0xdb0fead3, 0x49f1c09b,
0x075372c9, 0x80991b7b, 0x25d479d8, 0xf6e8def7, 0xe3fe501a, 0xb6794c3b,
0x976ce0bd, 0x04c006ba, 0xc1a94fb6, 0x409f60c4, 0x5e5c9ec2, 0x196a2463,
0x68fb6faf, 0x3e6c53b5, 0x1339b2eb, 0x3b52ec6f, 0x6dfc511f, 0x9b30952c,
0xcc814544, 0xaf5ebd09, 0xbee3d004, 0xde334afd, 0x660f2807, 0x192e4bb3,
0xc0cba857, 0x45c8740f, 0xd20b5f39, 0xb9d3fbdb, 0x5579c0bd, 0x1a60320a,
0xd6a100c6, 0x402c7279, 0x679f25fe, 0xfb1fa3cc, 0x8ea5e9f8, 0xdb3222f8,
0x3c7516df, 0xfd616b15, 0x2f501ec8, 0xad0552ab, 0x323db5fa, 0xfd238760,
0x53317b48, 0x3e00df82, 0x9e5c57bb, 0xca6f8ca0, 0x1a87562e, 0xdf1769db,
0xd542a8f6, 0x287effc3, 0xac6732c6, 0x8c4f5573, 0x695b27b0, 0xbbca58c8,
0xe1ffa35d, 0xb8f011a0, 0x10fa3d98, 0xfd2183b8, 0x4afcb56c, 0x2dd1d35b,
0x9a53e479, 0xb6f84565, 0xd28e49bc, 0x4bfb9790, 0xe1ddf2da, 0xa4cb7e33,
0x62fb1341, 0xcee4c6e8, 0xef20cada, 0x36774c01, 0xd07e9efe, 0x2bf11fb4,
0x95dbda4d, 0xae909198, 0xeaad8e71, 0x6b93d5a0, 0xd08ed1d0, 0xafc725e0,
0x8e3c5b2f, 0x8e7594b7, 0x8ff6e2fb, 0xf2122b64, 0x8888b812, 0x900df01c,
0x4fad5ea0, 0x688fc31c, 0xd1cff191, 0xb3a8c1ad, 0x2f2f2218, 0xbe0e1777,
0xea752dfe, 0x8b021fa1, 0xe5a0cc0f, 0xb56f74e8, 0x18acf3d6, 0xce89e299,
0xb4a84fe0, 0xfd13e0b7, 0x7cc43b81, 0xd2ada8d9, 0x165fa266, 0x80957705,
0x93cc7314, 0x211a1477, 0xe6ad2065, 0x77b5fa86, 0xc75442f5, 0xfb9d35cf,
0xebcdaf0c, 0x7b3e89a0, 0xd6411bd3, 0xae1e7e49, 0x00250e2d, 0x2071b35e,
0x226800bb, 0x57b8e0af, 0x2464369b, 0xf009b91e, 0x5563911d, 0x59dfa6aa,
0x78c14389, 0xd95a537f, 0x207d5ba2, 0x02e5b9c5, 0x83260376, 0x6295cfa9,
0x11c81968, 0x4e734a41, 0xb3472dca, 0x7b14a94a, 0x1b510052, 0x9a532915,
0xd60f573f, 0xbc9bc6e4, 0x2b60a476, 0x81e67400, 0x08ba6fb5, 0x571be91f,
0xf296ec6b, 0x2a0dd915, 0xb6636521, 0xe7b9f9b6, 0xff34052e, 0xc5855664,
0x53b02d5d, 0xa99f8fa1, 0x08ba4799, 0x6e85076a,
}
var s1 = [256]uint32{
0x4b7a70e9, 0xb5b32944, 0xdb75092e, 0xc4192623, 0xad6ea6b0, 0x49a7df7d,
0x9cee60b8, 0x8fedb266, 0xecaa8c71, 0x699a17ff, 0x5664526c, 0xc2b19ee1,
0x193602a5, 0x75094c29, 0xa0591340, 0xe4183a3e, 0x3f54989a, 0x5b429d65,
0x6b8fe4d6, 0x99f73fd6, 0xa1d29c07, 0xefe830f5, 0x4d2d38e6, 0xf0255dc1,
0x4cdd2086, 0x8470eb26, 0x6382e9c6, 0x021ecc5e, 0x09686b3f, 0x3ebaefc9,
0x3c971814, 0x6b6a70a1, 0x687f3584, 0x52a0e286, 0xb79c5305, 0xaa500737,
0x3e07841c, 0x7fdeae5c, 0x8e7d44ec, 0x5716f2b8, 0xb03ada37, 0xf0500c0d,
0xf01c1f04, 0x0200b3ff, 0xae0cf51a, 0x3cb574b2, 0x25837a58, 0xdc0921bd,
0xd19113f9, 0x7ca92ff6, 0x94324773, 0x22f54701, 0x3ae5e581, 0x37c2dadc,
0xc8b57634, 0x9af3dda7, 0xa9446146, 0x0fd0030e, 0xecc8c73e, 0xa4751e41,
0xe238cd99, 0x3bea0e2f, 0x3280bba1, 0x183eb331, 0x4e548b38, 0x4f6db908,
0x6f420d03, 0xf60a04bf, 0x2cb81290, 0x24977c79, 0x5679b072, 0xbcaf89af,
0xde9a771f, 0xd9930810, 0xb38bae12, 0xdccf3f2e, 0x5512721f, 0x2e6b7124,
0x501adde6, 0x9f84cd87, 0x7a584718, 0x7408da17, 0xbc9f9abc, 0xe94b7d8c,
0xec7aec3a, 0xdb851dfa, 0x63094366, 0xc464c3d2, 0xef1c1847, 0x3215d908,
0xdd433b37, 0x24c2ba16, 0x12a14d43, 0x2a65c451, 0x50940002, 0x133ae4dd,
0x71dff89e, 0x10314e55, 0x81ac77d6, 0x5f11199b, 0x043556f1, 0xd7a3c76b,
0x3c11183b, 0x5924a509, 0xf28fe6ed, 0x97f1fbfa, 0x9ebabf2c, 0x1e153c6e,
0x86e34570, 0xeae96fb1, 0x860e5e0a, 0x5a3e2ab3, 0x771fe71c, 0x4e3d06fa,
0x2965dcb9, 0x99e71d0f, 0x803e89d6, 0x5266c825, 0x2e4cc978, 0x9c10b36a,
0xc6150eba, 0x94e2ea78, 0xa5fc3c53, 0x1e0a2df4, 0xf2f74ea7, 0x361d2b3d,
0x1939260f, 0x19c27960, 0x5223a708, 0xf71312b6, 0xebadfe6e, 0xeac31f66,
0xe3bc4595, 0xa67bc883, 0xb17f37d1, 0x018cff28, 0xc332ddef, 0xbe6c5aa5,
0x65582185, 0x68ab9802, 0xeecea50f, 0xdb2f953b, 0x2aef7dad, 0x5b6e2f84,
0x1521b628, 0x29076170, 0xecdd4775, 0x619f1510, 0x13cca830, 0xeb61bd96,
0x0334fe1e, 0xaa0363cf, 0xb5735c90, 0x4c70a239, 0xd59e9e0b, 0xcbaade14,
0xeecc86bc, 0x60622ca7, 0x9cab5cab, 0xb2f3846e, 0x648b1eaf, 0x19bdf0ca,
0xa02369b9, 0x655abb50, 0x40685a32, 0x3c2ab4b3, 0x319ee9d5, 0xc021b8f7,
0x9b540b19, 0x875fa099, 0x95f7997e, 0x623d7da8, 0xf837889a, 0x97e32d77,
0x11ed935f, 0x16681281, 0x0e358829, 0xc7e61fd6, 0x96dedfa1, 0x7858ba99,
0x57f584a5, 0x1b227263, 0x9b83c3ff, 0x1ac24696, 0xcdb30aeb, 0x532e3054,
0x8fd948e4, 0x6dbc3128, 0x58ebf2ef, 0x34c6ffea, 0xfe28ed61, 0xee7c3c73,
0x5d4a14d9, 0xe864b7e3, 0x42105d14, 0x203e13e0, 0x45eee2b6, 0xa3aaabea,
0xdb6c4f15, 0xfacb4fd0, 0xc742f442, 0xef6abbb5, 0x654f3b1d, 0x41cd2105,
0xd81e799e, 0x86854dc7, 0xe44b476a, 0x3d816250, 0xcf62a1f2, 0x5b8d2646,
0xfc8883a0, 0xc1c7b6a3, 0x7f1524c3, 0x69cb7492, 0x47848a0b, 0x5692b285,
0x095bbf00, 0xad19489d, 0x1462b174, 0x23820e00, 0x58428d2a, 0x0c55f5ea,
0x1dadf43e, 0x233f7061, 0x3372f092, 0x8d937e41, 0xd65fecf1, 0x6c223bdb,
0x7cde3759, 0xcbee7460, 0x4085f2a7, 0xce77326e, 0xa6078084, 0x19f8509e,
0xe8efd855, 0x61d99735, 0xa969a7aa, 0xc50c06c2, 0x5a04abfc, 0x800bcadc,
0x9e447a2e, 0xc3453484, 0xfdd56705, 0x0e1e9ec9, 0xdb73dbd3, 0x105588cd,
0x675fda79, 0xe3674340, 0xc5c43465, 0x713e38d8, 0x3d28f89e, 0xf16dff20,
0x153e21e7, 0x8fb03d4a, 0xe6e39f2b, 0xdb83adf7,
}
var s2 = [256]uint32{
0xe93d5a68, 0x948140f7, 0xf64c261c, 0x94692934, 0x411520f7, 0x7602d4f7,
0xbcf46b2e, 0xd4a20068, 0xd4082471, 0x3320f46a, 0x43b7d4b7, 0x500061af,
0x1e39f62e, 0x97244546, 0x14214f74, 0xbf8b8840, 0x4d95fc1d, 0x96b591af,
0x70f4ddd3, 0x66a02f45, 0xbfbc09ec, 0x03bd9785, 0x7fac6dd0, 0x31cb8504,
0x96eb27b3, 0x55fd3941, 0xda2547e6, 0xabca0a9a, 0x28507825, 0x530429f4,
0x0a2c86da, 0xe9b66dfb, 0x68dc1462, 0xd7486900, 0x680ec0a4, 0x27a18dee,
0x4f3ffea2, 0xe887ad8c, 0xb58ce006, 0x7af4d6b6, 0xaace1e7c, 0xd3375fec,
0xce78a399, 0x406b2a42, 0x20fe9e35, 0xd9f385b9, 0xee39d7ab, 0x3b124e8b,
0x1dc9faf7, 0x4b6d1856, 0x26a36631, 0xeae397b2, 0x3a6efa74, 0xdd5b4332,
0x6841e7f7, 0xca7820fb, 0xfb0af54e, 0xd8feb397, 0x454056ac, 0xba489527,
0x55533a3a, 0x20838d87, 0xfe6ba9b7, 0xd096954b, 0x55a867bc, 0xa1159a58,
0xcca92963, 0x99e1db33, 0xa62a4a56, 0x3f3125f9, 0x5ef47e1c, 0x9029317c,
0xfdf8e802, 0x04272f70, 0x80bb155c, 0x05282ce3, 0x95c11548, 0xe4c66d22,
0x48c1133f, 0xc70f86dc, 0x07f9c9ee, 0x41041f0f, 0x404779a4, 0x5d886e17,
0x325f51eb, 0xd59bc0d1, 0xf2bcc18f, 0x41113564, 0x257b7834, 0x602a9c60,
0xdff8e8a3, 0x1f636c1b, 0x0e12b4c2, 0x02e1329e, 0xaf664fd1, 0xcad18115,
0x6b2395e0, 0x333e92e1, 0x3b240b62, 0xeebeb922, 0x85b2a20e, 0xe6ba0d99,
0xde720c8c, 0x2da2f728, 0xd0127845, 0x95b794fd, 0x647d0862, 0xe7ccf5f0,
0x5449a36f, 0x877d48fa, 0xc39dfd27, 0xf33e8d1e, 0x0a476341, 0x992eff74,
0x3a6f6eab, 0xf4f8fd37, 0xa812dc60, 0xa1ebddf8, 0x991be14c, 0xdb6e6b0d,
0xc67b5510, 0x6d672c37, 0x2765d43b, 0xdcd0e804, 0xf1290dc7, 0xcc00ffa3,
0xb5390f92, 0x690fed0b, 0x667b9ffb, 0xcedb7d9c, 0xa091cf0b, 0xd9155ea3,
0xbb132f88, 0x515bad24, 0x7b9479bf, 0x763bd6eb, 0x37392eb3, 0xcc115979,
0x8026e297, 0xf42e312d, 0x6842ada7, 0xc66a2b3b, 0x12754ccc, 0x782ef11c,
0x6a124237, 0xb79251e7, 0x06a1bbe6, 0x4bfb6350, 0x1a6b1018, 0x11caedfa,
0x3d25bdd8, 0xe2e1c3c9, 0x44421659, 0x0a121386, 0xd90cec6e, 0xd5abea2a,
0x64af674e, 0xda86a85f, 0xbebfe988, 0x64e4c3fe, 0x9dbc8057, 0xf0f7c086,
0x60787bf8, 0x6003604d, 0xd1fd8346, 0xf6381fb0, 0x7745ae04, 0xd736fccc,
0x83426b33, 0xf01eab71, 0xb0804187, 0x3c005e5f, 0x77a057be, 0xbde8ae24,
0x55464299, 0xbf582e61, 0x4e58f48f, 0xf2ddfda2, 0xf474ef38, 0x8789bdc2,
0x5366f9c3, 0xc8b38e74, 0xb475f255, 0x46fcd9b9, 0x7aeb2661, 0x8b1ddf84,
0x846a0e79, 0x915f95e2, 0x466e598e, 0x20b45770, 0x8cd55591, 0xc902de4c,
0xb90bace1, 0xbb8205d0, 0x11a86248, 0x7574a99e, 0xb77f19b6, 0xe0a9dc09,
0x662d09a1, 0xc4324633, 0xe85a1f02, 0x09f0be8c, 0x4a99a025, 0x1d6efe10,
0x1ab93d1d, 0x0ba5a4df, 0xa186f20f, 0x2868f169, 0xdcb7da83, 0x573906fe,
0xa1e2ce9b, 0x4fcd7f52, 0x50115e01, 0xa70683fa, 0xa002b5c4, 0x0de6d027,
0x9af88c27, 0x773f8641, 0xc3604c06, 0x61a806b5, 0xf0177a28, 0xc0f586e0,
0x006058aa, 0x30dc7d62, 0x11e69ed7, 0x2338ea63, 0x53c2dd94, 0xc2c21634,
0xbbcbee56, 0x90bcb6de, 0xebfc7da1, 0xce591d76, 0x6f05e409, 0x4b7c0188,
0x39720a3d, 0x7c927c24, 0x86e3725f, 0x724d9db9, 0x1ac15bb4, 0xd39eb8fc,
0xed545578, 0x08fca5b5, 0xd83d7cd3, 0x4dad0fc4, 0x1e50ef5e, 0xb161e6f8,
0xa28514d9, 0x6c51133c, 0x6fd5c7e7, 0x56e14ec4, 0x362abfce, 0xddc6c837,
0xd79a3234, 0x92638212, 0x670efa8e, 0x406000e0,
}
var s3 = [256]uint32{
0x3a39ce37, 0xd3faf5cf, 0xabc27737, 0x5ac52d1b, 0x5cb0679e, 0x4fa33742,
0xd3822740, 0x99bc9bbe, 0xd5118e9d, 0xbf0f7315, 0xd62d1c7e, 0xc700c47b,
0xb78c1b6b, 0x21a19045, 0xb26eb1be, 0x6a366eb4, 0x5748ab2f, 0xbc946e79,
0xc6a376d2, 0x6549c2c8, 0x530ff8ee, 0x468dde7d, 0xd5730a1d, 0x4cd04dc6,
0x2939bbdb, 0xa9ba4650, 0xac9526e8, 0xbe5ee304, 0xa1fad5f0, 0x6a2d519a,
0x63ef8ce2, 0x9a86ee22, 0xc089c2b8, 0x43242ef6, 0xa51e03aa, 0x9cf2d0a4,
0x83c061ba, 0x9be96a4d, 0x8fe51550, 0xba645bd6, 0x2826a2f9, 0xa73a3ae1,
0x4ba99586, 0xef5562e9, 0xc72fefd3, 0xf752f7da, 0x3f046f69, 0x77fa0a59,
0x80e4a915, 0x87b08601, 0x9b09e6ad, 0x3b3ee593, 0xe990fd5a, 0x9e34d797,
0x2cf0b7d9, 0x022b8b51, 0x96d5ac3a, 0x017da67d, 0xd1cf3ed6, 0x7c7d2d28,
0x1f9f25cf, 0xadf2b89b, 0x5ad6b472, 0x5a88f54c, 0xe029ac71, 0xe019a5e6,
0x47b0acfd, 0xed93fa9b, 0xe8d3c48d, 0x283b57cc, 0xf8d56629, 0x79132e28,
0x785f0191, 0xed756055, 0xf7960e44, 0xe3d35e8c, 0x15056dd4, 0x88f46dba,
0x03a16125, 0x0564f0bd, 0xc3eb9e15, 0x3c9057a2, 0x97271aec, 0xa93a072a,
0x1b3f6d9b, 0x1e6321f5, 0xf59c66fb, 0x26dcf319, 0x7533d928, 0xb155fdf5,
0x03563482, 0x8aba3cbb, 0x28517711, 0xc20ad9f8, 0xabcc5167, 0xccad925f,
0x4de81751, 0x3830dc8e, 0x379d5862, 0x9320f991, 0xea7a90c2, 0xfb3e7bce,
0x5121ce64, 0x774fbe32, 0xa8b6e37e, 0xc3293d46, 0x48de5369, 0x6413e680,
0xa2ae0810, 0xdd6db224, 0x69852dfd, 0x09072166, 0xb39a460a, 0x6445c0dd,
0x586cdecf, 0x1c20c8ae, 0x5bbef7dd, 0x1b588d40, 0xccd2017f, 0x6bb4e3bb,
0xdda26a7e, 0x3a59ff45, 0x3e350a44, 0xbcb4cdd5, 0x72eacea8, 0xfa6484bb,
0x8d6612ae, 0xbf3c6f47, 0xd29be463, 0x542f5d9e, 0xaec2771b, 0xf64e6370,
0x740e0d8d, 0xe75b1357, 0xf8721671, 0xaf537d5d, 0x4040cb08, 0x4eb4e2cc,
0x34d2466a, 0x0115af84, 0xe1b00428, 0x95983a1d, 0x06b89fb4, 0xce6ea048,
0x6f3f3b82, 0x3520ab82, 0x011a1d4b, 0x277227f8, 0x611560b1, 0xe7933fdc,
0xbb3a792b, 0x344525bd, 0xa08839e1, 0x51ce794b, 0x2f32c9b7, 0xa01fbac9,
0xe01cc87e, 0xbcc7d1f6, 0xcf0111c3, 0xa1e8aac7, 0x1a908749, 0xd44fbd9a,
0xd0dadecb, 0xd50ada38, 0x0339c32a, 0xc6913667, 0x8df9317c, 0xe0b12b4f,
0xf79e59b7, 0x43f5bb3a, 0xf2d519ff, 0x27d9459c, 0xbf97222c, 0x15e6fc2a,
0x0f91fc71, 0x9b941525, 0xfae59361, 0xceb69ceb, 0xc2a86459, 0x12baa8d1,
0xb6c1075e, 0xe3056a0c, 0x10d25065, 0xcb03a442, 0xe0ec6e0e, 0x1698db3b,
0x4c98a0be, 0x3278e964, 0x9f1f9532, 0xe0d392df, 0xd3a0342b, 0x8971f21e,
0x1b0a7441, 0x4ba3348c, 0xc5be7120, 0xc37632d8, 0xdf359f8d, 0x9b992f2e,
0xe60b6f47, 0x0fe3f11d, 0xe54cda54, 0x1edad891, 0xce6279cf, 0xcd3e7e6f,
0x1618b166, 0xfd2c1d05, 0x848fd2c5, 0xf6fb2299, 0xf523f357, 0xa6327623,
0x93a83531, 0x56cccd02, 0xacf08162, 0x5a75ebb5, 0x6e163697, 0x88d273cc,
0xde966292, 0x81b949d0, 0x4c50901b, 0x71c65614, 0xe6c6c7bd, 0x327a140a,
0x45e1d006, 0xc3f27b9a, 0xc9aa53fd, 0x62a80f00, 0xbb25bfe2, 0x35bdd2f6,
0x71126905, 0xb2040222, 0xb6cbcf7c, 0xcd769c2b, 0x53113ec0, 0x1640e3d3,
0x38abbd60, 0x2547adf0, 0xba38209c, 0xf746ce76, 0x77afa1c5, 0x20756060,
0x85cbfe4e, 0x8ae88dd8, 0x7aaaf9b0, 0x4cf9aa7e, 0x1948c25c, 0x02fb8a8c,
0x01c36ae4, 0xd6ebe1f9, 0x90d4f869, 0xa65cdea0, 0x3f09252d, 0xc208e69f,
0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6,
}
var p = [18]uint32{
0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344, 0xa4093822, 0x299f31d0,
0x082efa98, 0xec4e6c89, 0x452821e6, 0x38d01377, 0xbe5466cf, 0x34e90c6c,
0xc0ac29b7, 0xc97c50dd, 0x3f84d5b5, 0xb5470917, 0x9216d5d9, 0x8979fb1b,
}

vendor/golang.org/x/crypto/nacl/box/BUILD.bazel generated vendored

@@ -7,6 +7,7 @@ go_library(
importpath = "golang.org/x/crypto/nacl/box",
visibility = ["//visibility:public"],
deps = [
"//vendor/golang.org/x/crypto/blake2b:go_default_library",
"//vendor/golang.org/x/crypto/curve25519:go_default_library",
"//vendor/golang.org/x/crypto/nacl/secretbox:go_default_library",
"//vendor/golang.org/x/crypto/salsa20/salsa:go_default_library",

vendor/golang.org/x/crypto/nacl/box/box.go generated vendored

@@ -31,19 +31,30 @@ Thus large amounts of data should be chunked so that each message is small.
chunk size.
This package is interoperable with NaCl: https://nacl.cr.yp.to/box.html.
Anonymous sealing/opening is an extension of NaCl defined by and interoperable
with libsodium:
https://libsodium.gitbook.io/doc/public-key_cryptography/sealed_boxes.
*/
package box // import "golang.org/x/crypto/nacl/box"
import (
cryptorand "crypto/rand"
"io"
"golang.org/x/crypto/blake2b"
"golang.org/x/crypto/curve25519"
"golang.org/x/crypto/nacl/secretbox"
"golang.org/x/crypto/salsa20/salsa"
)
// Overhead is the number of bytes of overhead when boxing a message.
const Overhead = secretbox.Overhead
const (
// Overhead is the number of bytes of overhead when boxing a message.
Overhead = secretbox.Overhead
// AnonymousOverhead is the number of bytes of overhead when using anonymous
// sealed boxes.
AnonymousOverhead = Overhead + 32
)
// GenerateKey generates a new public/private key pair suitable for use with
// Seal and Open.
@@ -101,3 +112,71 @@ func Open(out, box []byte, nonce *[24]byte, peersPublicKey, privateKey *[32]byte
func OpenAfterPrecomputation(out, box []byte, nonce *[24]byte, sharedKey *[32]byte) ([]byte, bool) {
return secretbox.Open(out, box, nonce, sharedKey)
}
// SealAnonymous appends an encrypted and authenticated copy of message to out,
// which will be AnonymousOverhead bytes longer than the original and must not
// overlap it. This differs from Seal in that the sender is not required to
// provide a private key.
func SealAnonymous(out, message []byte, recipient *[32]byte, rand io.Reader) ([]byte, error) {
if rand == nil {
rand = cryptorand.Reader
}
ephemeralPub, ephemeralPriv, err := GenerateKey(rand)
if err != nil {
return nil, err
}
var nonce [24]byte
if err := sealNonce(ephemeralPub, recipient, &nonce); err != nil {
return nil, err
}
if total := len(out) + AnonymousOverhead + len(message); cap(out) < total {
original := out
out = make([]byte, 0, total)
out = append(out, original...)
}
out = append(out, ephemeralPub[:]...)
return Seal(out, message, &nonce, recipient, ephemeralPriv), nil
}
// OpenAnonymous authenticates and decrypts a box produced by SealAnonymous and
// appends the message to out, which must not overlap box. The output will be
// AnonymousOverhead bytes smaller than box.
func OpenAnonymous(out, box []byte, publicKey, privateKey *[32]byte) (message []byte, ok bool) {
if len(box) < AnonymousOverhead {
return nil, false
}
var ephemeralPub [32]byte
copy(ephemeralPub[:], box[:32])
var nonce [24]byte
if err := sealNonce(&ephemeralPub, publicKey, &nonce); err != nil {
return nil, false
}
return Open(out, box[32:], &nonce, &ephemeralPub, privateKey)
}
// sealNonce generates a 24-byte nonce that is a BLAKE2b digest of the
// ephemeral public key and the receiver's public key.
func sealNonce(ephemeralPub, peersPublicKey *[32]byte, nonce *[24]byte) error {
h, err := blake2b.New(24, nil)
if err != nil {
return err
}
if _, err = h.Write(ephemeralPub[:]); err != nil {
return err
}
if _, err = h.Write(peersPublicKey[:]); err != nil {
return err
}
h.Sum(nonce[:0])
return nil
}
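
A minimal round-trip sketch of the sealed-box API added above, using only names defined in this diff (the message text is illustrative):

package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/nacl/box"
)

func main() {
	recipientPub, recipientPriv, err := box.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}

	// Only the recipient's public key is needed to seal; the ephemeral
	// sender key is generated internally and prepended to the box, which
	// is why the result is AnonymousOverhead bytes longer than the message.
	sealed, err := box.SealAnonymous(nil, []byte("hello"), recipientPub, nil)
	if err != nil {
		panic(err)
	}

	// Opening requires both halves of the recipient's key pair.
	msg, ok := box.OpenAnonymous(nil, sealed, recipientPub, recipientPriv)
	if !ok {
		panic("sealed box failed to open")
	}
	fmt.Printf("%s\n", msg)
}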

View File

@ -9,8 +9,6 @@ go_library(
"poly1305.go",
"sum_amd64.go",
"sum_amd64.s",
"sum_arm.go",
"sum_arm.s",
"sum_generic.go",
"sum_noasm.go",
"sum_ppc64le.go",

View File

@ -1,19 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build arm,!gccgo,!appengine,!nacl
package poly1305
// poly1305_auth_armv6 is implemented in sum_arm.s
//go:noescape
func poly1305_auth_armv6(out *[16]byte, m *byte, mlen uint32, key *[32]byte)
func sum(out *[16]byte, m []byte, key *[32]byte) {
var mPtr *byte
if len(m) > 0 {
mPtr = &m[0]
}
poly1305_auth_armv6(out, mPtr, uint32(len(m)), key)
}

View File

@ -1,427 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build arm,!gccgo,!appengine,!nacl
#include "textflag.h"
// This code was translated into a form compatible with 5a from the public
// domain source by Andrew Moon: github.com/floodyberry/poly1305-opt/blob/master/app/extensions/poly1305.
DATA ·poly1305_init_constants_armv6<>+0x00(SB)/4, $0x3ffffff
DATA ·poly1305_init_constants_armv6<>+0x04(SB)/4, $0x3ffff03
DATA ·poly1305_init_constants_armv6<>+0x08(SB)/4, $0x3ffc0ff
DATA ·poly1305_init_constants_armv6<>+0x0c(SB)/4, $0x3f03fff
DATA ·poly1305_init_constants_armv6<>+0x10(SB)/4, $0x00fffff
GLOBL ·poly1305_init_constants_armv6<>(SB), 8, $20
// Warning: the linker may use R11 to synthesize certain instructions. Please
// take care and verify that no synthetic instructions use it.
TEXT poly1305_init_ext_armv6<>(SB), NOSPLIT, $0
// Needs 16 bytes of stack and 64 bytes of space pointed to by R0. (It
// might look like it's only 60 bytes of space but the final four bytes
// will be written by another function.) We need to skip over four
// bytes of stack because that's saving the value of 'g'.
ADD $4, R13, R8
MOVM.IB [R4-R7], (R8)
MOVM.IA.W (R1), [R2-R5]
MOVW $·poly1305_init_constants_armv6<>(SB), R7
MOVW R2, R8
MOVW R2>>26, R9
MOVW R3>>20, g
MOVW R4>>14, R11
MOVW R5>>8, R12
ORR R3<<6, R9, R9
ORR R4<<12, g, g
ORR R5<<18, R11, R11
MOVM.IA (R7), [R2-R6]
AND R8, R2, R2
AND R9, R3, R3
AND g, R4, R4
AND R11, R5, R5
AND R12, R6, R6
MOVM.IA.W [R2-R6], (R0)
EOR R2, R2, R2
EOR R3, R3, R3
EOR R4, R4, R4
EOR R5, R5, R5
EOR R6, R6, R6
MOVM.IA.W [R2-R6], (R0)
MOVM.IA.W (R1), [R2-R5]
MOVM.IA [R2-R6], (R0)
ADD $20, R13, R0
MOVM.DA (R0), [R4-R7]
RET
#define MOVW_UNALIGNED(Rsrc, Rdst, Rtmp, offset) \
MOVBU (offset+0)(Rsrc), Rtmp; \
MOVBU Rtmp, (offset+0)(Rdst); \
MOVBU (offset+1)(Rsrc), Rtmp; \
MOVBU Rtmp, (offset+1)(Rdst); \
MOVBU (offset+2)(Rsrc), Rtmp; \
MOVBU Rtmp, (offset+2)(Rdst); \
MOVBU (offset+3)(Rsrc), Rtmp; \
MOVBU Rtmp, (offset+3)(Rdst)
TEXT poly1305_blocks_armv6<>(SB), NOSPLIT, $0
// Needs 24 bytes of stack for saved registers and then 88 bytes of
// scratch space after that. We assume that 24 bytes at (R13) have
// already been used: four bytes for the link register saved in the
// prelude of poly1305_auth_armv6, four bytes for saving the value of g
// in that function and 16 bytes of scratch space used around
// poly1305_finish_ext_armv6_skip1.
ADD $24, R13, R12
MOVM.IB [R4-R8, R14], (R12)
MOVW R0, 88(R13)
MOVW R1, 92(R13)
MOVW R2, 96(R13)
MOVW R1, R14
MOVW R2, R12
MOVW 56(R0), R8
WORD $0xe1180008 // TST R8, R8 not working see issue 5921
EOR R6, R6, R6
MOVW.EQ $(1<<24), R6
MOVW R6, 84(R13)
ADD $116, R13, g
MOVM.IA (R0), [R0-R9]
MOVM.IA [R0-R4], (g)
CMP $16, R12
BLO poly1305_blocks_armv6_done
poly1305_blocks_armv6_mainloop:
WORD $0xe31e0003 // TST R14, #3 not working see issue 5921
BEQ poly1305_blocks_armv6_mainloop_aligned
ADD $100, R13, g
MOVW_UNALIGNED(R14, g, R0, 0)
MOVW_UNALIGNED(R14, g, R0, 4)
MOVW_UNALIGNED(R14, g, R0, 8)
MOVW_UNALIGNED(R14, g, R0, 12)
MOVM.IA (g), [R0-R3]
ADD $16, R14
B poly1305_blocks_armv6_mainloop_loaded
poly1305_blocks_armv6_mainloop_aligned:
MOVM.IA.W (R14), [R0-R3]
poly1305_blocks_armv6_mainloop_loaded:
MOVW R0>>26, g
MOVW R1>>20, R11
MOVW R2>>14, R12
MOVW R14, 92(R13)
MOVW R3>>8, R4
ORR R1<<6, g, g
ORR R2<<12, R11, R11
ORR R3<<18, R12, R12
BIC $0xfc000000, R0, R0
BIC $0xfc000000, g, g
MOVW 84(R13), R3
BIC $0xfc000000, R11, R11
BIC $0xfc000000, R12, R12
ADD R0, R5, R5
ADD g, R6, R6
ORR R3, R4, R4
ADD R11, R7, R7
ADD $116, R13, R14
ADD R12, R8, R8
ADD R4, R9, R9
MOVM.IA (R14), [R0-R4]
MULLU R4, R5, (R11, g)
MULLU R3, R5, (R14, R12)
MULALU R3, R6, (R11, g)
MULALU R2, R6, (R14, R12)
MULALU R2, R7, (R11, g)
MULALU R1, R7, (R14, R12)
ADD R4<<2, R4, R4
ADD R3<<2, R3, R3
MULALU R1, R8, (R11, g)
MULALU R0, R8, (R14, R12)
MULALU R0, R9, (R11, g)
MULALU R4, R9, (R14, R12)
MOVW g, 76(R13)
MOVW R11, 80(R13)
MOVW R12, 68(R13)
MOVW R14, 72(R13)
MULLU R2, R5, (R11, g)
MULLU R1, R5, (R14, R12)
MULALU R1, R6, (R11, g)
MULALU R0, R6, (R14, R12)
MULALU R0, R7, (R11, g)
MULALU R4, R7, (R14, R12)
ADD R2<<2, R2, R2
ADD R1<<2, R1, R1
MULALU R4, R8, (R11, g)
MULALU R3, R8, (R14, R12)
MULALU R3, R9, (R11, g)
MULALU R2, R9, (R14, R12)
MOVW g, 60(R13)
MOVW R11, 64(R13)
MOVW R12, 52(R13)
MOVW R14, 56(R13)
MULLU R0, R5, (R11, g)
MULALU R4, R6, (R11, g)
MULALU R3, R7, (R11, g)
MULALU R2, R8, (R11, g)
MULALU R1, R9, (R11, g)
ADD $52, R13, R0
MOVM.IA (R0), [R0-R7]
MOVW g>>26, R12
MOVW R4>>26, R14
ORR R11<<6, R12, R12
ORR R5<<6, R14, R14
BIC $0xfc000000, g, g
BIC $0xfc000000, R4, R4
ADD.S R12, R0, R0
ADC $0, R1, R1
ADD.S R14, R6, R6
ADC $0, R7, R7
MOVW R0>>26, R12
MOVW R6>>26, R14
ORR R1<<6, R12, R12
ORR R7<<6, R14, R14
BIC $0xfc000000, R0, R0
BIC $0xfc000000, R6, R6
ADD R14<<2, R14, R14
ADD.S R12, R2, R2
ADC $0, R3, R3
ADD R14, g, g
MOVW R2>>26, R12
MOVW g>>26, R14
ORR R3<<6, R12, R12
BIC $0xfc000000, g, R5
BIC $0xfc000000, R2, R7
ADD R12, R4, R4
ADD R14, R0, R0
MOVW R4>>26, R12
BIC $0xfc000000, R4, R8
ADD R12, R6, R9
MOVW 96(R13), R12
MOVW 92(R13), R14
MOVW R0, R6
CMP $32, R12
SUB $16, R12, R12
MOVW R12, 96(R13)
BHS poly1305_blocks_armv6_mainloop
poly1305_blocks_armv6_done:
MOVW 88(R13), R12
MOVW R5, 20(R12)
MOVW R6, 24(R12)
MOVW R7, 28(R12)
MOVW R8, 32(R12)
MOVW R9, 36(R12)
ADD $48, R13, R0
MOVM.DA (R0), [R4-R8, R14]
RET
#define MOVHUP_UNALIGNED(Rsrc, Rdst, Rtmp) \
MOVBU.P 1(Rsrc), Rtmp; \
MOVBU.P Rtmp, 1(Rdst); \
MOVBU.P 1(Rsrc), Rtmp; \
MOVBU.P Rtmp, 1(Rdst)
#define MOVWP_UNALIGNED(Rsrc, Rdst, Rtmp) \
MOVHUP_UNALIGNED(Rsrc, Rdst, Rtmp); \
MOVHUP_UNALIGNED(Rsrc, Rdst, Rtmp)
// func poly1305_auth_armv6(out *[16]byte, m *byte, mlen uint32, key *[32]key)
TEXT ·poly1305_auth_armv6(SB), $196-16
// The value 196, just above, is the sum of 64 (the size of the context
// structure) and 132 (the amount of stack needed).
//
// At this point, the stack pointer (R13) has been moved down. It
// points to the saved link register and there's 196 bytes of free
// space above it.
//
// The stack for this function looks like:
//
// +---------------------
// |
// | 64 bytes of context structure
// |
// +---------------------
// |
// | 112 bytes for poly1305_blocks_armv6
// |
// +---------------------
// | 16 bytes of final block, constructed at
// | poly1305_finish_ext_armv6_skip8
// +---------------------
// | four bytes of saved 'g'
// +---------------------
// | lr, saved by prelude <- R13 points here
// +---------------------
MOVW g, 4(R13)
MOVW out+0(FP), R4
MOVW m+4(FP), R5
MOVW mlen+8(FP), R6
MOVW key+12(FP), R7
ADD $136, R13, R0 // 136 = 4 + 4 + 16 + 112
MOVW R7, R1
// poly1305_init_ext_armv6 will write to the stack from R13+4, but
// that's ok because none of the other values have been written yet.
BL poly1305_init_ext_armv6<>(SB)
BIC.S $15, R6, R2
BEQ poly1305_auth_armv6_noblocks
ADD $136, R13, R0
MOVW R5, R1
ADD R2, R5, R5
SUB R2, R6, R6
BL poly1305_blocks_armv6<>(SB)
poly1305_auth_armv6_noblocks:
ADD $136, R13, R0
MOVW R5, R1
MOVW R6, R2
MOVW R4, R3
MOVW R0, R5
MOVW R1, R6
MOVW R2, R7
MOVW R3, R8
AND.S R2, R2, R2
BEQ poly1305_finish_ext_armv6_noremaining
EOR R0, R0
ADD $8, R13, R9 // 8 = offset to 16 byte scratch space
MOVW R0, (R9)
MOVW R0, 4(R9)
MOVW R0, 8(R9)
MOVW R0, 12(R9)
WORD $0xe3110003 // TST R1, #3 not working see issue 5921
BEQ poly1305_finish_ext_armv6_aligned
WORD $0xe3120008 // TST R2, #8 not working see issue 5921
BEQ poly1305_finish_ext_armv6_skip8
MOVWP_UNALIGNED(R1, R9, g)
MOVWP_UNALIGNED(R1, R9, g)
poly1305_finish_ext_armv6_skip8:
WORD $0xe3120004 // TST $4, R2 not working see issue 5921
BEQ poly1305_finish_ext_armv6_skip4
MOVWP_UNALIGNED(R1, R9, g)
poly1305_finish_ext_armv6_skip4:
WORD $0xe3120002 // TST $2, R2 not working see issue 5921
BEQ poly1305_finish_ext_armv6_skip2
MOVHUP_UNALIGNED(R1, R9, g)
B poly1305_finish_ext_armv6_skip2
poly1305_finish_ext_armv6_aligned:
WORD $0xe3120008 // TST R2, #8 not working see issue 5921
BEQ poly1305_finish_ext_armv6_skip8_aligned
MOVM.IA.W (R1), [g-R11]
MOVM.IA.W [g-R11], (R9)
poly1305_finish_ext_armv6_skip8_aligned:
WORD $0xe3120004 // TST $4, R2 not working see issue 5921
BEQ poly1305_finish_ext_armv6_skip4_aligned
MOVW.P 4(R1), g
MOVW.P g, 4(R9)
poly1305_finish_ext_armv6_skip4_aligned:
WORD $0xe3120002 // TST $2, R2 not working see issue 5921
BEQ poly1305_finish_ext_armv6_skip2
MOVHU.P 2(R1), g
MOVH.P g, 2(R9)
poly1305_finish_ext_armv6_skip2:
WORD $0xe3120001 // TST $1, R2 not working see issue 5921
BEQ poly1305_finish_ext_armv6_skip1
MOVBU.P 1(R1), g
MOVBU.P g, 1(R9)
poly1305_finish_ext_armv6_skip1:
MOVW $1, R11
MOVBU R11, 0(R9)
MOVW R11, 56(R5)
MOVW R5, R0
ADD $8, R13, R1
MOVW $16, R2
BL poly1305_blocks_armv6<>(SB)
poly1305_finish_ext_armv6_noremaining:
MOVW 20(R5), R0
MOVW 24(R5), R1
MOVW 28(R5), R2
MOVW 32(R5), R3
MOVW 36(R5), R4
MOVW R4>>26, R12
BIC $0xfc000000, R4, R4
ADD R12<<2, R12, R12
ADD R12, R0, R0
MOVW R0>>26, R12
BIC $0xfc000000, R0, R0
ADD R12, R1, R1
MOVW R1>>26, R12
BIC $0xfc000000, R1, R1
ADD R12, R2, R2
MOVW R2>>26, R12
BIC $0xfc000000, R2, R2
ADD R12, R3, R3
MOVW R3>>26, R12
BIC $0xfc000000, R3, R3
ADD R12, R4, R4
ADD $5, R0, R6
MOVW R6>>26, R12
BIC $0xfc000000, R6, R6
ADD R12, R1, R7
MOVW R7>>26, R12
BIC $0xfc000000, R7, R7
ADD R12, R2, g
MOVW g>>26, R12
BIC $0xfc000000, g, g
ADD R12, R3, R11
MOVW $-(1<<26), R12
ADD R11>>26, R12, R12
BIC $0xfc000000, R11, R11
ADD R12, R4, R9
MOVW R9>>31, R12
SUB $1, R12
AND R12, R6, R6
AND R12, R7, R7
AND R12, g, g
AND R12, R11, R11
AND R12, R9, R9
MVN R12, R12
AND R12, R0, R0
AND R12, R1, R1
AND R12, R2, R2
AND R12, R3, R3
AND R12, R4, R4
ORR R6, R0, R0
ORR R7, R1, R1
ORR g, R2, R2
ORR R11, R3, R3
ORR R9, R4, R4
ORR R1<<26, R0, R0
MOVW R1>>6, R1
ORR R2<<20, R1, R1
MOVW R2>>12, R2
ORR R3<<14, R2, R2
MOVW R3>>18, R3
ORR R4<<8, R3, R3
MOVW 40(R5), R6
MOVW 44(R5), R7
MOVW 48(R5), g
MOVW 52(R5), R11
ADD.S R6, R0, R0
ADC.S R7, R1, R1
ADC.S g, R2, R2
ADC.S R11, R3, R3
MOVM.IA [R0-R3], (R8)
MOVW R5, R12
EOR R0, R0, R0
EOR R1, R1, R1
EOR R2, R2, R2
EOR R3, R3, R3
EOR R4, R4, R4
EOR R5, R5, R5
EOR R6, R6, R6
EOR R7, R7, R7
MOVM.IA.W [R0-R7], (R12)
MOVM.IA [R0-R7], (R12)
MOVW 4(R13), g
RET

View File

@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build s390x,!go1.11 !arm,!amd64,!s390x,!ppc64le gccgo appengine nacl
// +build s390x,!go1.11 !amd64,!s390x,!ppc64le gccgo appengine nacl
package poly1305

View File

@ -33,5 +33,6 @@ go_library(
"//vendor/golang.org/x/crypto/curve25519:go_default_library",
"//vendor/golang.org/x/crypto/ed25519:go_default_library",
"//vendor/golang.org/x/crypto/poly1305:go_default_library",
"//vendor/golang.org/x/crypto/ssh/internal/bcrypt_pbkdf:go_default_library",
],
)

View File

@ -0,0 +1,10 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["bcrypt_pbkdf.go"],
importmap = "k8s.io/kops/vendor/golang.org/x/crypto/ssh/internal/bcrypt_pbkdf",
importpath = "golang.org/x/crypto/ssh/internal/bcrypt_pbkdf",
visibility = ["//vendor/golang.org/x/crypto/ssh:__subpackages__"],
deps = ["//vendor/golang.org/x/crypto/blowfish:go_default_library"],
)

View File

@ -0,0 +1,93 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package bcrypt_pbkdf implements bcrypt_pbkdf(3) from OpenBSD.
//
// See https://flak.tedunangst.com/post/bcrypt-pbkdf and
// https://cvsweb.openbsd.org/cgi-bin/cvsweb/src/lib/libutil/bcrypt_pbkdf.c.
package bcrypt_pbkdf
import (
"crypto/sha512"
"errors"
"golang.org/x/crypto/blowfish"
)
const blockSize = 32
// Key derives a key from the password, salt and rounds count, returning a
// []byte of length keyLen that can be used as a cryptographic key.
func Key(password, salt []byte, rounds, keyLen int) ([]byte, error) {
if rounds < 1 {
return nil, errors.New("bcrypt_pbkdf: number of rounds is too small")
}
if len(password) == 0 {
return nil, errors.New("bcrypt_pbkdf: empty password")
}
if len(salt) == 0 || len(salt) > 1<<20 {
return nil, errors.New("bcrypt_pbkdf: bad salt length")
}
if keyLen > 1024 {
return nil, errors.New("bcrypt_pbkdf: keyLen is too large")
}
numBlocks := (keyLen + blockSize - 1) / blockSize
key := make([]byte, numBlocks*blockSize)
h := sha512.New()
h.Write(password)
shapass := h.Sum(nil)
shasalt := make([]byte, 0, sha512.Size)
cnt, tmp := make([]byte, 4), make([]byte, blockSize)
for block := 1; block <= numBlocks; block++ {
h.Reset()
h.Write(salt)
cnt[0] = byte(block >> 24)
cnt[1] = byte(block >> 16)
cnt[2] = byte(block >> 8)
cnt[3] = byte(block)
h.Write(cnt)
bcryptHash(tmp, shapass, h.Sum(shasalt))
out := make([]byte, blockSize)
copy(out, tmp)
for i := 2; i <= rounds; i++ {
h.Reset()
h.Write(tmp)
bcryptHash(tmp, shapass, h.Sum(shasalt))
for j := 0; j < len(out); j++ {
out[j] ^= tmp[j]
}
}
for i, v := range out {
key[i*numBlocks+(block-1)] = v
}
}
return key[:keyLen], nil
}
var magic = []byte("OxychromaticBlowfishSwatDynamite")
func bcryptHash(out, shapass, shasalt []byte) {
c, err := blowfish.NewSaltedCipher(shapass, shasalt)
if err != nil {
panic(err)
}
for i := 0; i < 64; i++ {
blowfish.ExpandKey(shasalt, c)
blowfish.ExpandKey(shapass, c)
}
copy(out, magic)
for i := 0; i < 32; i += 8 {
for j := 0; j < 64; j++ {
c.Encrypt(out[i:i+8], out[i:i+8])
}
}
// Swap bytes due to different endianness.
for i := 0; i < 32; i += 4 {
out[i+3], out[i+2], out[i+1], out[i] = out[i], out[i+1], out[i+2], out[i+3]
}
}
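
A short usage sketch of Key. Since bcrypt_pbkdf is an internal package, the import path below is hypothetical and assumes the file has been copied somewhere importable; the 32+16 split mirrors how the ssh package derives an AES-256-CTR key and IV:

package main

import (
	"fmt"

	"example.local/bcrypt_pbkdf" // hypothetical path for a local copy
)

func main() {
	salt := []byte("0123456789abcdef") // 16 bytes, as OpenSSH uses
	k, err := bcrypt_pbkdf.Key([]byte("correct horse"), salt, 16, 32+16)
	if err != nil {
		panic(err)
	}
	key, iv := k[:32], k[32:]
	fmt.Printf("key=%x iv=%x\n", key, iv)
}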

View File

@ -7,6 +7,8 @@ package ssh
import (
"bytes"
"crypto"
"crypto/aes"
"crypto/cipher"
"crypto/dsa"
"crypto/ecdsa"
"crypto/elliptic"
@ -25,6 +27,7 @@ import (
"strings"
"golang.org/x/crypto/ed25519"
"golang.org/x/crypto/ssh/internal/bcrypt_pbkdf"
)
// These constants represent the algorithm names for key types supported by this
@ -559,9 +562,11 @@ func parseED25519(in []byte) (out PublicKey, rest []byte, err error) {
return nil, nil, err
}
key := ed25519.PublicKey(w.KeyBytes)
if l := len(w.KeyBytes); l != ed25519.PublicKeySize {
return nil, nil, fmt.Errorf("invalid size %d for Ed25519 public key", l)
}
return (ed25519PublicKey)(key), w.Rest, nil
return ed25519PublicKey(w.KeyBytes), w.Rest, nil
}
func (k ed25519PublicKey) Marshal() []byte {
@ -579,9 +584,11 @@ func (k ed25519PublicKey) Verify(b []byte, sig *Signature) error {
if sig.Format != k.Type() {
return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type())
}
if l := len(k); l != ed25519.PublicKeySize {
return fmt.Errorf("ssh: invalid size %d for Ed25519 public key", l)
}
edKey := (ed25519.PublicKey)(k)
if ok := ed25519.Verify(edKey, b, sig.Blob); !ok {
if ok := ed25519.Verify(ed25519.PublicKey(k), b, sig.Blob); !ok {
return errors.New("ssh: signature did not verify")
}
@ -835,6 +842,10 @@ func parseSKEd25519(in []byte) (out PublicKey, rest []byte, err error) {
return nil, nil, err
}
if l := len(w.KeyBytes); l != ed25519.PublicKeySize {
return nil, nil, fmt.Errorf("invalid size %d for Ed25519 public key", l)
}
key := new(skEd25519PublicKey)
key.application = w.Application
key.PublicKey = ed25519.PublicKey(w.KeyBytes)
@ -859,6 +870,9 @@ func (k *skEd25519PublicKey) Verify(data []byte, sig *Signature) error {
if sig.Format != k.Type() {
return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type())
}
if l := len(k.PublicKey); l != ed25519.PublicKeySize {
return fmt.Errorf("invalid size %d for Ed25519 public key", l)
}
h := sha256.New()
h.Write([]byte(k.application))
@ -895,8 +909,7 @@ func (k *skEd25519PublicKey) Verify(data []byte, sig *Signature) error {
original := Marshal(blob)
edKey := (ed25519.PublicKey)(k.PublicKey)
if ok := ed25519.Verify(edKey, original, edSig.Signature); !ok {
if ok := ed25519.Verify(k.PublicKey, original, edSig.Signature); !ok {
return errors.New("ssh: signature did not verify")
}
@ -1048,14 +1061,18 @@ func NewPublicKey(key interface{}) (PublicKey, error) {
case *dsa.PublicKey:
return (*dsaPublicKey)(key), nil
case ed25519.PublicKey:
return (ed25519PublicKey)(key), nil
if l := len(key); l != ed25519.PublicKeySize {
return nil, fmt.Errorf("ssh: invalid size %d for Ed25519 public key", l)
}
return ed25519PublicKey(key), nil
default:
return nil, fmt.Errorf("ssh: unsupported key type %T", key)
}
}
// ParsePrivateKey returns a Signer from a PEM encoded private key. It supports
// the same keys as ParseRawPrivateKey.
// the same keys as ParseRawPrivateKey. If the private key is encrypted, it
// will return a PassphraseMissingError.
func ParsePrivateKey(pemBytes []byte) (Signer, error) {
key, err := ParseRawPrivateKey(pemBytes)
if err != nil {
@ -1068,8 +1085,8 @@ func ParsePrivateKey(pemBytes []byte) (Signer, error) {
// ParsePrivateKeyWithPassphrase returns a Signer from a PEM encoded private
// key and passphrase. It supports the same keys as
// ParseRawPrivateKeyWithPassphrase.
func ParsePrivateKeyWithPassphrase(pemBytes, passPhrase []byte) (Signer, error) {
key, err := ParseRawPrivateKeyWithPassphrase(pemBytes, passPhrase)
func ParsePrivateKeyWithPassphrase(pemBytes, passphrase []byte) (Signer, error) {
key, err := ParseRawPrivateKeyWithPassphrase(pemBytes, passphrase)
if err != nil {
return nil, err
}
@ -1085,8 +1102,21 @@ func encryptedBlock(block *pem.Block) bool {
return strings.Contains(block.Headers["Proc-Type"], "ENCRYPTED")
}
// A PassphraseMissingError indicates that parsing this private key requires a
// passphrase. Use ParsePrivateKeyWithPassphrase.
type PassphraseMissingError struct {
// PublicKey will be set if the private key format includes an unencrypted
// public key along with the encrypted private key.
PublicKey PublicKey
}
func (*PassphraseMissingError) Error() string {
return "ssh: this private key is passphrase protected"
}
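
A minimal sketch of the error flow this type enables: try ParsePrivateKey first and retry with a passphrase only when PassphraseMissingError comes back. The loadSigner helper, file name, and passphrase are illustrative:

package main

import (
	"fmt"
	"io/ioutil"

	"golang.org/x/crypto/ssh"
)

func loadSigner(path string, passphrase []byte) (ssh.Signer, error) {
	pemBytes, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, err
	}
	signer, err := ssh.ParsePrivateKey(pemBytes)
	if _, ok := err.(*ssh.PassphraseMissingError); ok {
		// The key is encrypted; retry with the supplied passphrase.
		signer, err = ssh.ParsePrivateKeyWithPassphrase(pemBytes, passphrase)
	}
	return signer, err
}

func main() {
	s, err := loadSigner("id_ed25519", []byte("hunter2"))
	if err != nil {
		panic(err)
	}
	fmt.Println(s.PublicKey().Type())
}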
// ParseRawPrivateKey returns a private key from a PEM encoded private key. It
// supports RSA (PKCS#1), PKCS#8, DSA (OpenSSL), and ECDSA private keys.
// supports RSA (PKCS#1), PKCS#8, DSA (OpenSSL), and ECDSA private keys. If the
// private key is encrypted, it will return a PassphraseMissingError.
func ParseRawPrivateKey(pemBytes []byte) (interface{}, error) {
block, _ := pem.Decode(pemBytes)
if block == nil {
@ -1094,7 +1124,7 @@ func ParseRawPrivateKey(pemBytes []byte) (interface{}, error) {
}
if encryptedBlock(block) {
return nil, errors.New("ssh: cannot decode encrypted private keys")
return nil, &PassphraseMissingError{}
}
switch block.Type {
@ -1108,33 +1138,35 @@ func ParseRawPrivateKey(pemBytes []byte) (interface{}, error) {
case "DSA PRIVATE KEY":
return ParseDSAPrivateKey(block.Bytes)
case "OPENSSH PRIVATE KEY":
return parseOpenSSHPrivateKey(block.Bytes)
return parseOpenSSHPrivateKey(block.Bytes, unencryptedOpenSSHKey)
default:
return nil, fmt.Errorf("ssh: unsupported key type %q", block.Type)
}
}
// ParseRawPrivateKeyWithPassphrase returns a private key decrypted with
// passphrase from a PEM encoded private key. If wrong passphrase, return
// x509.IncorrectPasswordError.
func ParseRawPrivateKeyWithPassphrase(pemBytes, passPhrase []byte) (interface{}, error) {
// passphrase from a PEM encoded private key. If the passphrase is wrong, it
// will return x509.IncorrectPasswordError.
func ParseRawPrivateKeyWithPassphrase(pemBytes, passphrase []byte) (interface{}, error) {
block, _ := pem.Decode(pemBytes)
if block == nil {
return nil, errors.New("ssh: no key found")
}
buf := block.Bytes
if encryptedBlock(block) {
if x509.IsEncryptedPEMBlock(block) {
var err error
buf, err = x509.DecryptPEMBlock(block, passPhrase)
if err != nil {
if err == x509.IncorrectPasswordError {
return nil, err
}
return nil, fmt.Errorf("ssh: cannot decode encrypted private keys: %v", err)
}
if block.Type == "OPENSSH PRIVATE KEY" {
return parseOpenSSHPrivateKey(block.Bytes, passphraseProtectedOpenSSHKey(passphrase))
}
if !encryptedBlock(block) || !x509.IsEncryptedPEMBlock(block) {
return nil, errors.New("ssh: not an encrypted key")
}
buf, err := x509.DecryptPEMBlock(block, passphrase)
if err != nil {
if err == x509.IncorrectPasswordError {
return nil, err
}
return nil, fmt.Errorf("ssh: cannot decode encrypted private keys: %v", err)
}
switch block.Type {
@ -1144,8 +1176,6 @@ func ParseRawPrivateKeyWithPassphrase(pemBytes, passPhrase []byte) (interface{},
return x509.ParseECPrivateKey(buf)
case "DSA PRIVATE KEY":
return ParseDSAPrivateKey(buf)
case "OPENSSH PRIVATE KEY":
return parseOpenSSHPrivateKey(buf)
default:
return nil, fmt.Errorf("ssh: unsupported key type %q", block.Type)
}
@ -1183,9 +1213,60 @@ func ParseDSAPrivateKey(der []byte) (*dsa.PrivateKey, error) {
}, nil
}
// Implemented based on the documentation at
// https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key
func parseOpenSSHPrivateKey(key []byte) (crypto.PrivateKey, error) {
func unencryptedOpenSSHKey(cipherName, kdfName, kdfOpts string, privKeyBlock []byte) ([]byte, error) {
if kdfName != "none" || cipherName != "none" {
return nil, &PassphraseMissingError{}
}
if kdfOpts != "" {
return nil, errors.New("ssh: invalid openssh private key")
}
return privKeyBlock, nil
}
func passphraseProtectedOpenSSHKey(passphrase []byte) openSSHDecryptFunc {
return func(cipherName, kdfName, kdfOpts string, privKeyBlock []byte) ([]byte, error) {
if kdfName == "none" || cipherName == "none" {
return nil, errors.New("ssh: key is not password protected")
}
if kdfName != "bcrypt" {
return nil, fmt.Errorf("ssh: unknown KDF %q, only supports %q", kdfName, "bcrypt")
}
var opts struct {
Salt string
Rounds uint32
}
if err := Unmarshal([]byte(kdfOpts), &opts); err != nil {
return nil, err
}
k, err := bcrypt_pbkdf.Key(passphrase, []byte(opts.Salt), int(opts.Rounds), 32+16)
if err != nil {
return nil, err
}
key, iv := k[:32], k[32:]
if cipherName != "aes256-ctr" {
return nil, fmt.Errorf("ssh: unknown cipher %q, only supports %q", cipherName, "aes256-ctr")
}
c, err := aes.NewCipher(key)
if err != nil {
return nil, err
}
ctr := cipher.NewCTR(c, iv)
ctr.XORKeyStream(privKeyBlock, privKeyBlock)
return privKeyBlock, nil
}
}
type openSSHDecryptFunc func(CipherName, KdfName, KdfOpts string, PrivKeyBlock []byte) ([]byte, error)
// parseOpenSSHPrivateKey parses an OpenSSH private key, using the decrypt
// function to unwrap the encrypted portion. unencryptedOpenSSHKey can be used
// as the decrypt function to parse an unencrypted private key. See
// https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key.
func parseOpenSSHPrivateKey(key []byte, decrypt openSSHDecryptFunc) (crypto.PrivateKey, error) {
const magic = "openssh-key-v1\x00"
if len(key) < len(magic) || string(key[:len(magic)]) != magic {
return nil, errors.New("ssh: invalid openssh private key format")
@ -1204,9 +1285,22 @@ func parseOpenSSHPrivateKey(key []byte) (crypto.PrivateKey, error) {
if err := Unmarshal(remaining, &w); err != nil {
return nil, err
}
if w.NumKeys != 1 {
// We only support single key files, and so does OpenSSH.
// https://github.com/openssh/openssh-portable/blob/4103a3ec7/sshkey.c#L4171
return nil, errors.New("ssh: multi-key files are not supported")
}
if w.KdfName != "none" || w.CipherName != "none" {
return nil, errors.New("ssh: cannot decode encrypted private keys")
privKeyBlock, err := decrypt(w.CipherName, w.KdfName, w.KdfOpts, w.PrivKeyBlock)
if err != nil {
if err, ok := err.(*PassphraseMissingError); ok {
pub, errPub := ParsePublicKey(w.PubKey)
if errPub != nil {
return nil, fmt.Errorf("ssh: failed to parse embedded public key: %v", errPub)
}
err.PublicKey = pub
}
return nil, err
}
pk1 := struct {
@ -1216,15 +1310,13 @@ func parseOpenSSHPrivateKey(key []byte) (crypto.PrivateKey, error) {
Rest []byte `ssh:"rest"`
}{}
if err := Unmarshal(w.PrivKeyBlock, &pk1); err != nil {
return nil, err
if err := Unmarshal(privKeyBlock, &pk1); err != nil || pk1.Check1 != pk1.Check2 {
if w.CipherName != "none" {
return nil, x509.IncorrectPasswordError
}
return nil, errors.New("ssh: malformed OpenSSH key")
}
if pk1.Check1 != pk1.Check2 {
return nil, errors.New("ssh: checkint mismatch")
}
// we only handle ed25519 and rsa keys currently
switch pk1.Keytype {
case KeyAlgoRSA:
// https://github.com/openssh/openssh-portable/blob/master/sshkey.c#L2760-L2773
@ -1243,10 +1335,8 @@ func parseOpenSSHPrivateKey(key []byte) (crypto.PrivateKey, error) {
return nil, err
}
for i, b := range key.Pad {
if int(b) != i+1 {
return nil, errors.New("ssh: padding not as expected")
}
if err := checkOpenSSHKeyPadding(key.Pad); err != nil {
return nil, err
}
pk := &rsa.PrivateKey{
@ -1281,20 +1371,78 @@ func parseOpenSSHPrivateKey(key []byte) (crypto.PrivateKey, error) {
return nil, errors.New("ssh: private key unexpected length")
}
for i, b := range key.Pad {
if int(b) != i+1 {
return nil, errors.New("ssh: padding not as expected")
}
if err := checkOpenSSHKeyPadding(key.Pad); err != nil {
return nil, err
}
pk := ed25519.PrivateKey(make([]byte, ed25519.PrivateKeySize))
copy(pk, key.Priv)
return &pk, nil
case KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521:
key := struct {
Curve string
Pub []byte
D *big.Int
Comment string
Pad []byte `ssh:"rest"`
}{}
if err := Unmarshal(pk1.Rest, &key); err != nil {
return nil, err
}
if err := checkOpenSSHKeyPadding(key.Pad); err != nil {
return nil, err
}
var curve elliptic.Curve
switch key.Curve {
case "nistp256":
curve = elliptic.P256()
case "nistp384":
curve = elliptic.P384()
case "nistp521":
curve = elliptic.P521()
default:
return nil, errors.New("ssh: unhandled elliptic curve: " + key.Curve)
}
X, Y := elliptic.Unmarshal(curve, key.Pub)
if X == nil || Y == nil {
return nil, errors.New("ssh: failed to unmarshal public key")
}
if key.D.Cmp(curve.Params().N) >= 0 {
return nil, errors.New("ssh: scalar is out of range")
}
x, y := curve.ScalarBaseMult(key.D.Bytes())
if x.Cmp(X) != 0 || y.Cmp(Y) != 0 {
return nil, errors.New("ssh: public key does not match private key")
}
return &ecdsa.PrivateKey{
PublicKey: ecdsa.PublicKey{
Curve: curve,
X: X,
Y: Y,
},
D: key.D,
}, nil
default:
return nil, errors.New("ssh: unhandled key type")
}
}
func checkOpenSSHKeyPadding(pad []byte) error {
for i, b := range pad {
if int(b) != i+1 {
return errors.New("ssh: padding not as expected")
}
}
return nil
}
// FingerprintLegacyMD5 returns the user presentation of the key's
// fingerprint as described by RFC 4716 section 4.
func FingerprintLegacyMD5(pubKey PublicKey) string {

View File

@ -7,6 +7,7 @@ package terminal
import (
"bytes"
"io"
"runtime"
"strconv"
"sync"
"unicode/utf8"
@ -939,6 +940,8 @@ func (s *stRingBuffer) NthPreviousEntry(n int) (value string, ok bool) {
// readPasswordLine reads from reader until it finds \n or io.EOF.
// The slice returned does not include the \n.
// readPasswordLine also ignores any \r it finds.
// Windows uses \r as end of line. So, on Windows, readPasswordLine
// reads until it finds \r and ignores any \n it finds during processing.
func readPasswordLine(reader io.Reader) ([]byte, error) {
var buf [1]byte
var ret []byte
@ -947,10 +950,20 @@ func readPasswordLine(reader io.Reader) ([]byte, error) {
n, err := reader.Read(buf[:])
if n > 0 {
switch buf[0] {
case '\b':
if len(ret) > 0 {
ret = ret[:len(ret)-1]
}
case '\n':
return ret, nil
if runtime.GOOS != "windows" {
return ret, nil
}
// otherwise ignore \n
case '\r':
// remove \r from passwords on Windows
if runtime.GOOS == "windows" {
return ret, nil
}
// otherwise ignore \r
default:
ret = append(ret, buf[0])
}

View File

@ -85,8 +85,8 @@ func ReadPassword(fd int) ([]byte, error) {
}
old := st
st &^= (windows.ENABLE_ECHO_INPUT)
st |= (windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT | windows.ENABLE_PROCESSED_OUTPUT)
st &^= (windows.ENABLE_ECHO_INPUT | windows.ENABLE_LINE_INPUT)
st |= (windows.ENABLE_PROCESSED_OUTPUT | windows.ENABLE_PROCESSED_INPUT)
if err := windows.SetConsoleMode(windows.Handle(fd), st); err != nil {
return nil, err
}
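
For context, a small sketch of the user-facing API affected by this console-mode change; it assumes an interactive stdin:

package main

import (
	"fmt"
	"os"

	"golang.org/x/crypto/ssh/terminal"
)

func main() {
	fmt.Print("Password: ")
	pw, err := terminal.ReadPassword(int(os.Stdin.Fd()))
	fmt.Println()
	if err != nil {
		panic(err)
	}
	fmt.Printf("read %d bytes\n", len(pw))
	for i := range pw {
		pw[i] = 0 // best-effort scrub of the password from memory
	}
}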

vendor/gopkg.in/yaml.v2/scannerc.go generated vendored
View File

@ -626,32 +626,18 @@ func trace(args ...interface{}) func() {
func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool {
// While we need more tokens to fetch, do it.
for {
// Check if we really need to fetch more tokens.
need_more_tokens := false
if parser.tokens_head == len(parser.tokens) {
// Queue is empty.
need_more_tokens = true
} else {
// Check if any potential simple key may occupy the head position.
for i := len(parser.simple_keys) - 1; i >= 0; i-- {
simple_key := &parser.simple_keys[i]
if simple_key.token_number < parser.tokens_parsed {
break
}
if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok {
return false
} else if valid && simple_key.token_number == parser.tokens_parsed {
need_more_tokens = true
break
}
if parser.tokens_head != len(parser.tokens) {
// If queue is non-empty, check if any potential simple key may
// occupy the head position.
head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed]
if !ok {
break
} else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok {
return false
} else if !valid {
break
}
}
// We are finished.
if !need_more_tokens {
break
}
// Fetch the next token.
if !yaml_parser_fetch_next_token(parser) {
return false
@ -883,6 +869,7 @@ func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
return false
}
parser.simple_keys[len(parser.simple_keys)-1] = simple_key
parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1
}
return true
}
@ -897,9 +884,10 @@ func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
"while scanning a simple key", parser.simple_keys[i].mark,
"could not find expected ':'")
}
// Remove the key from the stack.
parser.simple_keys[i].possible = false
delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number)
}
// Remove the key from the stack.
parser.simple_keys[i].possible = false
return true
}
@ -930,7 +918,9 @@ func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
if parser.flow_level > 0 {
parser.flow_level--
parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1]
last := len(parser.simple_keys) - 1
delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number)
parser.simple_keys = parser.simple_keys[:last]
}
return true
}
@ -1007,6 +997,8 @@ func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool {
// Initialize the simple key stack.
parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
parser.simple_keys_by_tok = make(map[int]int)
// A simple key is allowed at the beginning of the stream.
parser.simple_key_allowed = true
@ -1310,6 +1302,7 @@ func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
// Remove the simple key.
simple_key.possible = false
delete(parser.simple_keys_by_tok, simple_key.token_number)
// A simple key cannot follow another simple key.
parser.simple_key_allowed = false
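
A standalone sketch of the pattern these hunks introduce: a stack of simple keys plus a map from token number to stack index, so the "is a key possible at the head?" check is O(1) instead of a backward scan. Names are illustrative, not yaml.v2's:

package main

import "fmt"

type simpleKey struct {
	tokenNumber int
	possible    bool
}

type scanner struct {
	keys      []simpleKey
	keysByTok map[int]int // token number -> index into keys
}

func (s *scanner) push(k simpleKey) {
	s.keys = append(s.keys, k)
	s.keysByTok[k.tokenNumber] = len(s.keys) - 1
}

// remove mirrors yaml_parser_remove_simple_key: the map entry is deleted
// whenever the stack entry is invalidated.
func (s *scanner) remove(i int) {
	delete(s.keysByTok, s.keys[i].tokenNumber)
	s.keys[i].possible = false
}

func (s *scanner) headKey(tokensParsed int) (simpleKey, bool) {
	i, ok := s.keysByTok[tokensParsed] // O(1), no scan over s.keys
	if !ok {
		return simpleKey{}, false
	}
	return s.keys[i], true
}

func main() {
	s := &scanner{keysByTok: make(map[int]int)}
	s.push(simpleKey{tokenNumber: 7, possible: true})
	k, ok := s.headKey(7)
	fmt.Println(k.possible, ok)
	s.remove(0)
	_, ok = s.headKey(7)
	fmt.Println(ok)
}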

vendor/gopkg.in/yaml.v2/yamlh.go generated vendored
View File

@ -579,6 +579,7 @@ type yaml_parser_t struct {
simple_key_allowed bool // May a simple key occur at the current position?
simple_keys []yaml_simple_key_t // The stack of simple keys.
simple_keys_by_tok map[int]int // possible simple_key indexes indexed by token_number
// Parser stuff

View File

@ -131,7 +131,7 @@ message MutatingWebhook {
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 11;
// SideEffects states whether this webhookk has side effects.
// SideEffects states whether this webhook has side effects.
// Acceptable values are: Unknown, None, Some, NoneOnDryRun
// Webhooks with side effects MUST implement a reconciliation system, since a request may be
// rejected by a future step in the admission change and the side effects therefore need to be undone.
@ -385,7 +385,7 @@ message ValidatingWebhook {
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 10;
// SideEffects states whether this webhookk has side effects.
// SideEffects states whether this webhook has side effects.
// Acceptable values are: Unknown, None, Some, NoneOnDryRun
// Webhooks with side effects MUST implement a reconciliation system, since a request may be
// rejected by a future step in the admission change and the side effects therefore need to be undone.

View File

@ -275,7 +275,7 @@ type ValidatingWebhook struct {
// +optional
ObjectSelector *metav1.LabelSelector `json:"objectSelector,omitempty" protobuf:"bytes,10,opt,name=objectSelector"`
// SideEffects states whether this webhookk has side effects.
// SideEffects states whether this webhook has side effects.
// Acceptable values are: Unknown, None, Some, NoneOnDryRun
// Webhooks with side effects MUST implement a reconciliation system, since a request may be
// rejected by a future step in the admission change and the side effects therefore need to be undone.
@ -407,7 +407,7 @@ type MutatingWebhook struct {
// +optional
ObjectSelector *metav1.LabelSelector `json:"objectSelector,omitempty" protobuf:"bytes,11,opt,name=objectSelector"`
// SideEffects states whether this webhookk has side effects.
// SideEffects states whether this webhook has side effects.
// Acceptable values are: Unknown, None, Some, NoneOnDryRun
// Webhooks with side effects MUST implement a reconciliation system, since a request may be
// rejected by a future step in the admission change and the side effects therefore need to be undone.

View File

@ -36,7 +36,7 @@ var map_MutatingWebhook = map[string]string{
"matchPolicy": "matchPolicy defines how the \"rules\" list is used to match incoming requests. Allowed values are \"Exact\" or \"Equivalent\".\n\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook.\n\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook.\n\nDefaults to \"Exact\"",
"namespaceSelector": "NamespaceSelector decides whether to run the webhook on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. If the object is another cluster scoped resource, it never skips the webhook.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the webhook on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything.",
"objectSelector": "ObjectSelector decides whether to run the webhook based on if the object has matching labels. objectSelector is evaluated against both the oldObject and newObject that would be sent to the webhook, and is considered to match if either object matches the selector. A null object (oldObject in the case of create, or newObject in the case of delete) or an object that cannot have labels (like a DeploymentRollback or a PodProxyOptions object) is not considered to match. Use the object selector only if the webhook is opt-in, because end users may skip the admission webhook by setting the labels. Default to the empty LabelSelector, which matches everything.",
"sideEffects": "SideEffects states whether this webhookk has side effects. Acceptable values are: Unknown, None, Some, NoneOnDryRun Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission change and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some. Defaults to Unknown.",
"sideEffects": "SideEffects states whether this webhook has side effects. Acceptable values are: Unknown, None, Some, NoneOnDryRun Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission change and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some. Defaults to Unknown.",
"timeoutSeconds": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 30 seconds.",
"admissionReviewVersions": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy. Default to `['v1beta1']`.",
"reinvocationPolicy": "reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. Allowed values are \"Never\" and \"IfNeeded\".\n\nNever: the webhook will not be called more than once in a single admission evaluation.\n\nIfNeeded: the webhook will be called at least one additional time as part of the admission evaluation if the object being admitted is modified by other admission plugins after the initial webhook call. Webhooks that specify this option *must* be idempotent, able to process objects they previously admitted. Note: * the number of additional invocations is not guaranteed to be exactly one. * if additional invocations result in further modifications to the object, webhooks are not guaranteed to be invoked again. * webhooks that use this option may be reordered to minimize the number of additional invocations. * to validate an object after all mutations are guaranteed complete, use a validating admission webhook instead.\n\nDefaults to \"Never\".",
@ -108,7 +108,7 @@ var map_ValidatingWebhook = map[string]string{
"matchPolicy": "matchPolicy defines how the \"rules\" list is used to match incoming requests. Allowed values are \"Exact\" or \"Equivalent\".\n\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook.\n\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook.\n\nDefaults to \"Exact\"",
"namespaceSelector": "NamespaceSelector decides whether to run the webhook on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. If the object is another cluster scoped resource, it never skips the webhook.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the webhook on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything.",
"objectSelector": "ObjectSelector decides whether to run the webhook based on if the object has matching labels. objectSelector is evaluated against both the oldObject and newObject that would be sent to the webhook, and is considered to match if either object matches the selector. A null object (oldObject in the case of create, or newObject in the case of delete) or an object that cannot have labels (like a DeploymentRollback or a PodProxyOptions object) is not considered to match. Use the object selector only if the webhook is opt-in, because end users may skip the admission webhook by setting the labels. Default to the empty LabelSelector, which matches everything.",
"sideEffects": "SideEffects states whether this webhookk has side effects. Acceptable values are: Unknown, None, Some, NoneOnDryRun Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission change and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some. Defaults to Unknown.",
"sideEffects": "SideEffects states whether this webhook has side effects. Acceptable values are: Unknown, None, Some, NoneOnDryRun Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission change and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some. Defaults to Unknown.",
"timeoutSeconds": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 30 seconds.",
"admissionReviewVersions": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy. Default to `['v1beta1']`.",
}

View File

@ -151,7 +151,7 @@ type ScaleStatus struct {
// MetricSourceType indicates the type of metric.
type MetricSourceType string
var (
const (
// ObjectMetricSourceType is a metric describing a kubernetes object
// (for example, hits-per-second on an Ingress object).
ObjectMetricSourceType MetricSourceType = "Object"
@ -322,7 +322,7 @@ type MetricStatus struct {
// a HorizontalPodAutoscaler.
type HorizontalPodAutoscalerConditionType string
var (
const (
// ScalingActive indicates that the HPA controller is able to scale if necessary:
// it's correctly configured, can fetch the desired metrics, and isn't disabled.
ScalingActive HorizontalPodAutoscalerConditionType = "ScalingActive"

View File

@ -62,7 +62,7 @@ type HorizontalPodAutoscalerSpec struct {
// MetricSourceType indicates the type of metric.
type MetricSourceType string
var (
const (
// ObjectMetricSourceType is a metric describing a kubernetes object
// (for example, hits-per-second on an Ingress object).
ObjectMetricSourceType MetricSourceType = "Object"
@ -231,7 +231,7 @@ type HorizontalPodAutoscalerStatus struct {
// a HorizontalPodAutoscaler.
type HorizontalPodAutoscalerConditionType string
var (
const (
// ScalingActive indicates that the HPA controller is able to scale if necessary:
// it's correctly configured, can fetch the desired metrics, and isn't disabled.
ScalingActive HorizontalPodAutoscalerConditionType = "ScalingActive"

View File

@ -120,7 +120,7 @@ type MetricSpec struct {
// MetricSourceType indicates the type of metric.
type MetricSourceType string
var (
const (
// ObjectMetricSourceType is a metric describing a kubernetes object
// (for example, hits-per-second on an Ingress object).
ObjectMetricSourceType MetricSourceType = "Object"
@ -221,7 +221,7 @@ type MetricTarget struct {
// "Value", "AverageValue", or "Utilization"
type MetricTargetType string
var (
const (
// UtilizationMetricType declares a MetricTarget is an AverageUtilization value
UtilizationMetricType MetricTargetType = "Utilization"
// ValueMetricType declares a MetricTarget is a raw value
@ -262,7 +262,7 @@ type HorizontalPodAutoscalerStatus struct {
// a HorizontalPodAutoscaler.
type HorizontalPodAutoscalerConditionType string
var (
const (
// ScalingActive indicates that the HPA controller is able to scale if necessary:
// it's correctly configured, can fetch the desired metrics, and isn't disabled.
ScalingActive HorizontalPodAutoscalerConditionType = "ScalingActive"

View File

@ -129,27 +129,27 @@ type CertificateSigningRequestList struct {
type KeyUsage string
const (
UsageSigning KeyUsage = "signing"
UsageDigitalSignature KeyUsage = "digital signature"
UsageContentCommittment KeyUsage = "content commitment"
UsageKeyEncipherment KeyUsage = "key encipherment"
UsageKeyAgreement KeyUsage = "key agreement"
UsageDataEncipherment KeyUsage = "data encipherment"
UsageCertSign KeyUsage = "cert sign"
UsageCRLSign KeyUsage = "crl sign"
UsageEncipherOnly KeyUsage = "encipher only"
UsageDecipherOnly KeyUsage = "decipher only"
UsageAny KeyUsage = "any"
UsageServerAuth KeyUsage = "server auth"
UsageClientAuth KeyUsage = "client auth"
UsageCodeSigning KeyUsage = "code signing"
UsageEmailProtection KeyUsage = "email protection"
UsageSMIME KeyUsage = "s/mime"
UsageIPsecEndSystem KeyUsage = "ipsec end system"
UsageIPsecTunnel KeyUsage = "ipsec tunnel"
UsageIPsecUser KeyUsage = "ipsec user"
UsageTimestamping KeyUsage = "timestamping"
UsageOCSPSigning KeyUsage = "ocsp signing"
UsageMicrosoftSGC KeyUsage = "microsoft sgc"
UsageNetscapSGC KeyUsage = "netscape sgc"
UsageSigning KeyUsage = "signing"
UsageDigitalSignature KeyUsage = "digital signature"
UsageContentCommitment KeyUsage = "content commitment"
UsageKeyEncipherment KeyUsage = "key encipherment"
UsageKeyAgreement KeyUsage = "key agreement"
UsageDataEncipherment KeyUsage = "data encipherment"
UsageCertSign KeyUsage = "cert sign"
UsageCRLSign KeyUsage = "crl sign"
UsageEncipherOnly KeyUsage = "encipher only"
UsageDecipherOnly KeyUsage = "decipher only"
UsageAny KeyUsage = "any"
UsageServerAuth KeyUsage = "server auth"
UsageClientAuth KeyUsage = "client auth"
UsageCodeSigning KeyUsage = "code signing"
UsageEmailProtection KeyUsage = "email protection"
UsageSMIME KeyUsage = "s/mime"
UsageIPsecEndSystem KeyUsage = "ipsec end system"
UsageIPsecTunnel KeyUsage = "ipsec tunnel"
UsageIPsecUser KeyUsage = "ipsec user"
UsageTimestamping KeyUsage = "timestamping"
UsageOCSPSigning KeyUsage = "ocsp signing"
UsageMicrosoftSGC KeyUsage = "microsoft sgc"
UsageNetscapeSGC KeyUsage = "netscape sgc"
)

View File

@ -14,6 +14,7 @@ go_library(
"types.go",
"types_swagger_doc_generated.go",
"well_known_labels.go",
"well_known_taints.go",
"zz_generated.deepcopy.go",
],
importmap = "k8s.io/kops/vendor/k8s.io/api/core/v1",

File diff suppressed because it is too large

View File

@ -1142,7 +1142,7 @@ message EnvVar {
// EnvVarSource represents a source for the value of an EnvVar.
message EnvVarSource {
// Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations,
// spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP.
// spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
// +optional
optional ObjectFieldSelector fieldRef = 1;
@ -3145,6 +3145,15 @@ message PodLogOptions {
// slightly more or slightly less than the specified limit.
// +optional
optional int64 limitBytes = 8;
// insecureSkipTLSVerifyBackend indicates that the apiserver should not confirm the validity of the
// serving certificate of the backend it is connecting to. This will make the HTTPS connection between the apiserver
// and the backend insecure. This means the apiserver cannot verify the log data it is receiving came from the real
// kubelet. If the kubelet is configured to verify the apiserver's TLS credentials, it does not mean the
// connection to the real kubelet is vulnerable to a man in the middle attack (e.g. an attacker could not intercept
// the actual log data coming from the real kubelet).
// +optional
optional bool insecureSkipTLSVerifyBackend = 9;
}
// PodPortForwardOptions is the query options to a Pod's port forward call
@ -3375,7 +3384,6 @@ message PodSpec {
// in the same pod, and the first process in each container will not be assigned PID 1.
// HostPID and ShareProcessNamespace cannot both be set.
// Optional: Default to false.
// This field is beta-level and may be disabled with the PodShareProcessNamespace feature.
// +k8s:conversion-gen=false
// +optional
optional bool shareProcessNamespace = 27;
@ -4733,6 +4741,21 @@ message ServiceSpec {
// cluster (e.g. IPv6 in IPv4 only cluster) is an error condition and will fail during clusterIP assignment.
// +optional
optional string ipFamily = 15;
// topologyKeys is a preference-order list of topology keys which
// implementations of services should use to preferentially sort endpoints
// when accessing this Service; it cannot be used at the same time as
// externalTrafficPolicy=Local.
// Topology keys must be valid label keys and at most 16 keys may be specified.
// Endpoints are chosen based on the first topology key with available backends.
// If this field is specified and all entries have no backends that match
// the topology of the client, the service has no backends for that client
// and connections should fail.
// The special value "*" may be used to mean "any topology". This catch-all
// value, if used, only makes sense as the last value in the list.
// If this is not specified or empty, no topology constraints will be applied.
// +optional
repeated string topologyKeys = 16;
}
// ServiceStatus represents the current status of a service.
@ -5032,7 +5055,6 @@ message VolumeMount {
// Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.
// Defaults to "" (volume's root).
// SubPathExpr and SubPath are mutually exclusive.
// This field is beta in 1.15.
// +optional
optional string subPathExpr = 6;
}
@ -5249,7 +5271,7 @@ message WindowsSecurityContextOptions {
// Defaults to the user specified in image metadata if unspecified.
// May also be set in PodSecurityContext. If set in both SecurityContext and
// PodSecurityContext, the value specified in SecurityContext takes precedence.
// This field is alpha-level and it is only honored by servers that enable the WindowsRunAsUserName feature flag.
// This field is beta-level and may be disabled with the WindowsRunAsUserName feature flag.
// +optional
optional string runAsUserName = 3;
}

vendor/k8s.io/api/core/v1/types.go generated vendored
View File

@ -30,6 +30,8 @@ const (
NamespaceAll string = ""
// NamespaceNodeLease is the namespace where we place node lease objects (used for node heartbeats)
NamespaceNodeLease string = "kube-node-lease"
// TopologyKeyAny is the service topology key that matches any node
TopologyKeyAny string = "*"
)
// Volume represents a named volume in a pod that may be accessed by any container in the pod.
@ -1784,7 +1786,6 @@ type VolumeMount struct {
// Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.
// Defaults to "" (volume's root).
// SubPathExpr and SubPath are mutually exclusive.
// This field is beta in 1.15.
// +optional
SubPathExpr string `json:"subPathExpr,omitempty" protobuf:"bytes,6,opt,name=subPathExpr"`
}
@ -1847,7 +1848,7 @@ type EnvVar struct {
// EnvVarSource represents a source for the value of an EnvVar.
type EnvVarSource struct {
// Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations,
// spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP.
// spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
// +optional
FieldRef *ObjectFieldSelector `json:"fieldRef,omitempty" protobuf:"bytes,1,opt,name=fieldRef"`
// Selects a resource of the container: only resources limits and requests
@ -2941,7 +2942,6 @@ type PodSpec struct {
// in the same pod, and the first process in each container will not be assigned PID 1.
// HostPID and ShareProcessNamespace cannot both be set.
// Optional: Default to false.
// This field is beta-level and may be disabled with the PodShareProcessNamespace feature.
// +k8s:conversion-gen=false
// +optional
ShareProcessNamespace *bool `json:"shareProcessNamespace,omitempty" protobuf:"varint,27,opt,name=shareProcessNamespace"`
@ -3828,6 +3828,8 @@ const (
IPv4Protocol IPFamily = "IPv4"
// IPv6Protocol indicates that this IP is IPv6 protocol
IPv6Protocol IPFamily = "IPv6"
// MaxServiceTopologyKeys is the largest number of topology keys allowed on a service
MaxServiceTopologyKeys = 16
)
// ServiceSpec describes the attributes that a user creates on a service.
@ -3942,6 +3944,7 @@ type ServiceSpec struct {
// of peer discovery.
// +optional
PublishNotReadyAddresses bool `json:"publishNotReadyAddresses,omitempty" protobuf:"varint,13,opt,name=publishNotReadyAddresses"`
// sessionAffinityConfig contains the configurations of session affinity.
// +optional
SessionAffinityConfig *SessionAffinityConfig `json:"sessionAffinityConfig,omitempty" protobuf:"bytes,14,opt,name=sessionAffinityConfig"`
@ -3955,6 +3958,21 @@ type ServiceSpec struct {
// cluster (e.g. IPv6 in IPv4 only cluster) is an error condition and will fail during clusterIP assignment.
// +optional
IPFamily *IPFamily `json:"ipFamily,omitempty" protobuf:"bytes,15,opt,name=ipFamily,Configcasttype=IPFamily"`
// topologyKeys is a preference-order list of topology keys which
// implementations of services should use to preferentially sort endpoints
// when accessing this Service; it cannot be used at the same time as
// externalTrafficPolicy=Local.
// Topology keys must be valid label keys and at most 16 keys may be specified.
// Endpoints are chosen based on the first topology key with available backends.
// If this field is specified and all entries have no backends that match
// the topology of the client, the service has no backends for that client
// and connections should fail.
// The special value "*" may be used to mean "any topology". This catch-all
// value, if used, only makes sense as the last value in the list.
// If this is not specified or empty, no topology constraints will be applied.
// +optional
TopologyKeys []string `json:"topologyKeys,omitempty" protobuf:"bytes,16,opt,name=topologyKeys"`
}
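To make the preference-order semantics concrete, here is a minimal sketch (not from the vendored code) of how a consumer such as a service proxy might filter endpoints; the pickEndpoints helper and its deliberately simplified Endpoint type are hypothetical, and MaxServiceTopologyKeys above caps the list at 16 keys:
package topology
// Endpoint is a simplified stand-in carrying only topology labels.
type Endpoint struct {
	Topology map[string]string
}
// pickEndpoints returns the endpoints matching the first topology key whose
// value on the client's node matches at least one endpoint. "*" (TopologyKeyAny)
// matches everything and only makes sense as the last entry. An empty result
// means the service has no backends for this client.
func pickEndpoints(topologyKeys []string, nodeLabels map[string]string, endpoints []Endpoint) []Endpoint {
	for _, key := range topologyKeys {
		if key == "*" {
			return endpoints
		}
		want, ok := nodeLabels[key]
		if !ok {
			continue
		}
		var matched []Endpoint
		for _, ep := range endpoints {
			if ep.Topology[key] == want {
				matched = append(matched, ep)
			}
		}
		if len(matched) > 0 {
			return matched
		}
	}
	return nil
}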
// ServicePort contains information on service's port.
@ -4233,7 +4251,7 @@ type NodeSpec struct {
// Deprecated. Not all kubelets will set this field. Remove field after 1.13.
// see: https://issues.k8s.io/61966
// +optional
DoNotUse_ExternalID string `json:"externalID,omitempty" protobuf:"bytes,2,opt,name=externalID"`
DoNotUseExternalID string `json:"externalID,omitempty" protobuf:"bytes,2,opt,name=externalID"`
}
// NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil.
@ -4660,6 +4678,12 @@ const (
NamespaceTerminating NamespacePhase = "Terminating"
)
const (
// NamespaceTerminatingCause is returned as a defaults.cause item when a change is
// forbidden due to the namespace being terminated.
NamespaceTerminatingCause metav1.CauseType = "NamespaceTerminating"
)
type NamespaceConditionType string
// These are valid conditions of a namespace.
@ -4670,6 +4694,10 @@ const (
NamespaceDeletionContentFailure NamespaceConditionType = "NamespaceDeletionContentFailure"
// NamespaceDeletionGVParsingFailure contains information about namespace deleter errors parsing GV for legacy types.
NamespaceDeletionGVParsingFailure NamespaceConditionType = "NamespaceDeletionGroupVersionParsingFailure"
// NamespaceContentRemaining contains information about resources remaining in a namespace.
NamespaceContentRemaining NamespaceConditionType = "NamespaceContentRemaining"
// NamespaceFinalizersRemaining contains information about which finalizers are on resources remaining in a namespace.
NamespaceFinalizersRemaining NamespaceConditionType = "NamespaceFinalizersRemaining"
)
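A brief hedged example of consuming these conditions (assuming the namespace deletion controller has populated Namespace.Status.Conditions; the package and function names are illustrative):
package nsutil
import corev1 "k8s.io/api/core/v1"
// deletionBlocked reports whether a terminating namespace still has remaining
// content or finalizers, per the NamespaceContentRemaining and
// NamespaceFinalizersRemaining conditions above.
func deletionBlocked(ns corev1.Namespace) bool {
	for _, cond := range ns.Status.Conditions {
		if cond.Status != corev1.ConditionTrue {
			continue
		}
		if cond.Type == corev1.NamespaceContentRemaining || cond.Type == corev1.NamespaceFinalizersRemaining {
			return true
		}
	}
	return false
}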
// NamespaceCondition contains details about state of namespace.
@ -4765,6 +4793,7 @@ type Preconditions struct {
UID *types.UID `json:"uid,omitempty" protobuf:"bytes,1,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"`
}
// +k8s:conversion-gen:explicit-from=net/url.Values
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PodLogOptions is the query options for a Pod's logs REST call.
@ -4805,8 +4834,18 @@ type PodLogOptions struct {
// slightly more or slightly less than the specified limit.
// +optional
LimitBytes *int64 `json:"limitBytes,omitempty" protobuf:"varint,8,opt,name=limitBytes"`
// insecureSkipTLSVerifyBackend indicates that the apiserver should not confirm the validity of the
// serving certificate of the backend it is connecting to. This will make the HTTPS connection between the apiserver
// and the backend insecure. This means the apiserver cannot verify the log data it is receiving came from the real
// kubelet. If the kubelet is configured to verify the apiserver's TLS credentials, this does not make the
// connection to the real kubelet vulnerable to a man-in-the-middle attack (e.g. an attacker could not intercept
// the actual log data coming from the real kubelet).
// +optional
InsecureSkipTLSVerifyBackend bool `json:"insecureSkipTLSVerifyBackend,omitempty" protobuf:"varint,9,opt,name=insecureSkipTLSVerifyBackend"`
}
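For context, a hedged client-go sketch streaming logs with the new field set; the pod name and namespace are placeholders, and in client-go releases contemporary with this vendor drop rest.Request.Stream takes a context:
package main
import (
	"context"
	"io"
	"os"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)
func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", os.Getenv("KUBECONFIG"))
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// Ask the apiserver to skip verification of the kubelet's serving cert.
	req := cs.CoreV1().Pods("default").GetLogs("my-pod", &corev1.PodLogOptions{
		InsecureSkipTLSVerifyBackend: true,
	})
	stream, err := req.Stream(context.TODO())
	if err != nil {
		panic(err)
	}
	defer stream.Close()
	io.Copy(os.Stdout, stream) // copy the log stream to stdout
}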
// +k8s:conversion-gen:explicit-from=net/url.Values
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PodAttachOptions is the query options to a Pod's remote attach call.
@ -4844,6 +4883,7 @@ type PodAttachOptions struct {
Container string `json:"container,omitempty" protobuf:"bytes,5,opt,name=container"`
}
// +k8s:conversion-gen:explicit-from=net/url.Values
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PodExecOptions is the query options to a Pod's remote exec call.
@ -4882,6 +4922,7 @@ type PodExecOptions struct {
Command []string `json:"command" protobuf:"bytes,6,rep,name=command"`
}
// +k8s:conversion-gen:explicit-from=net/url.Values
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PodPortForwardOptions is the query options to a Pod's port forward call
@ -4899,6 +4940,7 @@ type PodPortForwardOptions struct {
Ports []int32 `json:"ports,omitempty" protobuf:"varint,1,rep,name=ports"`
}
// +k8s:conversion-gen:explicit-from=net/url.Values
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PodProxyOptions is the query options to a Pod's proxy call.
@ -4910,6 +4952,7 @@ type PodProxyOptions struct {
Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"`
}
// +k8s:conversion-gen:explicit-from=net/url.Values
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// NodeProxyOptions is the query options to a Node's proxy call.
@ -4921,6 +4964,7 @@ type NodeProxyOptions struct {
Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"`
}
// +k8s:conversion-gen:explicit-from=net/url.Values
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ServiceProxyOptions is the query options to a Service's proxy call.
@ -5764,7 +5808,7 @@ type WindowsSecurityContextOptions struct {
// Defaults to the user specified in image metadata if unspecified.
// May also be set in PodSecurityContext. If set in both SecurityContext and
// PodSecurityContext, the value specified in SecurityContext takes precedence.
// This field is alpha-level and it is only honored by servers that enable the WindowsRunAsUserName feature flag.
// This field is beta-level and may be disabled with the WindowsRunAsUserName feature flag.
// +optional
RunAsUserName *string `json:"runAsUserName,omitempty" protobuf:"bytes,3,opt,name=runAsUserName"`
}


@ -566,7 +566,7 @@ func (EnvVar) SwaggerDoc() map[string]string {
var map_EnvVarSource = map[string]string{
"": "EnvVarSource represents a source for the value of an EnvVar.",
"fieldRef": "Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP.",
"fieldRef": "Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.",
"resourceFieldRef": "Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.",
"configMapKeyRef": "Selects a key of a ConfigMap.",
"secretKeyRef": "Selects a key of a secret in the pod's namespace",
@ -1528,15 +1528,16 @@ func (PodList) SwaggerDoc() map[string]string {
}
var map_PodLogOptions = map[string]string{
"": "PodLogOptions is the query options for a Pod's logs REST call.",
"container": "The container for which to stream logs. Defaults to only container if there is one container in the pod.",
"follow": "Follow the log stream of the pod. Defaults to false.",
"previous": "Return previous terminated container logs. Defaults to false.",
"sinceSeconds": "A relative time in seconds before the current time from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.",
"sinceTime": "An RFC3339 timestamp from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.",
"timestamps": "If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line of log output. Defaults to false.",
"tailLines": "If set, the number of lines from the end of the logs to show. If not specified, logs are shown from the creation of the container or sinceSeconds or sinceTime",
"limitBytes": "If set, the number of bytes to read from the server before terminating the log output. This may not display a complete final line of logging, and may return slightly more or slightly less than the specified limit.",
"": "PodLogOptions is the query options for a Pod's logs REST call.",
"container": "The container for which to stream logs. Defaults to only container if there is one container in the pod.",
"follow": "Follow the log stream of the pod. Defaults to false.",
"previous": "Return previous terminated container logs. Defaults to false.",
"sinceSeconds": "A relative time in seconds before the current time from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.",
"sinceTime": "An RFC3339 timestamp from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.",
"timestamps": "If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line of log output. Defaults to false.",
"tailLines": "If set, the number of lines from the end of the logs to show. If not specified, logs are shown from the creation of the container or sinceSeconds or sinceTime",
"limitBytes": "If set, the number of bytes to read from the server before terminating the log output. This may not display a complete final line of logging, and may return slightly more or slightly less than the specified limit.",
"insecureSkipTLSVerifyBackend": "insecureSkipTLSVerifyBackend indicates that the apiserver should not confirm the validity of the serving certificate of the backend it is connecting to. This will make the HTTPS connection between the apiserver and the backend insecure. This means the apiserver cannot verify the log data it is receiving came from the real kubelet. If the kubelet is configured to verify the apiserver's TLS credentials, it does not mean the connection to the real kubelet is vulnerable to a man in the middle attack (e.g. an attacker could not intercept the actual log data coming from the real kubelet).",
}
func (PodLogOptions) SwaggerDoc() map[string]string {
@ -1613,7 +1614,7 @@ var map_PodSpec = map[string]string{
"hostNetwork": "Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.",
"hostPID": "Use the host's pid namespace. Optional: Default to false.",
"hostIPC": "Use the host's ipc namespace. Optional: Default to false.",
"shareProcessNamespace": "Share a single process namespace between all of the containers in a pod. When this is set containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Default to false. This field is beta-level and may be disabled with the PodShareProcessNamespace feature.",
"shareProcessNamespace": "Share a single process namespace between all of the containers in a pod. When this is set containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Default to false.",
"securityContext": "SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.",
"imagePullSecrets": "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod",
"hostname": "Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.",
@ -2203,6 +2204,7 @@ var map_ServiceSpec = map[string]string{
"publishNotReadyAddresses": "publishNotReadyAddresses, when set to true, indicates that DNS implementations must publish the notReadyAddresses of subsets for the Endpoints associated with the Service. The default value is false. The primary use case for setting this field is to use a StatefulSet's Headless Service to propagate SRV records for its Pods without respect to their readiness for purpose of peer discovery.",
"sessionAffinityConfig": "sessionAffinityConfig contains the configurations of session affinity.",
"ipFamily": "ipFamily specifies whether this Service has a preference for a particular IP family (e.g. IPv4 vs. IPv6). If a specific IP family is requested, the clusterIP field will be allocated from that family, if it is available in the cluster. If no IP family is requested, the cluster's primary IP family will be used. Other IP fields (loadBalancerIP, loadBalancerSourceRanges, externalIPs) and controllers which allocate external load-balancers should use the same IP family. Endpoints for this Service will be of this family. This field is immutable after creation. Assigning a ServiceIPFamily not available in the cluster (e.g. IPv6 in IPv4 only cluster) is an error condition and will fail during clusterIP assignment.",
"topologyKeys": "topologyKeys is a preference-order list of topology keys which implementations of services should use to preferentially sort endpoints when accessing this Service, it can not be used at the same time as externalTrafficPolicy=Local. Topology keys must be valid label keys and at most 16 keys may be specified. Endpoints are chosen based on the first topology key with available backends. If this field is specified and all entries have no backends that match the topology of the client, the service has no backends for that client and connections should fail. The special value \"*\" may be used to mean \"any topology\". This catch-all value, if used, only makes sense as the last value in the list. If this is not specified or empty, no topology constraints will be applied.",
}
func (ServiceSpec) SwaggerDoc() map[string]string {
@ -2366,7 +2368,7 @@ var map_VolumeMount = map[string]string{
"mountPath": "Path within the container at which the volume should be mounted. Must not contain ':'.",
"subPath": "Path within the volume from which the container's volume should be mounted. Defaults to \"\" (volume's root).",
"mountPropagation": "mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10.",
"subPathExpr": "Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to \"\" (volume's root). SubPathExpr and SubPath are mutually exclusive. This field is beta in 1.15.",
"subPathExpr": "Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to \"\" (volume's root). SubPathExpr and SubPath are mutually exclusive.",
}
func (VolumeMount) SwaggerDoc() map[string]string {
@ -2456,7 +2458,7 @@ var map_WindowsSecurityContextOptions = map[string]string{
"": "WindowsSecurityContextOptions contain Windows-specific options and credentials.",
"gmsaCredentialSpecName": "GMSACredentialSpecName is the name of the GMSA credential spec to use. This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag.",
"gmsaCredentialSpec": "GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag.",
"runAsUserName": "The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. This field is alpha-level and it is only honored by servers that enable the WindowsRunAsUserName feature flag.",
"runAsUserName": "The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. This field is beta-level and may be disabled with the WindowsRunAsUserName feature flag.",
}
func (WindowsSecurityContextOptions) SwaggerDoc() map[string]string {


@ -17,15 +17,23 @@ limitations under the License.
package v1
const (
LabelHostname = "kubernetes.io/hostname"
LabelZoneFailureDomain = "failure-domain.beta.kubernetes.io/zone"
LabelZoneRegion = "failure-domain.beta.kubernetes.io/region"
LabelHostname = "kubernetes.io/hostname"
LabelInstanceType = "beta.kubernetes.io/instance-type"
LabelZoneFailureDomain = "failure-domain.beta.kubernetes.io/zone"
LabelZoneRegion = "failure-domain.beta.kubernetes.io/region"
LabelZoneFailureDomainStable = "topology.kubernetes.io/zone"
LabelZoneRegionStable = "topology.kubernetes.io/region"
LabelInstanceType = "beta.kubernetes.io/instance-type"
LabelInstanceTypeStable = "node.kubernetes.io/instance-type"
LabelOSStable = "kubernetes.io/os"
LabelArchStable = "kubernetes.io/arch"
// LabelWindowsBuild is used on Windows nodes to specify the Windows build number starting with v1.17.0.
// It's in the format MajorVersion.MinorVersion.BuildNumber (for example, 10.0.17763)
LabelWindowsBuild = "node.kubernetes.io/windows-build"
// LabelNamespaceSuffixKubelet is an allowed label namespace suffix kubelets can self-set ([*.]kubelet.kubernetes.io/*)
LabelNamespaceSuffixKubelet = "kubelet.kubernetes.io"
// LabelNamespaceSuffixNode is an allowed label namespace suffix kubelets can self-set ([*.]node.kubernetes.io/*)

vendor/k8s.io/api/core/v1/well_known_taints.go (new file, 55 lines)

@ -0,0 +1,55 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
const (
// TaintNodeNotReady will be added when node is not ready
// and feature-gate for TaintBasedEvictions flag is enabled,
// and removed when node becomes ready.
TaintNodeNotReady = "node.kubernetes.io/not-ready"
// TaintNodeUnreachable will be added when node becomes unreachable
// (corresponding to NodeReady status ConditionUnknown)
// and feature-gate for TaintBasedEvictions flag is enabled,
// and removed when node becomes reachable (NodeReady status ConditionTrue).
TaintNodeUnreachable = "node.kubernetes.io/unreachable"
// TaintNodeUnschedulable will be added when node becomes unschedulable
// and feature-gate for TaintNodesByCondition flag is enabled,
// and removed when node becomes schedulable.
TaintNodeUnschedulable = "node.kubernetes.io/unschedulable"
// TaintNodeMemoryPressure will be added when node has memory pressure
// and feature-gate for TaintNodesByCondition flag is enabled,
// and removed when node has enough memory.
TaintNodeMemoryPressure = "node.kubernetes.io/memory-pressure"
// TaintNodeDiskPressure will be added when node has disk pressure
// and feature-gate for TaintNodesByCondition flag is enabled,
// and removed when node has enough disk.
TaintNodeDiskPressure = "node.kubernetes.io/disk-pressure"
// TaintNodeNetworkUnavailable will be added when node's network is unavailable
// and feature-gate for TaintNodesByCondition flag is enabled,
// and removed when network becomes ready.
TaintNodeNetworkUnavailable = "node.kubernetes.io/network-unavailable"
// TaintNodePIDPressure will be added when node has pid pressure
// and feature-gate for TaintNodesByCondition flag is enabled,
// and removed when node has enough disk.
TaintNodePIDPressure = "node.kubernetes.io/pid-pressure"
)
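As a usage sketch (not part of the vendored file), a pod toleration against one of these well-known taints might be constructed like this:
package main
import (
	"fmt"
	corev1 "k8s.io/api/core/v1"
)
func main() {
	seconds := int64(300)
	// Tolerate the well-known "unreachable" taint for five minutes before eviction.
	t := corev1.Toleration{
		Key:               "node.kubernetes.io/unreachable",
		Operator:          corev1.TolerationOpExists,
		Effect:            corev1.TaintEffectNoExecute,
		TolerationSeconds: &seconds,
	}
	fmt.Printf("%+v\n", t)
}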


@ -5186,6 +5186,11 @@ func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) {
*out = new(IPFamily)
**out = **in
}
if in.TopologyKeys != nil {
in, out := &in.TopologyKeys, &out.TopologyKeys
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}


@ -200,53 +200,54 @@ func init() {
}
var fileDescriptor_772f83c5b34e07a5 = []byte{
// 728 bytes of a gzipped FileDescriptorProto
// 746 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0x4b, 0x6f, 0xd3, 0x4a,
0x14, 0x8e, 0x9b, 0x5a, 0xb2, 0x27, 0x8d, 0xd4, 0x8e, 0xee, 0x22, 0xca, 0xbd, 0xd7, 0x8e, 0xc2,
0x82, 0x48, 0x85, 0x31, 0xa9, 0x28, 0xaa, 0x60, 0x43, 0x8d, 0xca, 0x43, 0xe2, 0x11, 0x86, 0x2e,
0x10, 0x62, 0xc1, 0xc4, 0x9e, 0x3a, 0x26, 0x89, 0xc7, 0xb2, 0x27, 0x91, 0xb2, 0xe3, 0x27, 0x20,
0xf1, 0x77, 0x58, 0xb2, 0xe8, 0xb2, 0xcb, 0xae, 0x0c, 0x35, 0xff, 0xa2, 0x2b, 0x34, 0xe3, 0x57,
0x4a, 0x78, 0x64, 0x37, 0xe7, 0x9b, 0xf3, 0x7d, 0xe7, 0x9c, 0x6f, 0xce, 0x80, 0x87, 0xe3, 0x83,
0x18, 0xf9, 0xcc, 0x1a, 0xcf, 0x86, 0x34, 0x0a, 0x28, 0xa7, 0xb1, 0x35, 0xa7, 0x81, 0xcb, 0x22,
0x2b, 0xbf, 0x20, 0xa1, 0x6f, 0xb9, 0x7e, 0xec, 0xb0, 0x39, 0x8d, 0x16, 0xd6, 0xbc, 0x4f, 0x26,
0xe1, 0x88, 0xf4, 0x2d, 0x8f, 0x06, 0x34, 0x22, 0x9c, 0xba, 0x28, 0x8c, 0x18, 0x67, 0xf0, 0xff,
0x2c, 0x1d, 0x91, 0xd0, 0x47, 0x65, 0x3a, 0x2a, 0xd2, 0xdb, 0x37, 0x3d, 0x9f, 0x8f, 0x66, 0x43,
0xe4, 0xb0, 0xa9, 0xe5, 0x31, 0x8f, 0x59, 0x92, 0x35, 0x9c, 0x9d, 0xc8, 0x48, 0x06, 0xf2, 0x94,
0xa9, 0xb5, 0xbb, 0x4b, 0xc5, 0x1d, 0x16, 0x51, 0x6b, 0xbe, 0x52, 0xb1, 0x7d, 0xbb, 0xca, 0x99,
0x12, 0x67, 0xe4, 0x07, 0xa2, 0xbf, 0x70, 0xec, 0x09, 0x20, 0xb6, 0xa6, 0x94, 0x93, 0x5f, 0xb1,
0xac, 0xdf, 0xb1, 0xa2, 0x59, 0xc0, 0xfd, 0x29, 0x5d, 0x21, 0xdc, 0xf9, 0x1b, 0x21, 0x76, 0x46,
0x74, 0x4a, 0x7e, 0xe6, 0x75, 0x3f, 0xd7, 0x81, 0x76, 0x14, 0xb8, 0x21, 0xf3, 0x03, 0x0e, 0x77,
0x81, 0x4e, 0x5c, 0x37, 0xa2, 0x71, 0x4c, 0xe3, 0x96, 0xd2, 0xa9, 0xf7, 0x74, 0xbb, 0x99, 0x26,
0xa6, 0x7e, 0x58, 0x80, 0xb8, 0xba, 0x87, 0x14, 0x00, 0x87, 0x05, 0xae, 0xcf, 0x7d, 0x16, 0xc4,
0xad, 0x8d, 0x8e, 0xd2, 0x6b, 0xec, 0xf5, 0xd1, 0x1f, 0xfd, 0x45, 0x45, 0xa5, 0x07, 0x25, 0xd1,
0x86, 0xa7, 0x89, 0x59, 0x4b, 0x13, 0x13, 0x54, 0x18, 0x5e, 0x12, 0x86, 0x3d, 0xa0, 0x8d, 0x58,
0xcc, 0x03, 0x32, 0xa5, 0xad, 0x7a, 0x47, 0xe9, 0xe9, 0xf6, 0x56, 0x9a, 0x98, 0xda, 0xe3, 0x1c,
0xc3, 0xe5, 0x2d, 0x1c, 0x00, 0x9d, 0x93, 0xc8, 0xa3, 0x1c, 0xd3, 0x93, 0xd6, 0xa6, 0xec, 0xe7,
0xda, 0x72, 0x3f, 0xe2, 0x85, 0xd0, 0xbc, 0x8f, 0x5e, 0x0c, 0xdf, 0x53, 0x47, 0x24, 0xd1, 0x88,
0x06, 0x0e, 0xcd, 0x46, 0x3c, 0x2e, 0x98, 0xb8, 0x12, 0x81, 0x0e, 0xd0, 0x38, 0x0b, 0xd9, 0x84,
0x79, 0x8b, 0x96, 0xda, 0xa9, 0xf7, 0x1a, 0x7b, 0xfb, 0x6b, 0x0e, 0x88, 0x8e, 0x73, 0xde, 0x51,
0xc0, 0xa3, 0x85, 0xbd, 0x9d, 0x0f, 0xa9, 0x15, 0x30, 0x2e, 0x85, 0xdb, 0xf7, 0x40, 0xf3, 0x4a,
0x32, 0xdc, 0x06, 0xf5, 0x31, 0x5d, 0xb4, 0x14, 0x31, 0x2c, 0x16, 0x47, 0xf8, 0x0f, 0x50, 0xe7,
0x64, 0x32, 0xa3, 0xd2, 0x65, 0x1d, 0x67, 0xc1, 0xdd, 0x8d, 0x03, 0xa5, 0xbb, 0x0f, 0xe0, 0xaa,
0xa7, 0xd0, 0x04, 0x6a, 0x44, 0x89, 0x9b, 0x69, 0x68, 0xb6, 0x9e, 0x26, 0xa6, 0x8a, 0x05, 0x80,
0x33, 0xbc, 0xfb, 0x49, 0x01, 0x5b, 0x05, 0x6f, 0xc0, 0x22, 0x0e, 0xff, 0x03, 0x9b, 0xd2, 0x61,
0x59, 0xd4, 0xd6, 0xd2, 0xc4, 0xdc, 0x7c, 0x2e, 0xdc, 0x95, 0x28, 0x7c, 0x04, 0x34, 0xb9, 0x2d,
0x0e, 0x9b, 0x64, 0x2d, 0xd8, 0xbb, 0x62, 0x98, 0x41, 0x8e, 0x5d, 0x26, 0xe6, 0xbf, 0xab, 0x3f,
0x01, 0x15, 0xd7, 0xb8, 0x24, 0x8b, 0x32, 0x21, 0x8b, 0xb8, 0x7c, 0x48, 0x35, 0x2b, 0x23, 0xca,
0x63, 0x89, 0x76, 0xbf, 0x6e, 0x80, 0x66, 0xd1, 0xd5, 0xab, 0x89, 0xef, 0x50, 0xf8, 0x0e, 0x68,
0xe2, 0x87, 0xb8, 0x84, 0x13, 0xd9, 0x5a, 0x63, 0xef, 0xd6, 0xd2, 0x03, 0x94, 0x8b, 0x8e, 0xc2,
0xb1, 0x27, 0x80, 0x18, 0x89, 0xec, 0xea, 0x8d, 0x9f, 0x51, 0x4e, 0xaa, 0x05, 0xab, 0x30, 0x5c,
0xaa, 0xc2, 0xfb, 0xa0, 0x91, 0xaf, 0xf4, 0xf1, 0x22, 0xa4, 0x72, 0x6d, 0x74, 0xdb, 0x48, 0x13,
0xb3, 0x71, 0x58, 0xc1, 0x97, 0x57, 0x43, 0xbc, 0x4c, 0x81, 0xaf, 0x81, 0x4e, 0xf3, 0xa6, 0xc5,
0x37, 0x10, 0x5b, 0x72, 0x7d, 0xcd, 0x2d, 0xb1, 0x77, 0xf2, 0xde, 0xf4, 0x02, 0x89, 0x71, 0x25,
0x06, 0x07, 0x40, 0x15, 0xbe, 0xc4, 0xad, 0xba, 0x54, 0xdd, 0x5d, 0x53, 0x55, 0x38, 0x6a, 0x37,
0x73, 0x65, 0x55, 0x44, 0x31, 0xce, 0x84, 0xba, 0x5f, 0x14, 0xb0, 0x73, 0xc5, 0xe1, 0xa7, 0x7e,
0xcc, 0xe1, 0xdb, 0x15, 0x97, 0xd1, 0x7a, 0x2e, 0x0b, 0xb6, 0xf4, 0xb8, 0xdc, 0xef, 0x02, 0x59,
0x72, 0xf8, 0x25, 0x50, 0x7d, 0x4e, 0xa7, 0x85, 0x37, 0x37, 0xd6, 0x9c, 0x42, 0xb6, 0x57, 0x8d,
0xf1, 0x44, 0x48, 0xe0, 0x4c, 0xc9, 0x46, 0xa7, 0x17, 0x46, 0xed, 0xec, 0xc2, 0xa8, 0x9d, 0x5f,
0x18, 0xb5, 0x0f, 0xa9, 0xa1, 0x9c, 0xa6, 0x86, 0x72, 0x96, 0x1a, 0xca, 0x79, 0x6a, 0x28, 0xdf,
0x52, 0x43, 0xf9, 0xf8, 0xdd, 0xa8, 0xbd, 0xd1, 0x0a, 0xcd, 0x1f, 0x01, 0x00, 0x00, 0xff, 0xff,
0x04, 0x9d, 0x1a, 0x33, 0x3e, 0x06, 0x00, 0x00,
0x14, 0x8e, 0x9b, 0x5a, 0xb2, 0x27, 0x8d, 0x6e, 0x3b, 0xba, 0x8b, 0x28, 0xf7, 0x5e, 0x3b, 0xca,
0x5d, 0x10, 0xa9, 0x30, 0x26, 0x15, 0x45, 0x15, 0xac, 0x6a, 0x28, 0x0f, 0x89, 0x47, 0x18, 0xba,
0x40, 0x88, 0x05, 0x13, 0x7b, 0xea, 0x98, 0x24, 0x1e, 0xcb, 0x9e, 0x44, 0xca, 0x8e, 0x9f, 0xc0,
0x0f, 0x62, 0x89, 0x50, 0x97, 0x5d, 0x76, 0x65, 0x51, 0xf7, 0x5f, 0x74, 0x85, 0x66, 0xfc, 0x4a,
0x09, 0x8f, 0xec, 0x66, 0xbe, 0x39, 0xdf, 0x77, 0xce, 0xf9, 0xe6, 0x1c, 0xf0, 0x68, 0x7c, 0x10,
0x23, 0x9f, 0x59, 0xe3, 0xd9, 0x90, 0x46, 0x01, 0xe5, 0x34, 0xb6, 0xe6, 0x34, 0x70, 0x59, 0x64,
0xe5, 0x0f, 0x24, 0xf4, 0x2d, 0xd7, 0x8f, 0x1d, 0x36, 0xa7, 0xd1, 0xc2, 0x9a, 0xf7, 0xc9, 0x24,
0x1c, 0x91, 0xbe, 0xe5, 0xd1, 0x80, 0x46, 0x84, 0x53, 0x17, 0x85, 0x11, 0xe3, 0x0c, 0xfe, 0x97,
0x85, 0x23, 0x12, 0xfa, 0xa8, 0x0c, 0x47, 0x45, 0x78, 0xfb, 0x96, 0xe7, 0xf3, 0xd1, 0x6c, 0x88,
0x1c, 0x36, 0xb5, 0x3c, 0xe6, 0x31, 0x4b, 0xb2, 0x86, 0xb3, 0x13, 0x79, 0x93, 0x17, 0x79, 0xca,
0xd4, 0xda, 0xdd, 0xa5, 0xe4, 0x0e, 0x8b, 0xa8, 0x35, 0x5f, 0xc9, 0xd8, 0xbe, 0x53, 0xc5, 0x4c,
0x89, 0x33, 0xf2, 0x03, 0x51, 0x5f, 0x38, 0xf6, 0x04, 0x10, 0x5b, 0x53, 0xca, 0xc9, 0xcf, 0x58,
0xd6, 0xaf, 0x58, 0xd1, 0x2c, 0xe0, 0xfe, 0x94, 0xae, 0x10, 0xee, 0xfe, 0x89, 0x10, 0x3b, 0x23,
0x3a, 0x25, 0x3f, 0xf2, 0xba, 0x9f, 0xeb, 0x40, 0x3b, 0x0a, 0xdc, 0x90, 0xf9, 0x01, 0x87, 0xbb,
0x40, 0x27, 0xae, 0x1b, 0xd1, 0x38, 0xa6, 0x71, 0x4b, 0xe9, 0xd4, 0x7b, 0xba, 0xdd, 0x4c, 0x13,
0x53, 0x3f, 0x2c, 0x40, 0x5c, 0xbd, 0x43, 0x0a, 0x80, 0xc3, 0x02, 0xd7, 0xe7, 0x3e, 0x0b, 0xe2,
0xd6, 0x46, 0x47, 0xe9, 0x35, 0xf6, 0xfa, 0xe8, 0xb7, 0xfe, 0xa2, 0x22, 0xd3, 0x83, 0x92, 0x68,
0xc3, 0xd3, 0xc4, 0xac, 0xa5, 0x89, 0x09, 0x2a, 0x0c, 0x2f, 0x09, 0xc3, 0x1e, 0xd0, 0x46, 0x2c,
0xe6, 0x01, 0x99, 0xd2, 0x56, 0xbd, 0xa3, 0xf4, 0x74, 0x7b, 0x2b, 0x4d, 0x4c, 0xed, 0x49, 0x8e,
0xe1, 0xf2, 0x15, 0x0e, 0x80, 0xce, 0x49, 0xe4, 0x51, 0x8e, 0xe9, 0x49, 0x6b, 0x53, 0xd6, 0xf3,
0xff, 0x72, 0x3d, 0xe2, 0x87, 0xd0, 0xbc, 0x8f, 0x5e, 0x0e, 0x3f, 0x50, 0x47, 0x04, 0xd1, 0x88,
0x06, 0x0e, 0xcd, 0x5a, 0x3c, 0x2e, 0x98, 0xb8, 0x12, 0x81, 0x0e, 0xd0, 0x38, 0x0b, 0xd9, 0x84,
0x79, 0x8b, 0x96, 0xda, 0xa9, 0xf7, 0x1a, 0x7b, 0xfb, 0x6b, 0x36, 0x88, 0x8e, 0x73, 0xde, 0x51,
0xc0, 0xa3, 0x85, 0xbd, 0x9d, 0x37, 0xa9, 0x15, 0x30, 0x2e, 0x85, 0xdb, 0xf7, 0x41, 0xf3, 0x5a,
0x30, 0xdc, 0x06, 0xf5, 0x31, 0x5d, 0xb4, 0x14, 0xd1, 0x2c, 0x16, 0x47, 0xf8, 0x37, 0x50, 0xe7,
0x64, 0x32, 0xa3, 0xd2, 0x65, 0x1d, 0x67, 0x97, 0x7b, 0x1b, 0x07, 0x4a, 0x77, 0x1f, 0xc0, 0x55,
0x4f, 0xa1, 0x09, 0xd4, 0x88, 0x12, 0x37, 0xd3, 0xd0, 0x6c, 0x3d, 0x4d, 0x4c, 0x15, 0x0b, 0x00,
0x67, 0x78, 0xf7, 0xab, 0x02, 0xb6, 0x0a, 0xde, 0x80, 0x45, 0x1c, 0xfe, 0x0b, 0x36, 0xa5, 0xc3,
0x32, 0xa9, 0xad, 0xa5, 0x89, 0xb9, 0xf9, 0x42, 0xb8, 0x2b, 0x51, 0xf8, 0x18, 0x68, 0x72, 0x5a,
0x1c, 0x36, 0xc9, 0x4a, 0xb0, 0x77, 0x45, 0x33, 0x83, 0x1c, 0xbb, 0x4a, 0xcc, 0x7f, 0x56, 0x37,
0x01, 0x15, 0xcf, 0xb8, 0x24, 0x8b, 0x34, 0x21, 0x8b, 0xb8, 0xfc, 0x48, 0x35, 0x4b, 0x23, 0xd2,
0x63, 0x89, 0xc2, 0x3e, 0x68, 0x90, 0x30, 0x2c, 0x68, 0xf2, 0x0b, 0x75, 0xfb, 0xaf, 0x34, 0x31,
0x1b, 0x87, 0x15, 0x8c, 0x97, 0x63, 0xba, 0x97, 0x1b, 0xa0, 0x59, 0x34, 0xf2, 0x7a, 0xe2, 0x3b,
0x14, 0xbe, 0x07, 0x9a, 0x58, 0x2a, 0x97, 0x70, 0x22, 0xbb, 0x69, 0xec, 0xdd, 0x5e, 0xfa, 0xb3,
0x72, 0x37, 0x50, 0x38, 0xf6, 0x04, 0x10, 0x23, 0x11, 0x5d, 0x8d, 0xc5, 0x73, 0xca, 0x49, 0x35,
0x93, 0x15, 0x86, 0x4b, 0x55, 0xf8, 0x10, 0x34, 0xf2, 0x2d, 0x38, 0x5e, 0x84, 0x34, 0x2f, 0xb3,
0x9b, 0x53, 0x1a, 0x87, 0xd5, 0xd3, 0xd5, 0xf5, 0x2b, 0x5e, 0xa6, 0xc1, 0x37, 0x40, 0xa7, 0x79,
0xe1, 0x62, 0x7b, 0xc4, 0x70, 0xdd, 0x58, 0x73, 0xb8, 0xec, 0x9d, 0x3c, 0x99, 0x5e, 0x20, 0x31,
0xae, 0xc4, 0xe0, 0x00, 0xa8, 0xc2, 0xce, 0xb8, 0x55, 0x97, 0xaa, 0xbb, 0x6b, 0xaa, 0x8a, 0x8f,
0xb0, 0x9b, 0xb9, 0xb2, 0x2a, 0x6e, 0x31, 0xce, 0x84, 0xba, 0x5f, 0x14, 0xb0, 0x73, 0xcd, 0xe5,
0x67, 0x7e, 0xcc, 0xe1, 0xbb, 0x15, 0xa7, 0xd1, 0x7a, 0x4e, 0x0b, 0xb6, 0xf4, 0xb9, 0x5c, 0x8b,
0x02, 0x59, 0x72, 0xf9, 0x15, 0x50, 0x7d, 0x4e, 0xa7, 0x85, 0x37, 0x37, 0xd7, 0xec, 0x42, 0x96,
0x57, 0xb5, 0xf1, 0x54, 0x48, 0xe0, 0x4c, 0xc9, 0x46, 0xa7, 0x17, 0x46, 0xed, 0xec, 0xc2, 0xa8,
0x9d, 0x5f, 0x18, 0xb5, 0x8f, 0xa9, 0xa1, 0x9c, 0xa6, 0x86, 0x72, 0x96, 0x1a, 0xca, 0x79, 0x6a,
0x28, 0xdf, 0x52, 0x43, 0xf9, 0x74, 0x69, 0xd4, 0xde, 0x6a, 0x85, 0xe6, 0xf7, 0x00, 0x00, 0x00,
0xff, 0xff, 0x65, 0x85, 0x5a, 0x9b, 0x75, 0x06, 0x00, 0x00,
}
func (m *Endpoint) Marshal() (dAtA []byte, err error) {
@ -387,6 +388,13 @@ func (m *EndpointPort) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
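// Note: MarshalToSizedBuffer writes fields back-to-front. The tag byte 0x22
// written below encodes protobuf field number 4 with wire type 2
// (length-delimited): (4<<3)|2 == 0x22.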
if m.AppProtocol != nil {
i -= len(*m.AppProtocol)
copy(dAtA[i:], *m.AppProtocol)
i = encodeVarintGenerated(dAtA, i, uint64(len(*m.AppProtocol)))
i--
dAtA[i] = 0x22
}
if m.Port != nil {
i = encodeVarintGenerated(dAtA, i, uint64(*m.Port))
i--
@ -429,13 +437,11 @@ func (m *EndpointSlice) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
if m.AddressType != nil {
i -= len(*m.AddressType)
copy(dAtA[i:], *m.AddressType)
i = encodeVarintGenerated(dAtA, i, uint64(len(*m.AddressType)))
i--
dAtA[i] = 0x22
}
i -= len(m.AddressType)
copy(dAtA[i:], m.AddressType)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.AddressType)))
i--
dAtA[i] = 0x22
if len(m.Ports) > 0 {
for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- {
{
@ -597,6 +603,10 @@ func (m *EndpointPort) Size() (n int) {
if m.Port != nil {
n += 1 + sovGenerated(uint64(*m.Port))
}
if m.AppProtocol != nil {
l = len(*m.AppProtocol)
n += 1 + l + sovGenerated(uint64(l))
}
return n
}
@ -620,10 +630,8 @@ func (m *EndpointSlice) Size() (n int) {
n += 1 + l + sovGenerated(uint64(l))
}
}
if m.AddressType != nil {
l = len(*m.AddressType)
n += 1 + l + sovGenerated(uint64(l))
}
l = len(m.AddressType)
n += 1 + l + sovGenerated(uint64(l))
return n
}
@ -692,6 +700,7 @@ func (this *EndpointPort) String() string {
`Name:` + valueToStringGenerated(this.Name) + `,`,
`Protocol:` + valueToStringGenerated(this.Protocol) + `,`,
`Port:` + valueToStringGenerated(this.Port) + `,`,
`AppProtocol:` + valueToStringGenerated(this.AppProtocol) + `,`,
`}`,
}, "")
return s
@ -714,7 +723,7 @@ func (this *EndpointSlice) String() string {
`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`,
`Endpoints:` + repeatedStringForEndpoints + `,`,
`Ports:` + repeatedStringForPorts + `,`,
`AddressType:` + valueToStringGenerated(this.AddressType) + `,`,
`AddressType:` + fmt.Sprintf("%v", this.AddressType) + `,`,
`}`,
}, "")
return s
@ -1246,6 +1255,39 @@ func (m *EndpointPort) Unmarshal(dAtA []byte) error {
}
}
m.Port = &v
case 4:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field AppProtocol", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
s := string(dAtA[iNdEx:postIndex])
m.AppProtocol = &s
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@ -1430,8 +1472,7 @@ func (m *EndpointSlice) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
s := AddressType(dAtA[iNdEx:postIndex])
m.AddressType = &s
m.AddressType = AddressType(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex


@ -32,11 +32,10 @@ option go_package = "v1alpha1";
// Endpoint represents a single logical "backend" implementing a service.
message Endpoint {
// addresses of this endpoint. The contents of this field are interpreted
// according to the corresponding EndpointSlice addressType field. This
// allows for cases like dual-stack (IPv4 and IPv6) networking. Consumers
// (e.g. kube-proxy) must handle different types of addresses in the context
// of their own capabilities. This must contain at least one address but no
// more than 100.
// according to the corresponding EndpointSlice addressType field. Consumers
// must handle different types of addresses in the context of their own
// capabilities. This must contain at least one address but no more than
// 100.
// +listType=set
repeated string addresses = 1;
@ -87,11 +86,10 @@ message EndpointPort {
// The name of this port. All ports in an EndpointSlice must have a unique
// name. If the EndpointSlice is derived from a Kubernetes service, this
// corresponds to the Service.ports[].name.
// Name must either be an empty string or pass IANA_SVC_NAME validation:
// * must be no more than 15 characters long
// * may contain only [-a-z0-9]
// * must contain at least one letter [a-z]
// * it must not start or end with a hyphen, nor contain adjacent hyphens
// Name must either be an empty string or pass DNS_LABEL validation:
// * must be no more than 63 characters long.
// * must consist of lower case alphanumeric characters or '-'.
// * must start and end with an alphanumeric character.
// Default is empty string.
optional string name = 1;
@ -104,6 +102,14 @@ message EndpointPort {
// If this is not specified, ports are not restricted and must be
// interpreted in the context of the specific consumer.
optional int32 port = 3;
// The application protocol for this port.
// This field follows standard Kubernetes label syntax.
// Un-prefixed names are reserved for IANA standard service names (as per
// RFC-6335 and http://www.iana.org/assignments/service-names).
// Non-standard protocols should use prefixed names.
// Default is empty string.
optional string appProtocol = 4;
}
// EndpointSlice represents a subset of the endpoints that implement a service.
@ -115,9 +121,12 @@ message EndpointSlice {
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
// addressType specifies the type of address carried by this EndpointSlice.
// All addresses in this slice must be the same type.
// Default is IP
// +optional
// All addresses in this slice must be the same type. This field is
// immutable after creation. The following address types are currently
// supported:
// * IPv4: Represents an IPv4 Address.
// * IPv6: Represents an IPv6 Address.
// * FQDN: Represents a Fully Qualified Domain Name.
optional string addressType = 4;
// endpoints is a list of unique endpoints in this slice. Each slice may


@ -33,10 +33,13 @@ type EndpointSlice struct {
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// addressType specifies the type of address carried by this EndpointSlice.
// All addresses in this slice must be the same type.
// Default is IP
// +optional
AddressType *AddressType `json:"addressType" protobuf:"bytes,4,rep,name=addressType"`
// All addresses in this slice must be the same type. This field is
// immutable after creation. The following address types are currently
// supported:
// * IPv4: Represents an IPv4 Address.
// * IPv6: Represents an IPv6 Address.
// * FQDN: Represents a Fully Qualified Domain Name.
AddressType AddressType `json:"addressType" protobuf:"bytes,4,rep,name=addressType"`
// endpoints is a list of unique endpoints in this slice. Each slice may
// include a maximum of 1000 endpoints.
// +listType=atomic
@ -56,17 +59,26 @@ type AddressType string
const (
// AddressTypeIP represents an IP Address.
// This address type has been deprecated and has been replaced by the IPv4
// and IPv6 address types. New resources with this address type will be
// considered invalid. This will be fully removed in 1.18.
// +deprecated
AddressTypeIP = AddressType("IP")
// AddressTypeIPv4 represents an IPv4 Address.
AddressTypeIPv4 = AddressType(v1.IPv4Protocol)
// AddressTypeIPv6 represents an IPv6 Address.
AddressTypeIPv6 = AddressType(v1.IPv6Protocol)
// AddressTypeFQDN represents a FQDN.
AddressTypeFQDN = AddressType("FQDN")
)
// Endpoint represents a single logical "backend" implementing a service.
type Endpoint struct {
// addresses of this endpoint. The contents of this field are interpreted
// according to the corresponding EndpointSlice addressType field. This
// allows for cases like dual-stack (IPv4 and IPv6) networking. Consumers
// (e.g. kube-proxy) must handle different types of addresses in the context
// of their own capabilities. This must contain at least one address but no
// more than 100.
// according to the corresponding EndpointSlice addressType field. Consumers
// must handle different types of addresses in the context of their own
// capabilities. This must contain at least one address but no more than
// 100.
// +listType=set
Addresses []string `json:"addresses" protobuf:"bytes,1,rep,name=addresses"`
// conditions contains information about the current status of the endpoint.
@ -113,11 +125,10 @@ type EndpointPort struct {
// The name of this port. All ports in an EndpointSlice must have a unique
// name. If the EndpointSlice is derived from a Kubernetes service, this
// corresponds to the Service.ports[].name.
// Name must either be an empty string or pass IANA_SVC_NAME validation:
// * must be no more than 15 characters long
// * may contain only [-a-z0-9]
// * must contain at least one letter [a-z]
// * it must not start or end with a hyphen, nor contain adjacent hyphens
// Name must either be an empty string or pass DNS_LABEL validation:
// * must be no more than 63 characters long.
// * must consist of lower case alphanumeric characters or '-'.
// * must start and end with an alphanumeric character.
// Default is empty string.
Name *string `json:"name,omitempty" protobuf:"bytes,1,name=name"`
// The IP protocol for this port.
@ -128,6 +139,13 @@ type EndpointPort struct {
// If this is not specified, ports are not restricted and must be
// interpreted in the context of the specific consumer.
Port *int32 `json:"port,omitempty" protobuf:"bytes,3,opt,name=port"`
// The application protocol for this port.
// This field follows standard Kubernetes label syntax.
// Un-prefixed names are reserved for IANA standard service names (as per
// RFC-6335 and http://www.iana.org/assignments/service-names).
// Non-standard protocols should use prefixed names.
// Default is empty string.
AppProtocol *string `json:"appProtocol,omitempty" protobuf:"bytes,4,name=appProtocol"`
}
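As a rough, hedged construction example (names, addresses, and the managed-by value are illustrative, and Protocol is assumed to be a *v1.Protocol as in the generated clients):
package main
import (
	"fmt"
	corev1 "k8s.io/api/core/v1"
	discovery "k8s.io/api/discovery/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func strPtr(s string) *string { return &s }
func int32Ptr(i int32) *int32 { return &i }
func main() {
	proto := corev1.ProtocolTCP
	slice := discovery.EndpointSlice{
		ObjectMeta: metav1.ObjectMeta{
			Name: "example-abc12",
			Labels: map[string]string{
				// Well-known labels from this package's well_known_labels.go.
				"kubernetes.io/service-name":             "example",
				"endpointslice.kubernetes.io/managed-by": "my-controller.example.com",
			},
		},
		AddressType: discovery.AddressTypeIPv4, // immutable after creation
		Endpoints: []discovery.Endpoint{
			{Addresses: []string{"10.0.0.1"}},
		},
		Ports: []discovery.EndpointPort{{
			Name:        strPtr("http"),
			Protocol:    &proto,
			Port:        int32Ptr(8080),
			AppProtocol: strPtr("http"), // un-prefixed names are reserved for IANA service names
		}},
	}
	fmt.Println(slice.AddressType)
}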
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object


@ -29,7 +29,7 @@ package v1alpha1
// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
var map_Endpoint = map[string]string{
"": "Endpoint represents a single logical \"backend\" implementing a service.",
"addresses": "addresses of this endpoint. The contents of this field are interpreted according to the corresponding EndpointSlice addressType field. This allows for cases like dual-stack (IPv4 and IPv6) networking. Consumers (e.g. kube-proxy) must handle different types of addresses in the context of their own capabilities. This must contain at least one address but no more than 100.",
"addresses": "addresses of this endpoint. The contents of this field are interpreted according to the corresponding EndpointSlice addressType field. Consumers must handle different types of addresses in the context of their own capabilities. This must contain at least one address but no more than 100.",
"conditions": "conditions contains information about the current status of the endpoint.",
"hostname": "hostname of this endpoint. This field may be used by consumers of endpoints to distinguish endpoints from each other (e.g. in DNS names). Multiple endpoints which use the same hostname should be considered fungible (e.g. multiple A values in DNS). Must pass DNS Label (RFC 1123) validation.",
"targetRef": "targetRef is a reference to a Kubernetes object that represents this endpoint.",
@ -50,10 +50,11 @@ func (EndpointConditions) SwaggerDoc() map[string]string {
}
var map_EndpointPort = map[string]string{
"": "EndpointPort represents a Port used by an EndpointSlice",
"name": "The name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass IANA_SVC_NAME validation: * must be no more than 15 characters long * may contain only [-a-z0-9] * must contain at least one letter [a-z] * it must not start or end with a hyphen, nor contain adjacent hyphens Default is empty string.",
"protocol": "The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.",
"port": "The port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer.",
"": "EndpointPort represents a Port used by an EndpointSlice",
"name": "The name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.",
"protocol": "The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.",
"port": "The port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer.",
"appProtocol": "The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names. Default is empty string.",
}
func (EndpointPort) SwaggerDoc() map[string]string {
@ -63,7 +64,7 @@ func (EndpointPort) SwaggerDoc() map[string]string {
var map_EndpointSlice = map[string]string{
"": "EndpointSlice represents a subset of the endpoints that implement a service. For a given service there may be multiple EndpointSlice objects, selected by labels, which must be joined to produce the full set of endpoints.",
"metadata": "Standard object's metadata.",
"addressType": "addressType specifies the type of address carried by this EndpointSlice. All addresses in this slice must be the same type. Default is IP",
"addressType": "addressType specifies the type of address carried by this EndpointSlice. All addresses in this slice must be the same type. This field is immutable after creation. The following address types are currently supported: * IPv4: Represents an IPv4 Address. * IPv6: Represents an IPv6 Address. * FQDN: Represents a Fully Qualified Domain Name.",
"endpoints": "endpoints is a list of unique endpoints in this slice. Each slice may include a maximum of 1000 endpoints.",
"ports": "ports specifies the list of network ports exposed by each endpoint in this slice. Each port must have a unique name. When ports is empty, it indicates that there are no defined ports. When a port is defined with a nil port value, it indicates \"all ports\". Each slice may include a maximum of 100 ports.",
}


@ -19,4 +19,10 @@ package v1alpha1
const (
// LabelServiceName is used to indicate the name of a Kubernetes service.
LabelServiceName = "kubernetes.io/service-name"
// LabelManagedBy is used to indicate the controller or entity that manages
// an EndpointSlice. This label aims to enable different EndpointSlice
// objects to be managed by different controllers or entities within the
// same cluster. It is highly recommended to configure this label for all
// EndpointSlices.
LabelManagedBy = "endpointslice.kubernetes.io/managed-by"
)


@ -103,6 +103,11 @@ func (in *EndpointPort) DeepCopyInto(out *EndpointPort) {
*out = new(int32)
**out = **in
}
if in.AppProtocol != nil {
in, out := &in.AppProtocol, &out.AppProtocol
*out = new(string)
**out = **in
}
return
}
@ -121,11 +126,6 @@ func (in *EndpointSlice) DeepCopyInto(out *EndpointSlice) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
if in.AddressType != nil {
in, out := &in.AddressType, &out.AddressType
*out = new(AddressType)
**out = **in
}
if in.Endpoints != nil {
in, out := &in.Endpoints, &out.Endpoints
*out = make([]Endpoint, len(*in))

vendor/k8s.io/api/discovery/v1beta1/BUILD.bazel (new file, 25 lines)

@ -0,0 +1,25 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"generated.pb.go",
"register.go",
"types.go",
"types_swagger_doc_generated.go",
"well_known_labels.go",
"zz_generated.deepcopy.go",
],
importmap = "k8s.io/kops/vendor/k8s.io/api/discovery/v1beta1",
importpath = "k8s.io/api/discovery/v1beta1",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/gogo/protobuf/proto:go_default_library",
"//vendor/github.com/gogo/protobuf/sortkeys:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
],
)

vendor/k8s.io/api/discovery/v1beta1/doc.go (new file, 22 lines)

@ -0,0 +1,22 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// +k8s:deepcopy-gen=package
// +k8s:protobuf-gen=package
// +k8s:openapi-gen=true
// +groupName=discovery.k8s.io
package v1beta1 // import "k8s.io/api/discovery/v1beta1"

vendor/k8s.io/api/discovery/v1beta1/generated.pb.go (new file, 1730 lines); diff suppressed because it is too large

vendor/k8s.io/api/discovery/v1beta1/generated.proto (new file, 157 lines)

@ -0,0 +1,157 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by go-to-protobuf. Do not edit it manually!
syntax = 'proto2';
package k8s.io.api.discovery.v1beta1;
import "k8s.io/api/core/v1/generated.proto";
import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
import "k8s.io/apimachinery/pkg/runtime/generated.proto";
import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
// Package-wide variables from generator "generated".
option go_package = "v1beta1";
// Endpoint represents a single logical "backend" implementing a service.
message Endpoint {
// addresses of this endpoint. The contents of this field are interpreted
// according to the corresponding EndpointSlice addressType field. Consumers
// must handle different types of addresses in the context of their own
// capabilities. This must contain at least one address but no more than
// 100.
// +listType=set
repeated string addresses = 1;
// conditions contains information about the current status of the endpoint.
optional EndpointConditions conditions = 2;
// hostname of this endpoint. This field may be used by consumers of
// endpoints to distinguish endpoints from each other (e.g. in DNS names).
// Multiple endpoints which use the same hostname should be considered
// fungible (e.g. multiple A values in DNS). Must pass DNS Label (RFC 1123)
// validation.
// +optional
optional string hostname = 3;
// targetRef is a reference to a Kubernetes object that represents this
// endpoint.
// +optional
optional k8s.io.api.core.v1.ObjectReference targetRef = 4;
// topology contains arbitrary topology information associated with the
// endpoint. These key/value pairs must conform with the label format.
// https://kubernetes.io/docs/concepts/overview/working-with-objects/labels
// Topology may include a maximum of 16 key/value pairs. This includes, but
// is not limited to, the following well-known keys:
// * kubernetes.io/hostname: the value indicates the hostname of the node
// where the endpoint is located. This should match the corresponding
// node label.
// * topology.kubernetes.io/zone: the value indicates the zone where the
// endpoint is located. This should match the corresponding node label.
// * topology.kubernetes.io/region: the value indicates the region where the
// endpoint is located. This should match the corresponding node label.
// +optional
map<string, string> topology = 5;
}
// EndpointConditions represents the current condition of an endpoint.
message EndpointConditions {
// ready indicates that this endpoint is prepared to receive traffic,
// according to whatever system is managing the endpoint. A nil value
// indicates an unknown state. In most cases consumers should interpret this
// unknown state as ready.
// +optional
optional bool ready = 1;
}
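The nil-means-unknown convention translates directly into a small helper against the corresponding Go type (a sketch, not vendored code; the package and function names are illustrative):
package sliceutil
import discovery "k8s.io/api/discovery/v1beta1"
// endpointReady treats a nil Ready as the documented unknown state, which
// most consumers should interpret as ready.
func endpointReady(ep discovery.Endpoint) bool {
	return ep.Conditions.Ready == nil || *ep.Conditions.Ready
}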
// EndpointPort represents a Port used by an EndpointSlice
message EndpointPort {
// The name of this port. All ports in an EndpointSlice must have a unique
// name. If the EndpointSlice is derived from a Kubernetes service, this
// corresponds to the Service.ports[].name.
// Name must either be an empty string or pass DNS_LABEL validation:
// * must be no more than 63 characters long.
// * must consist of lower case alphanumeric characters or '-'.
// * must start and end with an alphanumeric character.
// Default is empty string.
optional string name = 1;
// The IP protocol for this port.
// Must be UDP, TCP, or SCTP.
// Default is TCP.
optional string protocol = 2;
// The port number of the endpoint.
// If this is not specified, ports are not restricted and must be
// interpreted in the context of the specific consumer.
optional int32 port = 3;
// The application protocol for this port.
// This field follows standard Kubernetes label syntax.
// Un-prefixed names are reserved for IANA standard service names (as per
// RFC-6335 and http://www.iana.org/assignments/service-names).
// Non-standard protocols should use prefixed names.
// Default is empty string.
optional string appProtocol = 4;
}
// EndpointSlice represents a subset of the endpoints that implement a service.
// For a given service there may be multiple EndpointSlice objects, selected by
// labels, which must be joined to produce the full set of endpoints.
message EndpointSlice {
// Standard object's metadata.
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
// addressType specifies the type of address carried by this EndpointSlice.
// All addresses in this slice must be the same type. This field is
// immutable after creation. The following address types are currently
// supported:
// * IPv4: Represents an IPv4 Address.
// * IPv6: Represents an IPv6 Address.
// * FQDN: Represents a Fully Qualified Domain Name.
optional string addressType = 4;
// endpoints is a list of unique endpoints in this slice. Each slice may
// include a maximum of 1000 endpoints.
// +listType=atomic
repeated Endpoint endpoints = 2;
// ports specifies the list of network ports exposed by each endpoint in
// this slice. Each port must have a unique name. When ports is empty, it
// indicates that there are no defined ports. When a port is defined with a
// nil port value, it indicates "all ports". Each slice may include a
// maximum of 100 ports.
// +optional
// +listType=atomic
repeated EndpointPort ports = 3;
}
// EndpointSliceList represents a list of endpoint slices
message EndpointSliceList {
// Standard list metadata.
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
// List of endpoint slices
// +listType=set
repeated EndpointSlice items = 2;
}

vendor/k8s.io/api/discovery/v1beta1/register.go (new file, 56 lines)

@ -0,0 +1,56 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "discovery.k8s.io"
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
// SchemeBuilder is the scheme builder with scheme init functions to run for this API package
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
// AddToScheme is a common registration function for mapping packaged scoped group & version keys to a scheme
AddToScheme = SchemeBuilder.AddToScheme
)
// Adds the list of known types to the given scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&EndpointSlice{},
&EndpointSliceList{},
)
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
return nil
}
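Typical registration, for reference (a sketch; Recognizes comes from apimachinery's runtime.Scheme):
package main
import (
	"fmt"
	discoveryv1beta1 "k8s.io/api/discovery/v1beta1"
	"k8s.io/apimachinery/pkg/runtime"
)
func main() {
	scheme := runtime.NewScheme()
	// Registers EndpointSlice and EndpointSliceList under discovery.k8s.io/v1beta1.
	if err := discoveryv1beta1.AddToScheme(scheme); err != nil {
		panic(err)
	}
	fmt.Println(scheme.Recognizes(discoveryv1beta1.SchemeGroupVersion.WithKind("EndpointSlice")))
}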

vendor/k8s.io/api/discovery/v1beta1/types.go (new file, 162 lines)

@ -0,0 +1,162 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// EndpointSlice represents a subset of the endpoints that implement a service.
// For a given service there may be multiple EndpointSlice objects, selected by
// labels, which must be joined to produce the full set of endpoints.
type EndpointSlice struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// addressType specifies the type of address carried by this EndpointSlice.
// All addresses in this slice must be the same type. This field is
// immutable after creation. The following address types are currently
// supported:
// * IPv4: Represents an IPv4 Address.
// * IPv6: Represents an IPv6 Address.
// * FQDN: Represents a Fully Qualified Domain Name.
AddressType AddressType `json:"addressType" protobuf:"bytes,4,rep,name=addressType"`
// endpoints is a list of unique endpoints in this slice. Each slice may
// include a maximum of 1000 endpoints.
// +listType=atomic
Endpoints []Endpoint `json:"endpoints" protobuf:"bytes,2,rep,name=endpoints"`
// ports specifies the list of network ports exposed by each endpoint in
// this slice. Each port must have a unique name. When ports is empty, it
// indicates that there are no defined ports. When a port is defined with a
// nil port value, it indicates "all ports". Each slice may include a
// maximum of 100 ports.
// +optional
// +listType=atomic
Ports []EndpointPort `json:"ports" protobuf:"bytes,3,rep,name=ports"`
}
// AddressType represents the type of address referred to by an endpoint.
type AddressType string
const (
// AddressTypeIP represents an IP Address.
// This address type has been deprecated and has been replaced by the IPv4
// and IPv6 address types. New resources with this address type will be
// considered invalid. This will be fully removed in 1.18.
// +deprecated
AddressTypeIP = AddressType("IP")
// AddressTypeIPv4 represents an IPv4 Address.
AddressTypeIPv4 = AddressType(v1.IPv4Protocol)
// AddressTypeIPv6 represents an IPv6 Address.
AddressTypeIPv6 = AddressType(v1.IPv6Protocol)
// AddressTypeFQDN represents a FQDN.
AddressTypeFQDN = AddressType("FQDN")
)
// Endpoint represents a single logical "backend" implementing a service.
type Endpoint struct {
// addresses of this endpoint. The contents of this field are interpreted
// according to the corresponding EndpointSlice addressType field. Consumers
// must handle different types of addresses in the context of their own
// capabilities. This must contain at least one address but no more than
// 100.
// +listType=set
Addresses []string `json:"addresses" protobuf:"bytes,1,rep,name=addresses"`
// conditions contains information about the current status of the endpoint.
Conditions EndpointConditions `json:"conditions,omitempty" protobuf:"bytes,2,opt,name=conditions"`
// hostname of this endpoint. This field may be used by consumers of
// endpoints to distinguish endpoints from each other (e.g. in DNS names).
// Multiple endpoints which use the same hostname should be considered
// fungible (e.g. multiple A values in DNS). Must pass DNS Label (RFC 1123)
// validation.
// +optional
Hostname *string `json:"hostname,omitempty" protobuf:"bytes,3,opt,name=hostname"`
// targetRef is a reference to a Kubernetes object that represents this
// endpoint.
// +optional
TargetRef *v1.ObjectReference `json:"targetRef,omitempty" protobuf:"bytes,4,opt,name=targetRef"`
// topology contains arbitrary topology information associated with the
// endpoint. These key/value pairs must conform with the label format.
// https://kubernetes.io/docs/concepts/overview/working-with-objects/labels
// Topology may include a maximum of 16 key/value pairs. This includes, but
// is not limited to the following well known keys:
// * kubernetes.io/hostname: the value indicates the hostname of the node
// where the endpoint is located. This should match the corresponding
// node label.
// * topology.kubernetes.io/zone: the value indicates the zone where the
// endpoint is located. This should match the corresponding node label.
// * topology.kubernetes.io/region: the value indicates the region where the
// endpoint is located. This should match the corresponding node label.
// +optional
Topology map[string]string `json:"topology,omitempty" protobuf:"bytes,5,opt,name=topology"`
}
// EndpointConditions represents the current condition of an endpoint.
type EndpointConditions struct {
// ready indicates that this endpoint is prepared to receive traffic,
// according to whatever system is managing the endpoint. A nil value
// indicates an unknown state. In most cases consumers should interpret this
// unknown state as ready.
// +optional
Ready *bool `json:"ready,omitempty" protobuf:"bytes,1,name=ready"`
}
// EndpointPort represents a Port used by an EndpointSlice
type EndpointPort struct {
// The name of this port. All ports in an EndpointSlice must have a unique
// name. If the EndpointSlice is derived from a Kubernetes service, this
// corresponds to the Service.ports[].name.
// Name must either be an empty string or pass DNS_LABEL validation:
// * must be no more than 63 characters long.
// * must consist of lower case alphanumeric characters or '-'.
// * must start and end with an alphanumeric character.
// Default is empty string.
Name *string `json:"name,omitempty" protobuf:"bytes,1,name=name"`
// The IP protocol for this port.
// Must be UDP, TCP, or SCTP.
// Default is TCP.
Protocol *v1.Protocol `json:"protocol,omitempty" protobuf:"bytes,2,name=protocol"`
// The port number of the endpoint.
// If this is not specified, ports are not restricted and must be
// interpreted in the context of the specific consumer.
Port *int32 `json:"port,omitempty" protobuf:"bytes,3,opt,name=port"`
// The application protocol for this port.
// This field follows standard Kubernetes label syntax.
// Un-prefixed names are reserved for IANA standard service names (as per
// RFC-6335 and http://www.iana.org/assignments/service-names).
// Non-standard protocols should use prefixed names.
// Default is empty string.
AppProtocol *string `json:"appProtocol,omitempty" protobuf:"bytes,4,name=appProtocol"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// EndpointSliceList represents a list of endpoint slices
type EndpointSliceList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata.
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// List of endpoint slices
// +listType=set
Items []EndpointSlice `json:"items" protobuf:"bytes,2,rep,name=items"`
}
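A minimal sketch of constructing one of these objects by hand, e.g. in controller or test code; names and addresses are illustrative. Optional scalar fields are pointers, so the sketch takes addresses of locals.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	discoveryv1beta1 "k8s.io/api/discovery/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	portName := "http"
	portNum := int32(8080)
	protocol := v1.ProtocolTCP
	ready := true

	slice := discoveryv1beta1.EndpointSlice{
		ObjectMeta:  metav1.ObjectMeta{Name: "my-svc-abc12"}, // name is illustrative
		AddressType: discoveryv1beta1.AddressTypeIPv4,
		Endpoints: []discoveryv1beta1.Endpoint{{
			Addresses:  []string{"10.0.0.5"},
			Conditions: discoveryv1beta1.EndpointConditions{Ready: &ready},
		}},
		Ports: []discoveryv1beta1.EndpointPort{{
			Name:     &portName,
			Port:     &portNum,
			Protocol: &protocol,
		}},
	}
	fmt.Println(slice.AddressType, len(slice.Endpoints), len(slice.Ports))
}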


@ -0,0 +1,86 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
// This file contains a collection of methods that can be used from go-restful to
// generate Swagger API documentation for its models. Please read this PR for more
// information on the implementation: https://github.com/emicklei/go-restful/pull/215
//
// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
// they are on one line! For multiple line or blocks that you want to ignore use ---.
// Any context after a --- is ignored.
//
// Those methods can be generated by using hack/update-generated-swagger-docs.sh
// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
var map_Endpoint = map[string]string{
"": "Endpoint represents a single logical \"backend\" implementing a service.",
"addresses": "addresses of this endpoint. The contents of this field are interpreted according to the corresponding EndpointSlice addressType field. Consumers must handle different types of addresses in the context of their own capabilities. This must contain at least one address but no more than 100.",
"conditions": "conditions contains information about the current status of the endpoint.",
"hostname": "hostname of this endpoint. This field may be used by consumers of endpoints to distinguish endpoints from each other (e.g. in DNS names). Multiple endpoints which use the same hostname should be considered fungible (e.g. multiple A values in DNS). Must pass DNS Label (RFC 1123) validation.",
"targetRef": "targetRef is a reference to a Kubernetes object that represents this endpoint.",
"topology": "topology contains arbitrary topology information associated with the endpoint. These key/value pairs must conform with the label format. https://kubernetes.io/docs/concepts/overview/working-with-objects/labels Topology may include a maximum of 16 key/value pairs. This includes, but is not limited to the following well known keys: * kubernetes.io/hostname: the value indicates the hostname of the node\n where the endpoint is located. This should match the corresponding\n node label.\n* topology.kubernetes.io/zone: the value indicates the zone where the\n endpoint is located. This should match the corresponding node label.\n* topology.kubernetes.io/region: the value indicates the region where the\n endpoint is located. This should match the corresponding node label.",
}
func (Endpoint) SwaggerDoc() map[string]string {
return map_Endpoint
}
var map_EndpointConditions = map[string]string{
"": "EndpointConditions represents the current condition of an endpoint.",
"ready": "ready indicates that this endpoint is prepared to receive traffic, according to whatever system is managing the endpoint. A nil value indicates an unknown state. In most cases consumers should interpret this unknown state as ready.",
}
func (EndpointConditions) SwaggerDoc() map[string]string {
return map_EndpointConditions
}
var map_EndpointPort = map[string]string{
"": "EndpointPort represents a Port used by an EndpointSlice",
"name": "The name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.",
"protocol": "The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.",
"port": "The port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer.",
"appProtocol": "The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names. Default is empty string.",
}
func (EndpointPort) SwaggerDoc() map[string]string {
return map_EndpointPort
}
var map_EndpointSlice = map[string]string{
"": "EndpointSlice represents a subset of the endpoints that implement a service. For a given service there may be multiple EndpointSlice objects, selected by labels, which must be joined to produce the full set of endpoints.",
"metadata": "Standard object's metadata.",
"addressType": "addressType specifies the type of address carried by this EndpointSlice. All addresses in this slice must be the same type. This field is immutable after creation. The following address types are currently supported: * IPv4: Represents an IPv4 Address. * IPv6: Represents an IPv6 Address. * FQDN: Represents a Fully Qualified Domain Name.",
"endpoints": "endpoints is a list of unique endpoints in this slice. Each slice may include a maximum of 1000 endpoints.",
"ports": "ports specifies the list of network ports exposed by each endpoint in this slice. Each port must have a unique name. When ports is empty, it indicates that there are no defined ports. When a port is defined with a nil port value, it indicates \"all ports\". Each slice may include a maximum of 100 ports.",
}
func (EndpointSlice) SwaggerDoc() map[string]string {
return map_EndpointSlice
}
var map_EndpointSliceList = map[string]string{
"": "EndpointSliceList represents a list of endpoint slices",
"metadata": "Standard list metadata.",
"items": "List of endpoint slices",
}
func (EndpointSliceList) SwaggerDoc() map[string]string {
return map_EndpointSliceList
}
// AUTO-GENERATED FUNCTIONS END HERE
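The generated SwaggerDoc methods use value receivers, so a zero value is enough to look up a field's documentation string. A minimal sketch (import alias assumed):

package main

import (
	"fmt"

	discoveryv1beta1 "k8s.io/api/discovery/v1beta1"
)

func main() {
	// Look up the doc string the generator attached to the "port" field.
	docs := discoveryv1beta1.EndpointPort{}.SwaggerDoc()
	fmt.Println(docs["port"])
}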


@ -0,0 +1,28 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
const (
// LabelServiceName is used to indicate the name of a Kubernetes service.
LabelServiceName = "kubernetes.io/service-name"
// LabelManagedBy is used to indicate the controller or entity that manages
// an EndpointSlice. This label aims to enable different EndpointSlice
// objects to be managed by different controllers or entities within the
// same cluster. It is highly recommended to configure this label for all
// EndpointSlices.
LabelManagedBy = "endpointslice.kubernetes.io/managed-by"
)
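A minimal sketch of how these well-known labels tend to be used: building a label selector string that picks out the EndpointSlices backing one service. The service name is hypothetical.

package main

import (
	"fmt"

	discoveryv1beta1 "k8s.io/api/discovery/v1beta1"
	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// Selector for all EndpointSlices backing the (hypothetical) service
	// "my-svc", e.g. for the LabelSelector of a client-go List call.
	selector := labels.Set{
		discoveryv1beta1.LabelServiceName: "my-svc",
	}.String()
	fmt.Println(selector) // kubernetes.io/service-name=my-svc
}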


@ -0,0 +1,195 @@
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1beta1
import (
v1 "k8s.io/api/core/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Endpoint) DeepCopyInto(out *Endpoint) {
*out = *in
if in.Addresses != nil {
in, out := &in.Addresses, &out.Addresses
*out = make([]string, len(*in))
copy(*out, *in)
}
in.Conditions.DeepCopyInto(&out.Conditions)
if in.Hostname != nil {
in, out := &in.Hostname, &out.Hostname
*out = new(string)
**out = **in
}
if in.TargetRef != nil {
in, out := &in.TargetRef, &out.TargetRef
*out = new(v1.ObjectReference)
**out = **in
}
if in.Topology != nil {
in, out := &in.Topology, &out.Topology
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Endpoint.
func (in *Endpoint) DeepCopy() *Endpoint {
if in == nil {
return nil
}
out := new(Endpoint)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EndpointConditions) DeepCopyInto(out *EndpointConditions) {
*out = *in
if in.Ready != nil {
in, out := &in.Ready, &out.Ready
*out = new(bool)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointConditions.
func (in *EndpointConditions) DeepCopy() *EndpointConditions {
if in == nil {
return nil
}
out := new(EndpointConditions)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EndpointPort) DeepCopyInto(out *EndpointPort) {
*out = *in
if in.Name != nil {
in, out := &in.Name, &out.Name
*out = new(string)
**out = **in
}
if in.Protocol != nil {
in, out := &in.Protocol, &out.Protocol
*out = new(v1.Protocol)
**out = **in
}
if in.Port != nil {
in, out := &in.Port, &out.Port
*out = new(int32)
**out = **in
}
if in.AppProtocol != nil {
in, out := &in.AppProtocol, &out.AppProtocol
*out = new(string)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointPort.
func (in *EndpointPort) DeepCopy() *EndpointPort {
if in == nil {
return nil
}
out := new(EndpointPort)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EndpointSlice) DeepCopyInto(out *EndpointSlice) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
if in.Endpoints != nil {
in, out := &in.Endpoints, &out.Endpoints
*out = make([]Endpoint, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Ports != nil {
in, out := &in.Ports, &out.Ports
*out = make([]EndpointPort, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointSlice.
func (in *EndpointSlice) DeepCopy() *EndpointSlice {
if in == nil {
return nil
}
out := new(EndpointSlice)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *EndpointSlice) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EndpointSliceList) DeepCopyInto(out *EndpointSliceList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]EndpointSlice, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointSliceList.
func (in *EndpointSliceList) DeepCopy() *EndpointSliceList {
if in == nil {
return nil
}
out := new(EndpointSliceList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *EndpointSliceList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
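A minimal sketch of why these generated helpers matter: DeepCopy duplicates slices, maps, and pointer fields, so mutating the copy leaves the original intact, where a plain struct assignment would alias them.

package main

import (
	"fmt"

	discoveryv1beta1 "k8s.io/api/discovery/v1beta1"
)

func main() {
	ready := true
	orig := &discoveryv1beta1.Endpoint{
		Addresses:  []string{"10.0.0.5"},
		Conditions: discoveryv1beta1.EndpointConditions{Ready: &ready},
	}

	// Mutate the copy; the original's slice and pointer fields are unaffected.
	clone := orig.DeepCopy()
	clone.Addresses[0] = "10.0.0.6"
	*clone.Conditions.Ready = false

	fmt.Println(orig.Addresses[0], *orig.Conditions.Ready) // 10.0.0.5 true
}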


@ -985,7 +985,7 @@ type AllowedHostPath struct {
// Deprecated: use FSType from policy API Group instead.
type FSType string
-var (
+const (
AzureFile FSType = "azureFile"
Flocker FSType = "flocker"
FlexVolume FSType = "flexVolume"

vendor/k8s.io/api/flowcontrol/v1alpha1/BUILD.bazel generated vendored Normal file

@ -0,0 +1,22 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"generated.pb.go",
"register.go",
"types.go",
"types_swagger_doc_generated.go",
"zz_generated.deepcopy.go",
],
importmap = "k8s.io/kops/vendor/k8s.io/api/flowcontrol/v1alpha1",
importpath = "k8s.io/api/flowcontrol/v1alpha1",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/gogo/protobuf/proto:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
],
)

vendor/k8s.io/api/flowcontrol/v1alpha1/doc.go generated vendored Normal file

@ -0,0 +1,24 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// +k8s:deepcopy-gen=package
// +k8s:protobuf-gen=package
// +k8s:openapi-gen=true
// +groupName=flowcontrol.apiserver.k8s.io
// Package v1alpha1 holds api types of version v1alpha1 for group "flowcontrol.apiserver.k8s.io".
package v1alpha1 // import "k8s.io/api/flowcontrol/v1alpha1"

vendor/k8s.io/api/flowcontrol/v1alpha1/generated.pb.go generated vendored Normal file

File diff suppressed because it is too large

vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto generated vendored Normal file

@ -0,0 +1,436 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by go-to-protobuf. Do not edit it manually!
syntax = 'proto2';
package k8s.io.api.flowcontrol.v1alpha1;
import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
import "k8s.io/apimachinery/pkg/runtime/generated.proto";
import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
// Package-wide variables from generator "generated".
option go_package = "v1alpha1";
// FlowDistinguisherMethod specifies the method of a flow distinguisher.
message FlowDistinguisherMethod {
// `type` is the type of flow distinguisher method
// The supported types are "ByUser" and "ByNamespace".
// Required.
optional string type = 1;
}
// FlowSchema defines the schema of a group of flows. Note that a flow is made up of a set of inbound API requests with
// similar attributes and is identified by a pair of strings: the name of the FlowSchema and a "flow distinguisher".
message FlowSchema {
// `metadata` is the standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
// `spec` is the specification of the desired behavior of a FlowSchema.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
// +optional
optional FlowSchemaSpec spec = 2;
// `status` is the current status of a FlowSchema.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
// +optional
optional FlowSchemaStatus status = 3;
}
// FlowSchemaCondition describes conditions for a FlowSchema.
message FlowSchemaCondition {
// `type` is the type of the condition.
// Required.
optional string type = 1;
// `status` is the status of the condition.
// Can be True, False, Unknown.
// Required.
optional string status = 2;
// `lastTransitionTime` is the last time the condition transitioned from one status to another.
optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
// `reason` is a unique, one-word, CamelCase reason for the condition's last transition.
optional string reason = 4;
// `message` is a human-readable message indicating details about last transition.
optional string message = 5;
}
// FlowSchemaList is a list of FlowSchema objects.
message FlowSchemaList {
// `metadata` is the standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
// `items` is a list of FlowSchemas.
// +listType=set
repeated FlowSchema items = 2;
}
// FlowSchemaSpec describes how the FlowSchema's specification looks.
message FlowSchemaSpec {
// `priorityLevelConfiguration` should reference a PriorityLevelConfiguration in the cluster. If the reference cannot
// be resolved, the FlowSchema will be ignored and marked as invalid in its status.
// Required.
optional PriorityLevelConfigurationReference priorityLevelConfiguration = 1;
// `matchingPrecedence` is used to choose among the FlowSchemas that match a given request. The chosen
// FlowSchema is among those with the numerically lowest (which we take to be logically highest)
// MatchingPrecedence. Each MatchingPrecedence value must be non-negative.
// Note that if the precedence is not specified or zero, it will default to 1000.
// +optional
optional int32 matchingPrecedence = 2;
// `distinguisherMethod` defines how to compute the flow distinguisher for requests that match this schema.
// `nil` specifies that the distinguisher is disabled and thus will always be the empty string.
// +optional
optional FlowDistinguisherMethod distinguisherMethod = 3;
// `rules` describes which requests will match this flow schema. This FlowSchema matches a request if and only if
// at least one member of rules matches the request.
// If it is an empty slice, there will be no requests matching the FlowSchema.
// +listType=set
// +optional
repeated PolicyRulesWithSubjects rules = 4;
}
// FlowSchemaStatus represents the current state of a FlowSchema.
message FlowSchemaStatus {
// `conditions` is a list of the current states of FlowSchema.
// +listType=map
// +listMapKey=type
// +optional
repeated FlowSchemaCondition conditions = 1;
}
// GroupSubject holds detailed information for group-kind subject.
message GroupSubject {
// name is the user group that matches, or "*" to match all user groups.
// See https://github.com/kubernetes/apiserver/blob/master/pkg/authentication/user/user.go for some
// well-known group names.
// Required.
optional string name = 1;
}
// LimitResponse defines how to handle requests that can not be executed right now.
// +union
message LimitResponse {
// `type` is "Queue" or "Reject".
// "Queue" means that requests that can not be executed upon arrival
// are held in a queue until they can be executed or a queuing limit
// is reached.
// "Reject" means that requests that can not be executed upon arrival
// are rejected.
// Required.
// +unionDiscriminator
optional string type = 1;
// `queuing` holds the configuration parameters for queuing.
// This field may be non-empty only if `type` is `"Queue"`.
// +optional
optional QueuingConfiguration queuing = 2;
}
// LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits.
// It addresses two issues:
// * How are requests for this priority level limited?
// * What should be done with requests that exceed the limit?
message LimitedPriorityLevelConfiguration {
// `assuredConcurrencyShares` (ACS) configures the execution
// limit, which is a limit on the number of requests of this
// priority level that may be executing at a given time. ACS must
// be a positive number. The server's concurrency limit (SCL) is
// divided among the concurrency-controlled priority levels in
// proportion to their assured concurrency shares. This produces
// the assured concurrency value (ACV) --- the number of requests
// that may be executing at a time --- for each such priority
// level:
//
// ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) )
//
// Bigger numbers of ACS mean more reserved concurrent requests (at the
// expense of every other PL).
// This field has a default value of 30.
// +optional
optional int32 assuredConcurrencyShares = 1;
// `limitResponse` indicates what to do with requests that can not be executed right now
optional LimitResponse limitResponse = 2;
}
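A small worked example of the ACV formula above (a sketch, not apiserver code; the level names and SCL of 600 are made up): with two limited priority levels holding 30 and 10 assured concurrency shares, the levels are assured ceil(600*30/40) = 450 and ceil(600*10/40) = 150 concurrent requests.

package main

import (
	"fmt"
	"math"
)

// acv applies the documented formula:
// ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) )
func acv(scl int, shares map[string]int) map[string]int {
	sum := 0
	for _, s := range shares {
		sum += s
	}
	out := make(map[string]int, len(shares))
	for level, s := range shares {
		out[level] = int(math.Ceil(float64(scl) * float64(s) / float64(sum)))
	}
	return out
}

func main() {
	fmt.Println(acv(600, map[string]int{"high": 30, "low": 10})) // map[high:450 low:150]
}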
// NonResourcePolicyRule is a predicate that matches non-resource requests according to their verb and the
// target non-resource URL. A NonResourcePolicyRule matches a request if and only if both (a) at least one member
// of verbs matches the request and (b) at least one member of nonResourceURLs matches the request.
message NonResourcePolicyRule {
// `verbs` is a list of matching verbs and may not be empty.
// "*" matches all verbs. If it is present, it must be the only entry.
// +listType=set
// Required.
repeated string verbs = 1;
// `nonResourceURLs` is a set of URL prefixes that a user should have access to and may not be empty.
// For example:
// - "/healthz" is legal
// - "/hea*" is illegal
// - "/hea" is legal but matches nothing
// - "/hea/*" also matches nothing
// - "/healthz/*" matches all per-component health checks.
// "*" matches all non-resource urls. if it is present, it must be the only entry.
// +listType=set
// Required.
repeated string nonResourceURLs = 6;
}
// PolicyRulesWithSubjects prescribes a test that applies to a request to an apiserver. The test considers the subject
// making the request, the verb being requested, and the resource to be acted upon. This PolicyRulesWithSubjects matches
// a request if and only if both (a) at least one member of subjects matches the request and (b) at least one member
// of resourceRules or nonResourceRules matches the request.
message PolicyRulesWithSubjects {
// subjects is the list of normal users, serviceaccounts, or groups that this rule cares about.
// There must be at least one member in this slice.
// A slice that includes both the system:authenticated and system:unauthenticated user groups matches every request.
// +listType=set
// Required.
repeated Subject subjects = 1;
// `resourceRules` is a slice of ResourcePolicyRules that identify matching requests according to their verb and the
// target resource.
// At least one of `resourceRules` and `nonResourceRules` has to be non-empty.
// +listType=set
// +optional
repeated ResourcePolicyRule resourceRules = 2;
// `nonResourceRules` is a list of NonResourcePolicyRules that identify matching requests according to their verb
// and the target non-resource URL.
// +listType=set
// +optional
repeated NonResourcePolicyRule nonResourceRules = 3;
}
// PriorityLevelConfiguration represents the configuration of a priority level.
message PriorityLevelConfiguration {
// `metadata` is the standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
// `spec` is the specification of the desired behavior of a "request-priority".
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
// +optional
optional PriorityLevelConfigurationSpec spec = 2;
// `status` is the current status of a "request-priority".
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
// +optional
optional PriorityLevelConfigurationStatus status = 3;
}
// PriorityLevelConfigurationCondition defines the condition of priority level.
message PriorityLevelConfigurationCondition {
// `type` is the type of the condition.
// Required.
optional string type = 1;
// `status` is the status of the condition.
// Can be True, False, Unknown.
// Required.
optional string status = 2;
// `lastTransitionTime` is the last time the condition transitioned from one status to another.
optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
// `reason` is a unique, one-word, CamelCase reason for the condition's last transition.
optional string reason = 4;
// `message` is a human-readable message indicating details about last transition.
optional string message = 5;
}
// PriorityLevelConfigurationList is a list of PriorityLevelConfiguration objects.
message PriorityLevelConfigurationList {
// `metadata` is the standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
// `items` is a list of request-priorities.
// +listType=set
repeated PriorityLevelConfiguration items = 2;
}
// PriorityLevelConfigurationReference contains information that points to the "request-priority" being used.
message PriorityLevelConfigurationReference {
// `name` is the name of the priority level configuration being referenced
// Required.
optional string name = 1;
}
// PriorityLevelConfigurationSpec specifies the configuration of a priority level.
// +union
message PriorityLevelConfigurationSpec {
// `type` indicates whether this priority level is subject to
// limitation on request execution. A value of `"Exempt"` means
// that requests of this priority level are not subject to a limit
// (and thus are never queued) and do not detract from the
// capacity made available to other priority levels. A value of
// `"Limited"` means that (a) requests of this priority level
// _are_ subject to limits and (b) some of the server's limited
// capacity is made available exclusively to this priority level.
// Required.
// +unionDiscriminator
optional string type = 1;
// `limited` specifies how requests are handled for a Limited priority level.
// This field must be non-empty if and only if `type` is `"Limited"`.
// +optional
optional LimitedPriorityLevelConfiguration limited = 2;
}
// PriorityLevelConfigurationStatus represents the current state of a "request-priority".
message PriorityLevelConfigurationStatus {
// `conditions` is the current state of "request-priority".
// +listType=map
// +listMapKey=type
// +optional
repeated PriorityLevelConfigurationCondition conditions = 1;
}
// QueuingConfiguration holds the configuration parameters for queuing
message QueuingConfiguration {
// `queues` is the number of queues for this priority level. The
// queues exist independently at each apiserver. The value must be
// positive. Setting it to 1 effectively precludes
// shuffle sharding and thus makes the distinguisher method of
// associated flow schemas irrelevant. This field has a default
// value of 64.
// +optional
optional int32 queues = 1;
// `handSize` is a small positive number that configures the
// shuffle sharding of requests into queues. When enqueuing a request
// at this priority level the request's flow identifier (a string
// pair) is hashed and the hash value is used to shuffle the list
// of queues and deal a hand of the size specified here. The
// request is put into one of the shortest queues in that hand.
// `handSize` must be no larger than `queues`, and should be
// significantly smaller (so that a few heavy flows do not
// saturate most of the queues). See the user-facing
// documentation for more extensive guidance on setting this
// field. This field has a default value of 8.
// +optional
optional int32 handSize = 2;
// `queueLengthLimit` is the maximum number of requests allowed to
// be waiting in a given queue of this priority level at a time;
// excess requests are rejected. This value must be positive. If
// not specified, it will be defaulted to 50.
// +optional
optional int32 queueLengthLimit = 3;
}
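A toy sketch of the queuing behavior described above, under stated assumptions (the real apiserver dealer is more elaborate): hash the flow identifier, use the hash to deal a hand of `handSize` queues out of `queues`, and enqueue on the shortest queue in the hand.

package main

import (
	"fmt"
	"hash/fnv"
	"math/rand"
)

// pickQueue deals a hand of handSize queues from the hashed flow identifier
// and returns the index of the shortest queue in that hand.
func pickQueue(flowID string, queueLengths []int, handSize int) int {
	h := fnv.New64a()
	h.Write([]byte(flowID))
	rng := rand.New(rand.NewSource(int64(h.Sum64())))

	best := -1
	for _, q := range rng.Perm(len(queueLengths))[:handSize] {
		if best == -1 || queueLengths[q] < queueLengths[best] {
			best = q
		}
	}
	return best
}

func main() {
	lengths := make([]int, 64) // the default of 64 queues
	lengths[3] = 50            // one queue is already busy
	fmt.Println(pickQueue("user-a", lengths, 8)) // default handSize of 8
}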
// ResourcePolicyRule is a predicate that matches some resource
// requests, testing the request's verb and the target resource. A
// ResourcePolicyRule matches a resource request if and only if: (a)
// at least one member of verbs matches the request, (b) at least one
// member of apiGroups matches the request, (c) at least one member of
// resources matches the request, and (d) at least one member of
// namespaces matches the request.
message ResourcePolicyRule {
// `verbs` is a list of matching verbs and may not be empty.
// "*" matches all verbs and, if present, must be the only entry.
// +listType=set
// Required.
repeated string verbs = 1;
// `apiGroups` is a list of matching API groups and may not be empty.
// "*" matches all API groups and, if present, must be the only entry.
// +listType=set
// Required.
repeated string apiGroups = 2;
// `resources` is a list of matching resources (i.e., lowercase
// and plural) with, if desired, subresource. For example, [
// "services", "nodes/status" ]. This list may not be empty.
// "*" matches all resources and, if present, must be the only entry.
// Required.
// +listType=set
repeated string resources = 3;
// `clusterScope` indicates whether to match requests that do not
// specify a namespace (which happens either because the resource
// is not namespaced or the request targets all namespaces).
// If this field is omitted or false then the `namespaces` field
// must contain a non-empty list.
// +optional
optional bool clusterScope = 4;
// `namespaces` is a list of target namespaces that restricts
// matches. A request that specifies a target namespace matches
// only if either (a) this list contains that target namespace or
// (b) this list contains "*". Note that "*" matches any
// specified namespace but does not match a request that _does
// not specify_ a namespace (see the `clusterScope` field for
// that).
// This list may be empty, but only if `clusterScope` is true.
// +optional
// +listType=set
repeated string namespaces = 5;
}
// ServiceAccountSubject holds detailed information for service-account-kind subject.
message ServiceAccountSubject {
// `namespace` is the namespace of matching ServiceAccount objects.
// Required.
optional string namespace = 1;
// `name` is the name of matching ServiceAccount objects, or "*" to match regardless of name.
// Required.
optional string name = 2;
}
// Subject matches the originator of a request, as identified by the request authentication system. There are three
// ways of matching an originator: by user, group, or service account.
// +union
message Subject {
// Required
// +unionDiscriminator
optional string kind = 1;
// +optional
optional UserSubject user = 2;
// +optional
optional GroupSubject group = 3;
// +optional
optional ServiceAccountSubject serviceAccount = 4;
}
// UserSubject holds detailed information for user-kind subject.
message UserSubject {
// `name` is the username that matches, or "*" to match all usernames.
// Required.
optional string name = 1;
}

vendor/k8s.io/api/flowcontrol/v1alpha1/register.go generated vendored Normal file

@ -0,0 +1,58 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the name of the API group
const GroupName = "flowcontrol.apiserver.k8s.io"
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
// SchemeBuilder installs the api group to a scheme
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
// AddToScheme adds api to a scheme
AddToScheme = SchemeBuilder.AddToScheme
)
// Adds the list of known types to the given scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&FlowSchema{},
&FlowSchemaList{},
&PriorityLevelConfiguration{},
&PriorityLevelConfigurationList{},
)
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
return nil
}

vendor/k8s.io/api/flowcontrol/v1alpha1/types.go generated vendored Normal file

@ -0,0 +1,513 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// These are valid wildcards.
const (
APIGroupAll = "*"
ResourceAll = "*"
VerbAll = "*"
NonResourceAll = "*"
NameAll = "*"
NamespaceEvery = "*" // matches every particular namespace
)
// System preset priority level names
const (
PriorityLevelConfigurationNameExempt = "exempt"
)
// Conditions
const (
FlowSchemaConditionDangling = "Dangling"
PriorityLevelConfigurationConditionConcurrencyShared = "ConcurrencyShared"
)
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// FlowSchema defines the schema of a group of flows. Note that a flow is made up of a set of inbound API requests with
// similar attributes and is identified by a pair of strings: the name of the FlowSchema and a "flow distinguisher".
type FlowSchema struct {
metav1.TypeMeta `json:",inline"`
// `metadata` is the standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// `spec` is the specification of the desired behavior of a FlowSchema.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
// +optional
Spec FlowSchemaSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
// `status` is the current status of a FlowSchema.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
// +optional
Status FlowSchemaStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// FlowSchemaList is a list of FlowSchema objects.
type FlowSchemaList struct {
metav1.TypeMeta `json:",inline"`
// `metadata` is the standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// `items` is a list of FlowSchemas.
// +listType=set
Items []FlowSchema `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// FlowSchemaSpec describes how the FlowSchema's specification looks.
type FlowSchemaSpec struct {
// `priorityLevelConfiguration` should reference a PriorityLevelConfiguration in the cluster. If the reference cannot
// be resolved, the FlowSchema will be ignored and marked as invalid in its status.
// Required.
PriorityLevelConfiguration PriorityLevelConfigurationReference `json:"priorityLevelConfiguration" protobuf:"bytes,1,opt,name=priorityLevelConfiguration"`
// `matchingPrecedence` is used to choose among the FlowSchemas that match a given request. The chosen
// FlowSchema is among those with the numerically lowest (which we take to be logically highest)
// MatchingPrecedence. Each MatchingPrecedence value must be non-negative.
// Note that if the precedence is not specified or zero, it will default to 1000.
// +optional
MatchingPrecedence int32 `json:"matchingPrecedence" protobuf:"varint,2,opt,name=matchingPrecedence"`
// `distinguisherMethod` defines how to compute the flow distinguisher for requests that match this schema.
// `nil` specifies that the distinguisher is disabled and thus will always be the empty string.
// +optional
DistinguisherMethod *FlowDistinguisherMethod `json:"distinguisherMethod,omitempty" protobuf:"bytes,3,opt,name=distinguisherMethod"`
// `rules` describes which requests will match this flow schema. This FlowSchema matches a request if and only if
// at least one member of rules matches the request.
// If it is an empty slice, there will be no requests matching the FlowSchema.
// +listType=set
// +optional
Rules []PolicyRulesWithSubjects `json:"rules,omitempty" protobuf:"bytes,4,rep,name=rules"`
}
// FlowDistinguisherMethodType is the type of flow distinguisher method
type FlowDistinguisherMethodType string
// These are valid flow-distinguisher methods.
const (
// FlowDistinguisherMethodByUserType specifies that the flow distinguisher is the username in the request.
// This type is used to provide some insulation between users.
FlowDistinguisherMethodByUserType FlowDistinguisherMethodType = "ByUser"
// FlowDistinguisherMethodByNamespaceType specifies that the flow distinguisher is the namespace of the
// object that the request acts upon. If the object is not namespaced, or if the request is a non-resource
// request, then the distinguisher will be the empty string. An example usage of this type is to provide
// some insulation between tenants in a situation where there are multiple tenants and each namespace
// is dedicated to a tenant.
FlowDistinguisherMethodByNamespaceType FlowDistinguisherMethodType = "ByNamespace"
)
// FlowDistinguisherMethod specifies the method of a flow distinguisher.
type FlowDistinguisherMethod struct {
// `type` is the type of flow distinguisher method
// The supported types are "ByUser" and "ByNamespace".
// Required.
Type FlowDistinguisherMethodType `json:"type" protobuf:"bytes,1,opt,name=type"`
}
// PriorityLevelConfigurationReference contains information that points to the "request-priority" being used.
type PriorityLevelConfigurationReference struct {
// `name` is the name of the priority level configuration being referenced
// Required.
Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
}
// PolicyRulesWithSubjects prescribes a test that applies to a request to an apiserver. The test considers the subject
// making the request, the verb being requested, and the resource to be acted upon. This PolicyRulesWithSubjects matches
// a request if and only if both (a) at least one member of subjects matches the request and (b) at least one member
// of resourceRules or nonResourceRules matches the request.
type PolicyRulesWithSubjects struct {
// subjects is the list of normal users, serviceaccounts, or groups that this rule cares about.
// There must be at least one member in this slice.
// A slice that includes both the system:authenticated and system:unauthenticated user groups matches every request.
// +listType=set
// Required.
Subjects []Subject `json:"subjects" protobuf:"bytes,1,rep,name=subjects"`
// `resourceRules` is a slice of ResourcePolicyRules that identify matching requests according to their verb and the
// target resource.
// At least one of `resourceRules` and `nonResourceRules` has to be non-empty.
// +listType=set
// +optional
ResourceRules []ResourcePolicyRule `json:"resourceRules,omitempty" protobuf:"bytes,2,opt,name=resourceRules"`
// `nonResourceRules` is a list of NonResourcePolicyRules that identify matching requests according to their verb
// and the target non-resource URL.
// +listType=set
// +optional
NonResourceRules []NonResourcePolicyRule `json:"nonResourceRules,omitempty" protobuf:"bytes,3,opt,name=nonResourceRules"`
}
// Subject matches the originator of a request, as identified by the request authentication system. There are three
// ways of matching an originator: by user, group, or service account.
// +union
type Subject struct {
// Required
// +unionDiscriminator
Kind SubjectKind `json:"kind" protobuf:"bytes,1,opt,name=kind"`
// +optional
User *UserSubject `json:"user,omitempty" protobuf:"bytes,2,opt,name=user"`
// +optional
Group *GroupSubject `json:"group,omitempty" protobuf:"bytes,3,opt,name=group"`
// +optional
ServiceAccount *ServiceAccountSubject `json:"serviceAccount,omitempty" protobuf:"bytes,4,opt,name=serviceAccount"`
}
// SubjectKind is the kind of subject.
type SubjectKind string
// Supported subject kinds.
const (
SubjectKindUser SubjectKind = "User"
SubjectKindGroup SubjectKind = "Group"
SubjectKindServiceAccount SubjectKind = "ServiceAccount"
)
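A minimal sketch of the union convention these markers describe: `kind` is the discriminator, and exactly the matching member is set while the others stay nil. The import alias is an assumption.

package main

import (
	"fmt"

	flowcontrolv1alpha1 "k8s.io/api/flowcontrol/v1alpha1"
)

func main() {
	// kind discriminates the union; only the matching member is set.
	subject := flowcontrolv1alpha1.Subject{
		Kind: flowcontrolv1alpha1.SubjectKindGroup,
		Group: &flowcontrolv1alpha1.GroupSubject{
			Name: "system:authenticated",
		},
	}
	fmt.Println(subject.Kind, subject.Group.Name)
}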
// UserSubject holds detailed information for user-kind subject.
type UserSubject struct {
// `name` is the username that matches, or "*" to match all usernames.
// Required.
Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
}
// GroupSubject holds detailed information for group-kind subject.
type GroupSubject struct {
// name is the user group that matches, or "*" to match all user groups.
// See https://github.com/kubernetes/apiserver/blob/master/pkg/authentication/user/user.go for some
// well-known group names.
// Required.
Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
}
// ServiceAccountSubject holds detailed information for service-account-kind subject.
type ServiceAccountSubject struct {
// `namespace` is the namespace of matching ServiceAccount objects.
// Required.
Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"`
// `name` is the name of matching ServiceAccount objects, or "*" to match regardless of name.
// Required.
Name string `json:"name" protobuf:"bytes,2,opt,name=name"`
}
// ResourcePolicyRule is a predicate that matches some resource
// requests, testing the request's verb and the target resource. A
// ResourcePolicyRule matches a resource request if and only if: (a)
// at least one member of verbs matches the request, (b) at least one
// member of apiGroups matches the request, (c) at least one member of
// resources matches the request, and (d) at least one member of
// namespaces matches the request.
type ResourcePolicyRule struct {
// `verbs` is a list of matching verbs and may not be empty.
// "*" matches all verbs and, if present, must be the only entry.
// +listType=set
// Required.
Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"`
// `apiGroups` is a list of matching API groups and may not be empty.
// "*" matches all API groups and, if present, must be the only entry.
// +listType=set
// Required.
APIGroups []string `json:"apiGroups" protobuf:"bytes,2,rep,name=apiGroups"`
// `resources` is a list of matching resources (i.e., lowercase
// and plural) with, if desired, subresource. For example, [
// "services", "nodes/status" ]. This list may not be empty.
// "*" matches all resources and, if present, must be the only entry.
// Required.
// +listType=set
Resources []string `json:"resources" protobuf:"bytes,3,rep,name=resources"`
// `clusterScope` indicates whether to match requests that do not
// specify a namespace (which happens either because the resource
// is not namespaced or the request targets all namespaces).
// If this field is omitted or false then the `namespaces` field
// must contain a non-empty list.
// +optional
ClusterScope bool `json:"clusterScope,omitempty" protobuf:"varint,4,opt,name=clusterScope"`
// `namespaces` is a list of target namespaces that restricts
// matches. A request that specifies a target namespace matches
// only if either (a) this list contains that target namespace or
// (b) this list contains "*". Note that "*" matches any
// specified namespace but does not match a request that _does
// not specify_ a namespace (see the `clusterScope` field for
// that).
// This list may be empty, but only if `clusterScope` is true.
// +optional
// +listType=set
Namespaces []string `json:"namespaces" protobuf:"bytes,5,rep,name=namespaces"`
}
// NonResourcePolicyRule is a predicate that matches non-resource requests according to their verb and the
// target non-resource URL. A NonResourcePolicyRule matches a request if and only if both (a) at least one member
// of verbs matches the request and (b) at least one member of nonResourceURLs matches the request.
type NonResourcePolicyRule struct {
// `verbs` is a list of matching verbs and may not be empty.
// "*" matches all verbs. If it is present, it must be the only entry.
// +listType=set
// Required.
Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"`
// `nonResourceURLs` is a set of URL prefixes that a user should have access to and may not be empty.
// For example:
// - "/healthz" is legal
// - "/hea*" is illegal
// - "/hea" is legal but matches nothing
// - "/hea/*" also matches nothing
// - "/healthz/*" matches all per-component health checks.
// "*" matches all non-resource urls. if it is present, it must be the only entry.
// +listType=set
// Required.
NonResourceURLs []string `json:"nonResourceURLs" protobuf:"bytes,6,rep,name=nonResourceURLs"`
}
// FlowSchemaStatus represents the current state of a FlowSchema.
type FlowSchemaStatus struct {
// `conditions` is a list of the current states of FlowSchema.
// +listType=map
// +listMapKey=type
// +optional
Conditions []FlowSchemaCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"`
}
// FlowSchemaCondition describes conditions for a FlowSchema.
type FlowSchemaCondition struct {
// `type` is the type of the condition.
// Required.
Type FlowSchemaConditionType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type"`
// `status` is the status of the condition.
// Can be True, False, Unknown.
// Required.
Status ConditionStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"`
// `lastTransitionTime` is the last time the condition transitioned from one status to another.
LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"`
// `reason` is a unique, one-word, CamelCase reason for the condition's last transition.
Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
// `message` is a human-readable message indicating details about last transition.
Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
}
// FlowSchemaConditionType is a valid value for FlowSchemaStatusCondition.Type
type FlowSchemaConditionType string
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PriorityLevelConfiguration represents the configuration of a priority level.
type PriorityLevelConfiguration struct {
metav1.TypeMeta `json:",inline"`
// `metadata` is the standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// `spec` is the specification of the desired behavior of a "request-priority".
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
// +optional
Spec PriorityLevelConfigurationSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
// `status` is the current status of a "request-priority".
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
// +optional
Status PriorityLevelConfigurationStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PriorityLevelConfigurationList is a list of PriorityLevelConfiguration objects.
type PriorityLevelConfigurationList struct {
metav1.TypeMeta `json:",inline"`
// `metadata` is the standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// `items` is a list of request-priorities.
// +listType=set
Items []PriorityLevelConfiguration `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// PriorityLevelConfigurationSpec specifies the configuration of a priority level.
// +union
type PriorityLevelConfigurationSpec struct {
// `type` indicates whether this priority level is subject to
// limitation on request execution. A value of `"Exempt"` means
// that requests of this priority level are not subject to a limit
// (and thus are never queued) and do not detract from the
// capacity made available to other priority levels. A value of
// `"Limited"` means that (a) requests of this priority level
// _are_ subject to limits and (b) some of the server's limited
// capacity is made available exclusively to this priority level.
// Required.
// +unionDiscriminator
Type PriorityLevelEnablement `json:"type" protobuf:"bytes,1,opt,name=type"`
// `limited` specifies how requests are handled for a Limited priority level.
// This field must be non-empty if and only if `type` is `"Limited"`.
// +optional
Limited *LimitedPriorityLevelConfiguration `json:"limited,omitempty" protobuf:"bytes,2,opt,name=limited"`
}
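// The comment above describes a one-of ("union") pattern: `Type` is the
// discriminator and `Limited` is its only variant payload. Below is a
// minimal illustrative sketch (not part of the API) of constructing both
// variants; the field values are arbitrary example numbers, not defaults
// defined by this package.
var examplePriorityLevelSpecs = []PriorityLevelConfigurationSpec{
	{Type: PriorityLevelEnablementExempt}, // Exempt: Limited stays nil
	{
		Type: PriorityLevelEnablementLimited, // Limited: Limited must be set
		Limited: &LimitedPriorityLevelConfiguration{
			AssuredConcurrencyShares: 30,
			LimitResponse: LimitResponse{
				Type:    LimitResponseTypeQueue,
				Queuing: &QueuingConfiguration{Queues: 64, HandSize: 8, QueueLengthLimit: 50},
			},
		},
	},
}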
// PriorityLevelEnablement indicates whether limits on execution are enabled for the priority level
type PriorityLevelEnablement string
// Supported priority level enablement values.
const (
// PriorityLevelEnablementExempt means that requests are not subject to limits
PriorityLevelEnablementExempt PriorityLevelEnablement = "Exempt"
// PriorityLevelEnablementLimited means that requests are subject to limits
PriorityLevelEnablementLimited PriorityLevelEnablement = "Limited"
)
// LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits.
// It addresses two issues:
// * How are requests for this priority level limited?
// * What should be done with requests that exceed the limit?
type LimitedPriorityLevelConfiguration struct {
// `assuredConcurrencyShares` (ACS) configures the execution
// limit, which is a limit on the number of requests of this
// priority level that may be executing at a given time. ACS must
// be a positive number. The server's concurrency limit (SCL) is
// divided among the concurrency-controlled priority levels in
// proportion to their assured concurrency shares. This produces
// the assured concurrency value (ACV) --- the number of requests
// that may be executing at a time --- for each such priority
// level:
//
// ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) )
//
// Bigger numbers of ACS mean more reserved concurrent requests (at the
// expense of every other PL).
// This field has a default value of 30.
// +optional
AssuredConcurrencyShares int32 `json:"assuredConcurrencyShares" protobuf:"varint,1,opt,name=assuredConcurrencyShares"`
// `limitResponse` indicates what to do with requests that can not be executed right now
LimitResponse LimitResponse `json:"limitResponse,omitempty" protobuf:"bytes,2,opt,name=limitResponse"`
}
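// An illustrative helper (not part of the API) implementing the ACV formula
// from the comment above with integer arithmetic: the server's concurrency
// limit is split among priority levels in proportion to their shares,
// rounding up. `scl` and the `acs` map are assumed inputs for the sketch.
func exampleAssuredConcurrencyValue(scl int32, acs map[string]int32, level string) int32 {
	var sum int32
	for _, shares := range acs {
		sum += shares
	}
	if sum <= 0 {
		return 0
	}
	// ceil(scl*acs[level]/sum) without floating point.
	return (scl*acs[level] + sum - 1) / sum
}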
// LimitResponse defines how to handle requests that can not be executed right now.
// +union
type LimitResponse struct {
// `type` is "Queue" or "Reject".
// "Queue" means that requests that can not be executed upon arrival
// are held in a queue until they can be executed or a queuing limit
// is reached.
// "Reject" means that requests that can not be executed upon arrival
// are rejected.
// Required.
// +unionDiscriminator
Type LimitResponseType `json:"type" protobuf:"bytes,1,opt,name=type"`
// `queuing` holds the configuration parameters for queuing.
// This field may be non-empty only if `type` is `"Queue"`.
// +optional
Queuing *QueuingConfiguration `json:"queuing,omitempty" protobuf:"bytes,2,opt,name=queuing"`
}
// LimitResponseType identifies how a Limited priority level handles a request that can not be executed right now
type LimitResponseType string
// Supported limit responses.
const (
// LimitResponseTypeQueue means that requests that can not be executed right now are queued until they can be executed or a queuing limit is hit
LimitResponseTypeQueue LimitResponseType = "Queue"
// LimitResponseTypeReject means that requests that can not be executed right now are rejected
LimitResponseTypeReject LimitResponseType = "Reject"
)
// QueuingConfiguration holds the configuration parameters for queuing
type QueuingConfiguration struct {
// `queues` is the number of queues for this priority level. The
// queues exist independently at each apiserver. The value must be
// positive. Setting it to 1 effectively precludes
// shuffle sharding and thus makes the distinguisher method of
// associated flow schemas irrelevant. This field has a default
// value of 64.
// +optional
Queues int32 `json:"queues" protobuf:"varint,1,opt,name=queues"`
// `handSize` is a small positive number that configures the
// shuffle sharding of requests into queues. When enqueuing a request
// at this priority level the request's flow identifier (a string
// pair) is hashed and the hash value is used to shuffle the list
// of queues and deal a hand of the size specified here. The
// request is put into one of the shortest queues in that hand.
// `handSize` must be no larger than `queues`, and should be
// significantly smaller (so that a few heavy flows do not
// saturate most of the queues). See the user-facing
// documentation for more extensive guidance on setting this
// field. This field has a default value of 8.
// +optional
HandSize int32 `json:"handSize" protobuf:"varint,2,opt,name=handSize"`
// `queueLengthLimit` is the maximum number of requests allowed to
// be waiting in a given queue of this priority level at a time;
// excess requests are rejected. This value must be positive. If
// not specified, it will be defaulted to 50.
// +optional
QueueLengthLimit int32 `json:"queueLengthLimit" protobuf:"varint,3,opt,name=queueLengthLimit"`
}
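// An illustrative sketch (not the apiserver's actual implementation) of how
// `queues` and `handSize` interact: the request's flow identifier is hashed,
// the hash deterministically deals a hand of `handSize` distinct queue
// indices out of `queues` (handSize <= queues is assumed, as documented
// above), and the request goes to the shortest queue in that hand. The
// `hash` and `queueLengths` parameters are assumed inputs for the sketch.
func exampleDealHand(hash uint64, queues, handSize int32, queueLengths []int) int {
	// Deal handSize distinct indices from [0, queues), consuming the hash
	// as a sequence of variable-base digits (a Fisher-Yates style deal).
	remaining := make([]int, queues)
	for i := range remaining {
		remaining[i] = i
	}
	hand := make([]int, 0, handSize)
	for i := int32(0); i < handSize; i++ {
		n := uint64(len(remaining))
		pick := int(hash % n)
		hash /= n
		hand = append(hand, remaining[pick])
		remaining[pick] = remaining[len(remaining)-1]
		remaining = remaining[:len(remaining)-1]
	}
	// Enqueue behind the least-loaded queue in the hand.
	best := hand[0]
	for _, q := range hand[1:] {
		if queueLengths[q] < queueLengths[best] {
			best = q
		}
	}
	return best
}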
// PriorityLevelConfigurationConditionType is a valid value for PriorityLevelConfigurationCondition.Type
type PriorityLevelConfigurationConditionType string
// PriorityLevelConfigurationStatus represents the current state of a "request-priority".
type PriorityLevelConfigurationStatus struct {
// `conditions` is the current state of "request-priority".
// +listType=map
// +listMapKey=type
// +optional
Conditions []PriorityLevelConfigurationCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"`
}
// PriorityLevelConfigurationCondition defines the condition of priority level.
type PriorityLevelConfigurationCondition struct {
// `type` is the type of the condition.
// Required.
Type PriorityLevelConfigurationConditionType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type"`
// `status` is the status of the condition.
// Can be True, False, Unknown.
// Required.
Status ConditionStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"`
// `lastTransitionTime` is the last time the condition transitioned from one status to another.
LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"`
// `reason` is a unique, one-word, CamelCase reason for the condition's last transition.
Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
// `message` is a human-readable message indicating details about last transition.
Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
}
// ConditionStatus is the status of the condition.
type ConditionStatus string
// These are valid condition statuses. "ConditionTrue" means a resource is in the condition.
// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes
// can't decide if a resource is in the condition or not. In the future, we could add other
// intermediate conditions, e.g. ConditionDegraded.
const (
ConditionTrue ConditionStatus = "True"
ConditionFalse ConditionStatus = "False"
ConditionUnknown ConditionStatus = "Unknown"
)
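// An illustrative sketch of recording a condition with these statuses; the
// condition type "Dangling" and the reason and message strings are example
// values chosen for the sketch, not constants defined in this file.
func exampleFlowSchemaCondition() FlowSchemaCondition {
	return FlowSchemaCondition{
		Type:               "Dangling", // example condition type
		Status:             ConditionFalse,
		LastTransitionTime: metav1.Now(),
		Reason:             "Found",
		Message:            "the referenced priority level configuration exists",
	}
}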

View File

@ -0,0 +1,258 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
// This file contains a collection of methods that can be used from go-restful to
// generate Swagger API documentation for its models. Please read this PR for more
// information on the implementation: https://github.com/emicklei/go-restful/pull/215
//
// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
// they are on one line! For multiple line or blocks that you want to ignore use ---.
// Any context after a --- is ignored.
//
// Those methods can be generated by using hack/update-generated-swagger-docs.sh
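// An illustrative consumer sketch (assumed, not part of the generated
// output): documentation tooling can type-assert for the SwaggerDoc method
// and look up per-field strings; the empty-string key documents the type
// itself.
//
// For example, exampleDocFor(FlowSchema{}, "spec") would return the doc
// string registered for the `spec` field.
type exampleSwaggerDocer interface {
	SwaggerDoc() map[string]string
}

func exampleDocFor(obj interface{}, field string) string {
	if d, ok := obj.(exampleSwaggerDocer); ok {
		return d.SwaggerDoc()[field]
	}
	return ""
}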
// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
var map_FlowDistinguisherMethod = map[string]string{
"": "FlowDistinguisherMethod specifies the method of a flow distinguisher.",
"type": "`type` is the type of flow distinguisher method The supported types are \"ByUser\" and \"ByNamespace\". Required.",
}
func (FlowDistinguisherMethod) SwaggerDoc() map[string]string {
return map_FlowDistinguisherMethod
}
var map_FlowSchema = map[string]string{
"": "FlowSchema defines the schema of a group of flows. Note that a flow is made up of a set of inbound API requests with similar attributes and is identified by a pair of strings: the name of the FlowSchema and a \"flow distinguisher\".",
"metadata": "`metadata` is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
"spec": "`spec` is the specification of the desired behavior of a FlowSchema. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
"status": "`status` is the current status of a FlowSchema. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
}
func (FlowSchema) SwaggerDoc() map[string]string {
return map_FlowSchema
}
var map_FlowSchemaCondition = map[string]string{
"": "FlowSchemaCondition describes conditions for a FlowSchema.",
"type": "`type` is the type of the condition. Required.",
"status": "`status` is the status of the condition. Can be True, False, Unknown. Required.",
"lastTransitionTime": "`lastTransitionTime` is the last time the condition transitioned from one status to another.",
"reason": "`reason` is a unique, one-word, CamelCase reason for the condition's last transition.",
"message": "`message` is a human-readable message indicating details about last transition.",
}
func (FlowSchemaCondition) SwaggerDoc() map[string]string {
return map_FlowSchemaCondition
}
var map_FlowSchemaList = map[string]string{
"": "FlowSchemaList is a list of FlowSchema objects.",
"metadata": "`metadata` is the standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
"items": "`items` is a list of FlowSchemas.",
}
func (FlowSchemaList) SwaggerDoc() map[string]string {
return map_FlowSchemaList
}
var map_FlowSchemaSpec = map[string]string{
"": "FlowSchemaSpec describes how the FlowSchema's specification looks like.",
"priorityLevelConfiguration": "`priorityLevelConfiguration` should reference a PriorityLevelConfiguration in the cluster. If the reference cannot be resolved, the FlowSchema will be ignored and marked as invalid in its status. Required.",
"matchingPrecedence": "`matchingPrecedence` is used to choose among the FlowSchemas that match a given request. The chosen FlowSchema is among those with the numerically lowest (which we take to be logically highest) MatchingPrecedence. Each MatchingPrecedence value must be non-negative. Note that if the precedence is not specified or zero, it will be set to 1000 as default.",
"distinguisherMethod": "`distinguisherMethod` defines how to compute the flow distinguisher for requests that match this schema. `nil` specifies that the distinguisher is disabled and thus will always be the empty string.",
"rules": "`rules` describes which requests will match this flow schema. This FlowSchema matches a request if and only if at least one member of rules matches the request. if it is an empty slice, there will be no requests matching the FlowSchema.",
}
func (FlowSchemaSpec) SwaggerDoc() map[string]string {
return map_FlowSchemaSpec
}
var map_FlowSchemaStatus = map[string]string{
"": "FlowSchemaStatus represents the current state of a FlowSchema.",
"conditions": "`conditions` is a list of the current states of FlowSchema.",
}
func (FlowSchemaStatus) SwaggerDoc() map[string]string {
return map_FlowSchemaStatus
}
var map_GroupSubject = map[string]string{
"": "GroupSubject holds detailed information for group-kind subject.",
"name": "name is the user group that matches, or \"*\" to match all user groups. See https://github.com/kubernetes/apiserver/blob/master/pkg/authentication/user/user.go for some well-known group names. Required.",
}
func (GroupSubject) SwaggerDoc() map[string]string {
return map_GroupSubject
}
var map_LimitResponse = map[string]string{
"": "LimitResponse defines how to handle requests that can not be executed right now.",
"type": "`type` is \"Queue\" or \"Reject\". \"Queue\" means that requests that can not be executed upon arrival are held in a queue until they can be executed or a queuing limit is reached. \"Reject\" means that requests that can not be executed upon arrival are rejected. Required.",
"queuing": "`queuing` holds the configuration parameters for queuing. This field may be non-empty only if `type` is `\"Queue\"`.",
}
func (LimitResponse) SwaggerDoc() map[string]string {
return map_LimitResponse
}
var map_LimitedPriorityLevelConfiguration = map[string]string{
"": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n * How are requests for this priority level limited?\n * What should be done with requests that exceed the limit?",
"assuredConcurrencyShares": "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be exeucting at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) ",
"limitResponse": "`limitResponse` indicates what to do with requests that can not be executed right now",
}
func (LimitedPriorityLevelConfiguration) SwaggerDoc() map[string]string {
return map_LimitedPriorityLevelConfiguration
}
var map_NonResourcePolicyRule = map[string]string{
"": "NonResourcePolicyRule is a predicate that matches non-resource requests according to their verb and the target non-resource URL. A NonResourcePolicyRule matches a request if and only if both (a) at least one member of verbs matches the request and (b) at least one member of nonResourceURLs matches the request.",
"verbs": "`verbs` is a list of matching verbs and may not be empty. \"*\" matches all verbs. If it is present, it must be the only entry. Required.",
"nonResourceURLs": "`nonResourceURLs` is a set of url prefixes that a user should have access to and may not be empty. For example:\n - \"/healthz\" is legal\n - \"/hea*\" is illegal\n - \"/hea\" is legal but matches nothing\n - \"/hea/*\" also matches nothing\n - \"/healthz/*\" matches all per-component health checks.\n\"*\" matches all non-resource urls. if it is present, it must be the only entry. Required.",
}
func (NonResourcePolicyRule) SwaggerDoc() map[string]string {
return map_NonResourcePolicyRule
}
var map_PolicyRulesWithSubjects = map[string]string{
"": "PolicyRulesWithSubjects prescribes a test that applies to a request to an apiserver. The test considers the subject making the request, the verb being requested, and the resource to be acted upon. This PolicyRulesWithSubjects matches a request if and only if both (a) at least one member of subjects matches the request and (b) at least one member of resourceRules or nonResourceRules matches the request.",
"subjects": "subjects is the list of normal user, serviceaccount, or group that this rule cares about. There must be at least one member in this slice. A slice that includes both the system:authenticated and system:unauthenticated user groups matches every request. Required.",
"resourceRules": "`resourceRules` is a slice of ResourcePolicyRules that identify matching requests according to their verb and the target resource. At least one of `resourceRules` and `nonResourceRules` has to be non-empty.",
"nonResourceRules": "`nonResourceRules` is a list of NonResourcePolicyRules that identify matching requests according to their verb and the target non-resource URL.",
}
func (PolicyRulesWithSubjects) SwaggerDoc() map[string]string {
return map_PolicyRulesWithSubjects
}
var map_PriorityLevelConfiguration = map[string]string{
"": "PriorityLevelConfiguration represents the configuration of a priority level.",
"metadata": "`metadata` is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
"spec": "`spec` is the specification of the desired behavior of a \"request-priority\". More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
"status": "`status` is the current status of a \"request-priority\". More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
}
func (PriorityLevelConfiguration) SwaggerDoc() map[string]string {
return map_PriorityLevelConfiguration
}
var map_PriorityLevelConfigurationCondition = map[string]string{
"": "PriorityLevelConfigurationCondition defines the condition of priority level.",
"type": "`type` is the type of the condition. Required.",
"status": "`status` is the status of the condition. Can be True, False, Unknown. Required.",
"lastTransitionTime": "`lastTransitionTime` is the last time the condition transitioned from one status to another.",
"reason": "`reason` is a unique, one-word, CamelCase reason for the condition's last transition.",
"message": "`message` is a human-readable message indicating details about last transition.",
}
func (PriorityLevelConfigurationCondition) SwaggerDoc() map[string]string {
return map_PriorityLevelConfigurationCondition
}
var map_PriorityLevelConfigurationList = map[string]string{
"": "PriorityLevelConfigurationList is a list of PriorityLevelConfiguration objects.",
"metadata": "`metadata` is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
"items": "`items` is a list of request-priorities.",
}
func (PriorityLevelConfigurationList) SwaggerDoc() map[string]string {
return map_PriorityLevelConfigurationList
}
var map_PriorityLevelConfigurationReference = map[string]string{
"": "PriorityLevelConfigurationReference contains information that points to the \"request-priority\" being used.",
"name": "`name` is the name of the priority level configuration being referenced Required.",
}
func (PriorityLevelConfigurationReference) SwaggerDoc() map[string]string {
return map_PriorityLevelConfigurationReference
}
var map_PriorityLevelConfigurationSpec = map[string]string{
"": "PriorityLevelConfigurationSpec specifies the configuration of a priority level.",
"type": "`type` indicates whether this priority level is subject to limitation on request execution. A value of `\"Exempt\"` means that requests of this priority level are not subject to a limit (and thus are never queued) and do not detract from the capacity made available to other priority levels. A value of `\"Limited\"` means that (a) requests of this priority level _are_ subject to limits and (b) some of the server's limited capacity is made available exclusively to this priority level. Required.",
"limited": "`limited` specifies how requests are handled for a Limited priority level. This field must be non-empty if and only if `type` is `\"Limited\"`.",
}
func (PriorityLevelConfigurationSpec) SwaggerDoc() map[string]string {
return map_PriorityLevelConfigurationSpec
}
var map_PriorityLevelConfigurationStatus = map[string]string{
"": "PriorityLevelConfigurationStatus represents the current state of a \"request-priority\".",
"conditions": "`conditions` is the current state of \"request-priority\".",
}
func (PriorityLevelConfigurationStatus) SwaggerDoc() map[string]string {
return map_PriorityLevelConfigurationStatus
}
var map_QueuingConfiguration = map[string]string{
"": "QueuingConfiguration holds the configuration parameters for queuing",
"queues": "`queues` is the number of queues for this priority level. The queues exist independently at each apiserver. The value must be positive. Setting it to 1 effectively precludes shufflesharding and thus makes the distinguisher method of associated flow schemas irrelevant. This field has a default value of 64.",
"handSize": "`handSize` is a small positive number that configures the shuffle sharding of requests into queues. When enqueuing a request at this priority level the request's flow identifier (a string pair) is hashed and the hash value is used to shuffle the list of queues and deal a hand of the size specified here. The request is put into one of the shortest queues in that hand. `handSize` must be no larger than `queues`, and should be significantly smaller (so that a few heavy flows do not saturate most of the queues). See the user-facing documentation for more extensive guidance on setting this field. This field has a default value of 8.",
"queueLengthLimit": "`queueLengthLimit` is the maximum number of requests allowed to be waiting in a given queue of this priority level at a time; excess requests are rejected. This value must be positive. If not specified, it will be defaulted to 50.",
}
func (QueuingConfiguration) SwaggerDoc() map[string]string {
return map_QueuingConfiguration
}
var map_ResourcePolicyRule = map[string]string{
"": "ResourcePolicyRule is a predicate that matches some resource requests, testing the request's verb and the target resource. A ResourcePolicyRule matches a resource request if and only if: (a) at least one member of verbs matches the request, (b) at least one member of apiGroups matches the request, (c) at least one member of resources matches the request, and (d) least one member of namespaces matches the request.",
"verbs": "`verbs` is a list of matching verbs and may not be empty. \"*\" matches all verbs and, if present, must be the only entry. Required.",
"apiGroups": "`apiGroups` is a list of matching API groups and may not be empty. \"*\" matches all API groups and, if present, must be the only entry. Required.",
"resources": "`resources` is a list of matching resources (i.e., lowercase and plural) with, if desired, subresource. For example, [ \"services\", \"nodes/status\" ]. This list may not be empty. \"*\" matches all resources and, if present, must be the only entry. Required.",
"clusterScope": "`clusterScope` indicates whether to match requests that do not specify a namespace (which happens either because the resource is not namespaced or the request targets all namespaces). If this field is omitted or false then the `namespaces` field must contain a non-empty list.",
"namespaces": "`namespaces` is a list of target namespaces that restricts matches. A request that specifies a target namespace matches only if either (a) this list contains that target namespace or (b) this list contains \"*\". Note that \"*\" matches any specified namespace but does not match a request that _does not specify_ a namespace (see the `clusterScope` field for that). This list may be empty, but only if `clusterScope` is true.",
}
func (ResourcePolicyRule) SwaggerDoc() map[string]string {
return map_ResourcePolicyRule
}
var map_ServiceAccountSubject = map[string]string{
"": "ServiceAccountSubject holds detailed information for service-account-kind subject.",
"namespace": "`namespace` is the namespace of matching ServiceAccount objects. Required.",
"name": "`name` is the name of matching ServiceAccount objects, or \"*\" to match regardless of name. Required.",
}
func (ServiceAccountSubject) SwaggerDoc() map[string]string {
return map_ServiceAccountSubject
}
var map_Subject = map[string]string{
"": "Subject matches the originator of a request, as identified by the request authentication system. There are three ways of matching an originator; by user, group, or service account.",
"kind": "Required",
}
func (Subject) SwaggerDoc() map[string]string {
return map_Subject
}
var map_UserSubject = map[string]string{
"": "UserSubject holds detailed information for user-kind subject.",
"name": "`name` is the username that matches, or \"*\" to match all usernames. Required.",
}
func (UserSubject) SwaggerDoc() map[string]string {
return map_UserSubject
}
// AUTO-GENERATED FUNCTIONS END HERE

View File

@ -0,0 +1,541 @@
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1alpha1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FlowDistinguisherMethod) DeepCopyInto(out *FlowDistinguisherMethod) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowDistinguisherMethod.
func (in *FlowDistinguisherMethod) DeepCopy() *FlowDistinguisherMethod {
if in == nil {
return nil
}
out := new(FlowDistinguisherMethod)
in.DeepCopyInto(out)
return out
}
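// A usage sketch (illustrative, not part of the generated code): DeepCopy
// yields an independent value, so mutating the copy never affects the
// original. "ByUser" and "ByNamespace" are example values for the
// string-typed field.
func exampleDeepCopyIsIndependent() {
	orig := &FlowDistinguisherMethod{Type: "ByUser"}
	cp := orig.DeepCopy()
	cp.Type = "ByNamespace"
	// orig.Type is still "ByUser": the two structs share no memory.
	_ = orig
}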
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FlowSchema) DeepCopyInto(out *FlowSchema) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowSchema.
func (in *FlowSchema) DeepCopy() *FlowSchema {
if in == nil {
return nil
}
out := new(FlowSchema)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *FlowSchema) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FlowSchemaCondition) DeepCopyInto(out *FlowSchemaCondition) {
*out = *in
in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowSchemaCondition.
func (in *FlowSchemaCondition) DeepCopy() *FlowSchemaCondition {
if in == nil {
return nil
}
out := new(FlowSchemaCondition)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FlowSchemaList) DeepCopyInto(out *FlowSchemaList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]FlowSchema, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowSchemaList.
func (in *FlowSchemaList) DeepCopy() *FlowSchemaList {
if in == nil {
return nil
}
out := new(FlowSchemaList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *FlowSchemaList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FlowSchemaSpec) DeepCopyInto(out *FlowSchemaSpec) {
*out = *in
out.PriorityLevelConfiguration = in.PriorityLevelConfiguration
if in.DistinguisherMethod != nil {
in, out := &in.DistinguisherMethod, &out.DistinguisherMethod
*out = new(FlowDistinguisherMethod)
**out = **in
}
if in.Rules != nil {
in, out := &in.Rules, &out.Rules
*out = make([]PolicyRulesWithSubjects, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowSchemaSpec.
func (in *FlowSchemaSpec) DeepCopy() *FlowSchemaSpec {
if in == nil {
return nil
}
out := new(FlowSchemaSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FlowSchemaStatus) DeepCopyInto(out *FlowSchemaStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]FlowSchemaCondition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowSchemaStatus.
func (in *FlowSchemaStatus) DeepCopy() *FlowSchemaStatus {
if in == nil {
return nil
}
out := new(FlowSchemaStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GroupSubject) DeepCopyInto(out *GroupSubject) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupSubject.
func (in *GroupSubject) DeepCopy() *GroupSubject {
if in == nil {
return nil
}
out := new(GroupSubject)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LimitResponse) DeepCopyInto(out *LimitResponse) {
*out = *in
if in.Queuing != nil {
in, out := &in.Queuing, &out.Queuing
*out = new(QueuingConfiguration)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitResponse.
func (in *LimitResponse) DeepCopy() *LimitResponse {
if in == nil {
return nil
}
out := new(LimitResponse)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LimitedPriorityLevelConfiguration) DeepCopyInto(out *LimitedPriorityLevelConfiguration) {
*out = *in
in.LimitResponse.DeepCopyInto(&out.LimitResponse)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitedPriorityLevelConfiguration.
func (in *LimitedPriorityLevelConfiguration) DeepCopy() *LimitedPriorityLevelConfiguration {
if in == nil {
return nil
}
out := new(LimitedPriorityLevelConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NonResourcePolicyRule) DeepCopyInto(out *NonResourcePolicyRule) {
*out = *in
if in.Verbs != nil {
in, out := &in.Verbs, &out.Verbs
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.NonResourceURLs != nil {
in, out := &in.NonResourceURLs, &out.NonResourceURLs
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NonResourcePolicyRule.
func (in *NonResourcePolicyRule) DeepCopy() *NonResourcePolicyRule {
if in == nil {
return nil
}
out := new(NonResourcePolicyRule)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PolicyRulesWithSubjects) DeepCopyInto(out *PolicyRulesWithSubjects) {
*out = *in
if in.Subjects != nil {
in, out := &in.Subjects, &out.Subjects
*out = make([]Subject, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.ResourceRules != nil {
in, out := &in.ResourceRules, &out.ResourceRules
*out = make([]ResourcePolicyRule, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.NonResourceRules != nil {
in, out := &in.NonResourceRules, &out.NonResourceRules
*out = make([]NonResourcePolicyRule, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyRulesWithSubjects.
func (in *PolicyRulesWithSubjects) DeepCopy() *PolicyRulesWithSubjects {
if in == nil {
return nil
}
out := new(PolicyRulesWithSubjects)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PriorityLevelConfiguration) DeepCopyInto(out *PriorityLevelConfiguration) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfiguration.
func (in *PriorityLevelConfiguration) DeepCopy() *PriorityLevelConfiguration {
if in == nil {
return nil
}
out := new(PriorityLevelConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PriorityLevelConfiguration) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PriorityLevelConfigurationCondition) DeepCopyInto(out *PriorityLevelConfigurationCondition) {
*out = *in
in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfigurationCondition.
func (in *PriorityLevelConfigurationCondition) DeepCopy() *PriorityLevelConfigurationCondition {
if in == nil {
return nil
}
out := new(PriorityLevelConfigurationCondition)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PriorityLevelConfigurationList) DeepCopyInto(out *PriorityLevelConfigurationList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]PriorityLevelConfiguration, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfigurationList.
func (in *PriorityLevelConfigurationList) DeepCopy() *PriorityLevelConfigurationList {
if in == nil {
return nil
}
out := new(PriorityLevelConfigurationList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PriorityLevelConfigurationList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PriorityLevelConfigurationReference) DeepCopyInto(out *PriorityLevelConfigurationReference) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfigurationReference.
func (in *PriorityLevelConfigurationReference) DeepCopy() *PriorityLevelConfigurationReference {
if in == nil {
return nil
}
out := new(PriorityLevelConfigurationReference)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PriorityLevelConfigurationSpec) DeepCopyInto(out *PriorityLevelConfigurationSpec) {
*out = *in
if in.Limited != nil {
in, out := &in.Limited, &out.Limited
*out = new(LimitedPriorityLevelConfiguration)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfigurationSpec.
func (in *PriorityLevelConfigurationSpec) DeepCopy() *PriorityLevelConfigurationSpec {
if in == nil {
return nil
}
out := new(PriorityLevelConfigurationSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PriorityLevelConfigurationStatus) DeepCopyInto(out *PriorityLevelConfigurationStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]PriorityLevelConfigurationCondition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfigurationStatus.
func (in *PriorityLevelConfigurationStatus) DeepCopy() *PriorityLevelConfigurationStatus {
if in == nil {
return nil
}
out := new(PriorityLevelConfigurationStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *QueuingConfiguration) DeepCopyInto(out *QueuingConfiguration) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueuingConfiguration.
func (in *QueuingConfiguration) DeepCopy() *QueuingConfiguration {
if in == nil {
return nil
}
out := new(QueuingConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourcePolicyRule) DeepCopyInto(out *ResourcePolicyRule) {
*out = *in
if in.Verbs != nil {
in, out := &in.Verbs, &out.Verbs
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.APIGroups != nil {
in, out := &in.APIGroups, &out.APIGroups
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Resources != nil {
in, out := &in.Resources, &out.Resources
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Namespaces != nil {
in, out := &in.Namespaces, &out.Namespaces
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePolicyRule.
func (in *ResourcePolicyRule) DeepCopy() *ResourcePolicyRule {
if in == nil {
return nil
}
out := new(ResourcePolicyRule)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceAccountSubject) DeepCopyInto(out *ServiceAccountSubject) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountSubject.
func (in *ServiceAccountSubject) DeepCopy() *ServiceAccountSubject {
if in == nil {
return nil
}
out := new(ServiceAccountSubject)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Subject) DeepCopyInto(out *Subject) {
*out = *in
if in.User != nil {
in, out := &in.User, &out.User
*out = new(UserSubject)
**out = **in
}
if in.Group != nil {
in, out := &in.Group, &out.Group
*out = new(GroupSubject)
**out = **in
}
if in.ServiceAccount != nil {
in, out := &in.ServiceAccount, &out.ServiceAccount
*out = new(ServiceAccountSubject)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Subject.
func (in *Subject) DeepCopy() *Subject {
if in == nil {
return nil
}
out := new(Subject)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *UserSubject) DeepCopyInto(out *UserSubject) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSubject.
func (in *UserSubject) DeepCopy() *UserSubject {
if in == nil {
return nil
}
out := new(UserSubject)
in.DeepCopyInto(out)
return out
}

View File

@ -151,7 +151,7 @@ message PodDisruptionBudgetSpec {
// PodDisruptionBudget. Status may trail the actual state of a system.
message PodDisruptionBudgetStatus {
// Most recent generation observed when updating this PDB status. PodDisruptionsAllowed and other
// status informatio is valid only if observedGeneration equals to PDB's object generation.
// status information is valid only if observedGeneration equals to PDB's object generation.
// +optional
optional int64 observedGeneration = 1;

View File

@ -48,7 +48,7 @@ type PodDisruptionBudgetSpec struct {
// PodDisruptionBudget. Status may trail the actual state of a system.
type PodDisruptionBudgetStatus struct {
// Most recent generation observed when updating this PDB status. PodDisruptionsAllowed and other
// status informatio is valid only if observedGeneration equals to PDB's object generation.
// status information is valid only if observedGeneration equals to PDB's object generation.
// +optional
ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"`
@ -276,7 +276,7 @@ var AllowAllCapabilities v1.Capability = "*"
// FSType gives strong typing to different file systems that are used by volumes.
type FSType string
var (
const (
AzureFile FSType = "azureFile"
Flocker FSType = "flocker"
FlexVolume FSType = "flexVolume"

View File

@ -126,7 +126,7 @@ func (PodDisruptionBudgetSpec) SwaggerDoc() map[string]string {
var map_PodDisruptionBudgetStatus = map[string]string{
"": "PodDisruptionBudgetStatus represents information about the status of a PodDisruptionBudget. Status may trail the actual state of a system.",
"observedGeneration": "Most recent generation observed when updating this PDB status. PodDisruptionsAllowed and other status informatio is valid only if observedGeneration equals to PDB's object generation.",
"observedGeneration": "Most recent generation observed when updating this PDB status. PodDisruptionsAllowed and other status information is valid only if observedGeneration equals to PDB's object generation.",
"disruptedPods": "DisruptedPods contains information about pods whose eviction was processed by the API server eviction subresource handler but has not yet been observed by the PodDisruptionBudget controller. A pod will be in this map from the time when the API server processed the eviction request to the time when the pod is seen by PDB controller as having been marked for deletion (or after a timeout). The key in the map is the name of the pod and the value is the time when the API server processed the eviction request. If the deletion didn't occur and a pod is still there it will be removed from the list automatically by PodDisruptionBudget controller after some time. If everything goes smooth this map should be empty for the most of the time. Large number of entries in the map may indicate problems with pod deletions.",
"disruptionsAllowed": "Number of pod disruptions that are currently allowed.",
"currentHealthy": "current number of healthy pods",

View File

@ -37,6 +37,7 @@ message AggregationRule {
}
// ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.
// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRole, and will no longer be served in v1.20.
message ClusterRole {
// Standard object's metadata.
// +optional
@ -55,6 +56,7 @@ message ClusterRole {
// ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace,
// and adds who information via Subject.
// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBinding, and will no longer be served in v1.20.
message ClusterRoleBinding {
// Standard object's metadata.
// +optional
@ -69,7 +71,8 @@ message ClusterRoleBinding {
optional RoleRef roleRef = 3;
}
// ClusterRoleBindingList is a collection of ClusterRoleBindings
// ClusterRoleBindingList is a collection of ClusterRoleBindings.
// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBindings, and will no longer be served in v1.20.
message ClusterRoleBindingList {
// Standard object's metadata.
// +optional
@ -79,7 +82,8 @@ message ClusterRoleBindingList {
repeated ClusterRoleBinding items = 2;
}
// ClusterRoleList is a collection of ClusterRoles
// ClusterRoleList is a collection of ClusterRoles.
// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoles, and will no longer be served in v1.20.
message ClusterRoleList {
// Standard object's metadata.
// +optional
@ -117,6 +121,7 @@ message PolicyRule {
}
// Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding.
// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 Role, and will no longer be served in v1.20.
message Role {
// Standard object's metadata.
// +optional
@ -130,6 +135,7 @@ message Role {
// RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace.
// It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given
// namespace only have effect in that namespace.
// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBinding, and will no longer be served in v1.20.
message RoleBinding {
// Standard object's metadata.
// +optional
@ -145,6 +151,7 @@ message RoleBinding {
}
// RoleBindingList is a collection of RoleBindings
// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBindingList, and will no longer be served in v1.20.
message RoleBindingList {
// Standard object's metadata.
// +optional
@ -154,7 +161,8 @@ message RoleBindingList {
repeated RoleBinding items = 2;
}
// RoleList is a collection of Roles
// RoleList is a collection of Roles.
// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleList, and will no longer be served in v1.20.
message RoleList {
// Standard object's metadata.
// +optional

View File

@ -103,6 +103,7 @@ type RoleRef struct {
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding.
// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 Role, and will no longer be served in v1.20.
type Role struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
@ -120,6 +121,7 @@ type Role struct {
// RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace.
// It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given
// namespace only have effect in that namespace.
// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBinding, and will no longer be served in v1.20.
type RoleBinding struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
@ -138,6 +140,7 @@ type RoleBinding struct {
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// RoleBindingList is a collection of RoleBindings
// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBindingList, and will no longer be served in v1.20.
type RoleBindingList struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
@ -150,7 +153,8 @@ type RoleBindingList struct {
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// RoleList is a collection of Roles
// RoleList is a collection of Roles.
// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleList, and will no longer be served in v1.20.
type RoleList struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
@ -166,6 +170,7 @@ type RoleList struct {
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.
// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRole, and will no longer be served in v1.20.
type ClusterRole struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
@ -197,6 +202,7 @@ type AggregationRule struct {
// ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace,
// and adds who information via Subject.
// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBinding, and will no longer be served in v1.20.
type ClusterRoleBinding struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
@ -214,7 +220,8 @@ type ClusterRoleBinding struct {
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ClusterRoleBindingList is a collection of ClusterRoleBindings
// ClusterRoleBindingList is a collection of ClusterRoleBindings.
// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBindings, and will no longer be served in v1.20.
type ClusterRoleBindingList struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
@ -227,7 +234,8 @@ type ClusterRoleBindingList struct {
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ClusterRoleList is a collection of ClusterRoles
// ClusterRoleList is a collection of ClusterRoles.
// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoles, and will no longer be served in v1.20.
type ClusterRoleList struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.

View File

@ -37,7 +37,7 @@ func (AggregationRule) SwaggerDoc() map[string]string {
}
var map_ClusterRole = map[string]string{
"": "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.",
"": "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRole, and will no longer be served in v1.20.",
"metadata": "Standard object's metadata.",
"rules": "Rules holds all the PolicyRules for this ClusterRole",
"aggregationRule": "AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be stomped by the controller.",
@ -48,7 +48,7 @@ func (ClusterRole) SwaggerDoc() map[string]string {
}
var map_ClusterRoleBinding = map[string]string{
"": "ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, and adds who information via Subject.",
"": "ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, and adds who information via Subject. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBinding, and will no longer be served in v1.20.",
"metadata": "Standard object's metadata.",
"subjects": "Subjects holds references to the objects the role applies to.",
"roleRef": "RoleRef can only reference a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error.",
@ -59,7 +59,7 @@ func (ClusterRoleBinding) SwaggerDoc() map[string]string {
}
var map_ClusterRoleBindingList = map[string]string{
"": "ClusterRoleBindingList is a collection of ClusterRoleBindings",
"": "ClusterRoleBindingList is a collection of ClusterRoleBindings. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBindings, and will no longer be served in v1.20.",
"metadata": "Standard object's metadata.",
"items": "Items is a list of ClusterRoleBindings",
}
@ -69,7 +69,7 @@ func (ClusterRoleBindingList) SwaggerDoc() map[string]string {
}
var map_ClusterRoleList = map[string]string{
"": "ClusterRoleList is a collection of ClusterRoles",
"": "ClusterRoleList is a collection of ClusterRoles. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoles, and will no longer be served in v1.20.",
"metadata": "Standard object's metadata.",
"items": "Items is a list of ClusterRoles",
}
@ -92,7 +92,7 @@ func (PolicyRule) SwaggerDoc() map[string]string {
}
var map_Role = map[string]string{
"": "Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding.",
"": "Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 Role, and will no longer be served in v1.20.",
"metadata": "Standard object's metadata.",
"rules": "Rules holds all the PolicyRules for this Role",
}
@ -102,7 +102,7 @@ func (Role) SwaggerDoc() map[string]string {
}
var map_RoleBinding = map[string]string{
"": "RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given namespace only have effect in that namespace.",
"": "RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given namespace only have effect in that namespace. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBinding, and will no longer be served in v1.20.",
"metadata": "Standard object's metadata.",
"subjects": "Subjects holds references to the objects the role applies to.",
"roleRef": "RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error.",
@ -113,7 +113,7 @@ func (RoleBinding) SwaggerDoc() map[string]string {
}
var map_RoleBindingList = map[string]string{
"": "RoleBindingList is a collection of RoleBindings",
"": "RoleBindingList is a collection of RoleBindings Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBindingList, and will no longer be served in v1.20.",
"metadata": "Standard object's metadata.",
"items": "Items is a list of RoleBindings",
}
@ -123,7 +123,7 @@ func (RoleBindingList) SwaggerDoc() map[string]string {
}
var map_RoleList = map[string]string{
"": "RoleList is a collection of Roles",
"": "RoleList is a collection of Roles. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleList, and will no longer be served in v1.20.",
"metadata": "Standard object's metadata.",
"items": "Items is a list of Roles",
}

View File

@ -37,6 +37,7 @@ message AggregationRule {
}
// ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.
// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRole, and will no longer be served in v1.20.
message ClusterRole {
// Standard object's metadata.
// +optional
@ -55,6 +56,7 @@ message ClusterRole {
// ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace,
// and adds who information via Subject.
// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBinding, and will no longer be served in v1.20.
message ClusterRoleBinding {
// Standard object's metadata.
// +optional
@ -69,7 +71,8 @@ message ClusterRoleBinding {
optional RoleRef roleRef = 3;
}
// ClusterRoleBindingList is a collection of ClusterRoleBindings
// ClusterRoleBindingList is a collection of ClusterRoleBindings.
// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBindingList, and will no longer be served in v1.20.
message ClusterRoleBindingList {
// Standard object's metadata.
// +optional
@ -79,7 +82,8 @@ message ClusterRoleBindingList {
repeated ClusterRoleBinding items = 2;
}
// ClusterRoleList is a collection of ClusterRoles
// ClusterRoleList is a collection of ClusterRoles.
// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoles, and will no longer be served in v1.20.
message ClusterRoleList {
// Standard object's metadata.
// +optional
@ -117,6 +121,7 @@ message PolicyRule {
}
// Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding.
// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 Role, and will no longer be served in v1.20.
message Role {
// Standard object's metadata.
// +optional
@ -130,6 +135,7 @@ message Role {
// RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace.
// It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given
// namespace only have effect in that namespace.
// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBinding, and will no longer be served in v1.20.
message RoleBinding {
// Standard object's metadata.
// +optional
@ -145,6 +151,7 @@ message RoleBinding {
}
// RoleBindingList is a collection of RoleBindings
// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBindingList, and will no longer be served in v1.20.
message RoleBindingList {
// Standard object's metadata.
// +optional
@ -155,6 +162,7 @@ message RoleBindingList {
}
// RoleList is a collection of Roles
// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleList, and will no longer be served in v1.20.
message RoleList {
// Standard object's metadata.
// +optional

View File

@ -102,6 +102,7 @@ type RoleRef struct {
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding.
// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 Role, and will no longer be served in v1.20.
type Role struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
@ -119,6 +120,7 @@ type Role struct {
// RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace.
// It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given
// namespace only have effect in that namespace.
// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBinding, and will no longer be served in v1.20.
type RoleBinding struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
@ -137,6 +139,7 @@ type RoleBinding struct {
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// RoleBindingList is a collection of RoleBindings
// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBindingList, and will no longer be served in v1.20.
type RoleBindingList struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
@ -150,6 +153,7 @@ type RoleBindingList struct {
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// RoleList is a collection of Roles
// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleList, and will no longer be served in v1.20.
type RoleList struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
@ -165,6 +169,7 @@ type RoleList struct {
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.
// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRole, and will no longer be served in v1.20.
type ClusterRole struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
@ -195,6 +200,7 @@ type AggregationRule struct {
// ClusterRoleBinding references a ClusterRole, but does not contain it. It can reference a ClusterRole in the global namespace,
// and adds who information via Subject.
// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBinding, and will no longer be served in v1.20.
type ClusterRoleBinding struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
@ -212,7 +218,8 @@ type ClusterRoleBinding struct {
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ClusterRoleBindingList is a collection of ClusterRoleBindings
// ClusterRoleBindingList is a collection of ClusterRoleBindings.
// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBindingList, and will no longer be served in v1.20.
type ClusterRoleBindingList struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
@ -225,7 +232,8 @@ type ClusterRoleBindingList struct {
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ClusterRoleList is a collection of ClusterRoles
// ClusterRoleList is a collection of ClusterRoles.
// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleList, and will no longer be served in v1.20.
type ClusterRoleList struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.

View File

@ -37,7 +37,7 @@ func (AggregationRule) SwaggerDoc() map[string]string {
}
var map_ClusterRole = map[string]string{
"": "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.",
"": "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRole, and will no longer be served in v1.20.",
"metadata": "Standard object's metadata.",
"rules": "Rules holds all the PolicyRules for this ClusterRole",
"aggregationRule": "AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be stomped by the controller.",
@ -48,7 +48,7 @@ func (ClusterRole) SwaggerDoc() map[string]string {
}
var map_ClusterRoleBinding = map[string]string{
"": "ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, and adds who information via Subject.",
"": "ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, and adds who information via Subject. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBinding, and will no longer be served in v1.20.",
"metadata": "Standard object's metadata.",
"subjects": "Subjects holds references to the objects the role applies to.",
"roleRef": "RoleRef can only reference a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error.",
@ -59,7 +59,7 @@ func (ClusterRoleBinding) SwaggerDoc() map[string]string {
}
var map_ClusterRoleBindingList = map[string]string{
"": "ClusterRoleBindingList is a collection of ClusterRoleBindings",
"": "ClusterRoleBindingList is a collection of ClusterRoleBindings. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBindingList, and will no longer be served in v1.20.",
"metadata": "Standard object's metadata.",
"items": "Items is a list of ClusterRoleBindings",
}
@ -69,7 +69,7 @@ func (ClusterRoleBindingList) SwaggerDoc() map[string]string {
}
var map_ClusterRoleList = map[string]string{
"": "ClusterRoleList is a collection of ClusterRoles",
"": "ClusterRoleList is a collection of ClusterRoles. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoles, and will no longer be served in v1.20.",
"metadata": "Standard object's metadata.",
"items": "Items is a list of ClusterRoles",
}
@ -92,7 +92,7 @@ func (PolicyRule) SwaggerDoc() map[string]string {
}
var map_Role = map[string]string{
"": "Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding.",
"": "Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 Role, and will no longer be served in v1.20.",
"metadata": "Standard object's metadata.",
"rules": "Rules holds all the PolicyRules for this Role",
}
@ -102,7 +102,7 @@ func (Role) SwaggerDoc() map[string]string {
}
var map_RoleBinding = map[string]string{
"": "RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given namespace only have effect in that namespace.",
"": "RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given namespace only have effect in that namespace. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBinding, and will no longer be served in v1.20.",
"metadata": "Standard object's metadata.",
"subjects": "Subjects holds references to the objects the role applies to.",
"roleRef": "RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error.",
@ -113,7 +113,7 @@ func (RoleBinding) SwaggerDoc() map[string]string {
}
var map_RoleBindingList = map[string]string{
"": "RoleBindingList is a collection of RoleBindings",
"": "RoleBindingList is a collection of RoleBindings Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBindingList, and will no longer be served in v1.20.",
"metadata": "Standard object's metadata.",
"items": "Items is a list of RoleBindings",
}
@ -123,7 +123,7 @@ func (RoleBindingList) SwaggerDoc() map[string]string {
}
var map_RoleList = map[string]string{
"": "RoleList is a collection of Roles",
"": "RoleList is a collection of Roles Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleList, and will no longer be served in v1.20.",
"metadata": "Standard object's metadata.",
"items": "Items is a list of Roles",
}
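
Since every v1beta1 type in these hunks now points users at rbac.authorization.k8s.io/v1, a short sketch of constructing the equivalent v1 object may help. The field layout matches the RoleBinding structure documented above; names like "pod-reader" and "jane" are hypothetical.

package main

import (
    "fmt"

    rbacv1 "k8s.io/api/rbac/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
    // The v1 RoleBinding has the same shape as the deprecated v1beta1 one:
    // Subjects say who the binding applies to, RoleRef says which Role
    // (or ClusterRole) is granted.
    rb := rbacv1.RoleBinding{
        ObjectMeta: metav1.ObjectMeta{Name: "read-pods", Namespace: "default"},
        Subjects: []rbacv1.Subject{{
            Kind:     "User",
            APIGroup: "rbac.authorization.k8s.io",
            Name:     "jane", // hypothetical subject
        }},
        RoleRef: rbacv1.RoleRef{
            APIGroup: "rbac.authorization.k8s.io",
            Kind:     "Role",
            Name:     "pod-reader", // hypothetical Role in the same namespace
        },
    }
    fmt.Printf("%s/%s binds %s %q to %d subject(s)\n",
        rb.Namespace, rb.Name, rb.RoleRef.Kind, rb.RoleRef.Name, len(rb.Subjects))
}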

File diff suppressed because it is too large

View File

@ -29,6 +29,80 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
// Package-wide variables from generator "generated".
option go_package = "v1";
// CSINode holds information about all CSI drivers installed on a node.
// CSI drivers do not need to create the CSINode object directly. As long as
// they use the node-driver-registrar sidecar container, the kubelet will
// automatically populate the CSINode object for the CSI driver as part of
// kubelet plugin registration.
// CSINode has the same name as a node. If the object is missing, it means either
// there are no CSI Drivers available on the node, or the Kubelet version is low
// enough that it doesn't create this object.
// CSINode has an OwnerReference that points to the corresponding node object.
message CSINode {
// metadata.name must be the Kubernetes node name.
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
// spec is the specification of CSINode
optional CSINodeSpec spec = 2;
}
// CSINodeDriver holds information about the specification of one CSI driver installed on a node
message CSINodeDriver {
// This is the name of the CSI driver that this object refers to.
// This MUST be the same name returned by the CSI GetPluginName() call for
// that driver.
optional string name = 1;
// nodeID of the node from the driver point of view.
// This field enables Kubernetes to communicate with storage systems that do
// not share the same nomenclature for nodes. For example, Kubernetes may
// refer to a given node as "node1", but the storage system may refer to
// the same node as "nodeA". When Kubernetes issues a command to the storage
// system to attach a volume to a specific node, it can use this field to
// refer to the node name using the ID that the storage system will
// understand, e.g. "nodeA" instead of "node1". This field is required.
optional string nodeID = 2;
// topologyKeys is the list of keys supported by the driver.
// When a driver is initialized on a cluster, it provides a set of topology
// keys that it understands (e.g. "company.com/zone", "company.com/region").
// When a driver is initialized on a node, it provides the same topology keys
// along with values. Kubelet will expose these topology keys as labels
// on its own node object.
// When Kubernetes does topology aware provisioning, it can use this list to
// determine which labels it should retrieve from the node object and pass
// back to the driver.
// It is possible for different nodes to use different topology keys.
// This can be empty if the driver does not support topology.
// +optional
repeated string topologyKeys = 3;
// allocatable represents the volume resources of a node that are available for scheduling.
// This field is beta.
// +optional
optional VolumeNodeResources allocatable = 4;
}
// CSINodeList is a collection of CSINode objects.
message CSINodeList {
// Standard list metadata
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
// items is the list of CSINode
repeated CSINode items = 2;
}
// CSINodeSpec holds information about the specification of all CSI drivers installed on a node
message CSINodeSpec {
// drivers is a list of information about all CSI Drivers existing on a node.
// If all drivers in the list are uninstalled, this can become empty.
// +patchMergeKey=name
// +patchStrategy=merge
repeated CSINodeDriver drivers = 1;
}
// StorageClass describes the parameters for a class of storage for
// which PersistentVolumes can be dynamically provisioned.
//
@ -193,3 +267,13 @@ message VolumeError {
optional string message = 2;
}
// VolumeNodeResources is a set of resource limits for scheduling of volumes.
message VolumeNodeResources {
// Maximum number of unique volumes managed by the CSI driver that can be used on a node.
// A volume that is both attached and mounted on a node is considered to be used once, not twice.
// The same rule applies for a unique volume that is shared among multiple pods on the same node.
// If this field is not specified, then the supported number of volumes on this node is unbounded.
// +optional
optional int32 count = 1;
}

View File

@ -49,6 +49,9 @@ func addKnownTypes(scheme *runtime.Scheme) error {
&VolumeAttachment{},
&VolumeAttachmentList{},
&CSINode{},
&CSINodeList{},
)
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
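
Registering &CSINode{} and &CSINodeList{} here is what lets a runtime.Scheme resolve these Go types to their GroupVersionKind. A small sketch, assuming the standard generated AddToScheme entry point in k8s.io/api/storage/v1:

package main

import (
    "fmt"

    storagev1 "k8s.io/api/storage/v1"
    "k8s.io/apimachinery/pkg/runtime"
)

func main() {
    scheme := runtime.NewScheme()
    // AddToScheme runs the registration shown above (addKnownTypes et al.).
    if err := storagev1.AddToScheme(scheme); err != nil {
        panic(err)
    }
    // With CSINode registered, the scheme can map the Go type to its GVK.
    gvks, _, err := scheme.ObjectKinds(&storagev1.CSINode{})
    if err != nil {
        panic(err)
    }
    fmt.Println(gvks) // e.g. [storage.k8s.io/v1, Kind=CSINode]
}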

View File

@ -17,7 +17,7 @@ limitations under the License.
package v1
import (
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@ -216,3 +216,97 @@ type VolumeError struct {
// +optional
Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"`
}
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// CSINode holds information about all CSI drivers installed on a node.
// CSI drivers do not need to create the CSINode object directly. As long as
// they use the node-driver-registrar sidecar container, the kubelet will
// automatically populate the CSINode object for the CSI driver as part of
// kubelet plugin registration.
// CSINode has the same name as a node. If the object is missing, it means either
// there are no CSI Drivers available on the node, or the Kubelet version is low
// enough that it doesn't create this object.
// CSINode has an OwnerReference that points to the corresponding node object.
type CSINode struct {
metav1.TypeMeta `json:",inline"`
// metadata.name must be the Kubernetes node name.
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// spec is the specification of CSINode
Spec CSINodeSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
}
// CSINodeSpec holds information about the specification of all CSI drivers installed on a node
type CSINodeSpec struct {
// drivers is a list of information about all CSI Drivers existing on a node.
// If all drivers in the list are uninstalled, this can become empty.
// +patchMergeKey=name
// +patchStrategy=merge
Drivers []CSINodeDriver `json:"drivers" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,1,rep,name=drivers"`
}
// CSINodeDriver holds information about the specification of one CSI driver installed on a node
type CSINodeDriver struct {
// This is the name of the CSI driver that this object refers to.
// This MUST be the same name returned by the CSI GetPluginName() call for
// that driver.
Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
// nodeID of the node from the driver point of view.
// This field enables Kubernetes to communicate with storage systems that do
// not share the same nomenclature for nodes. For example, Kubernetes may
// refer to a given node as "node1", but the storage system may refer to
// the same node as "nodeA". When Kubernetes issues a command to the storage
// system to attach a volume to a specific node, it can use this field to
// refer to the node name using the ID that the storage system will
// understand, e.g. "nodeA" instead of "node1". This field is required.
NodeID string `json:"nodeID" protobuf:"bytes,2,opt,name=nodeID"`
// topologyKeys is the list of keys supported by the driver.
// When a driver is initialized on a cluster, it provides a set of topology
// keys that it understands (e.g. "company.com/zone", "company.com/region").
// When a driver is initialized on a node, it provides the same topology keys
// along with values. Kubelet will expose these topology keys as labels
// on its own node object.
// When Kubernetes does topology aware provisioning, it can use this list to
// determine which labels it should retrieve from the node object and pass
// back to the driver.
// It is possible for different nodes to use different topology keys.
// This can be empty if the driver does not support topology.
// +optional
TopologyKeys []string `json:"topologyKeys" protobuf:"bytes,3,rep,name=topologyKeys"`
// allocatable represents the volume resources of a node that are available for scheduling.
// This field is beta.
// +optional
Allocatable *VolumeNodeResources `json:"allocatable,omitempty" protobuf:"bytes,4,opt,name=allocatable"`
}
// VolumeNodeResources is a set of resource limits for scheduling of volumes.
type VolumeNodeResources struct {
// Maximum number of unique volumes managed by the CSI driver that can be used on a node.
// A volume that is both attached and mounted on a node is considered to be used once, not twice.
// The same rule applies for a unique volume that is shared among multiple pods on the same node.
// If this field is not specified, then the supported number of volumes on this node is unbounded.
// +optional
Count *int32 `json:"count,omitempty" protobuf:"varint,1,opt,name=count"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// CSINodeList is a collection of CSINode objects.
type CSINodeList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// items is the list of CSINode
Items []CSINode `json:"items" protobuf:"bytes,2,rep,name=items"`
}
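
The nodeID and topologyKeys comments above describe a name-mapping and labeling contract between Kubernetes and the storage system; a sketch of populating these types end to end, with hypothetical driver and node names:

package main

import (
    "fmt"

    storagev1 "k8s.io/api/storage/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
    count := int32(39) // hypothetical per-node volume limit reported by the driver
    node := storagev1.CSINode{
        // metadata.name must match the Kubernetes node name.
        ObjectMeta: metav1.ObjectMeta{Name: "node1"},
        Spec: storagev1.CSINodeSpec{
            Drivers: []storagev1.CSINodeDriver{{
                Name:         "ebs.csi.example.com", // hypothetical driver name
                NodeID:       "nodeA",               // the storage system's name for node1
                TopologyKeys: []string{"company.com/zone"},
                Allocatable:  &storagev1.VolumeNodeResources{Count: &count},
            }},
        },
    }
    fmt.Printf("%s: %d driver(s) registered\n", node.Name, len(node.Spec.Drivers))
}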

View File

@ -27,6 +27,47 @@ package v1
// Those methods can be generated by using hack/update-generated-swagger-docs.sh
// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
var map_CSINode = map[string]string{
"": "CSINode holds information about all CSI drivers installed on a node. CSI drivers do not need to create the CSINode object directly. As long as they use the node-driver-registrar sidecar container, the kubelet will automatically populate the CSINode object for the CSI driver as part of kubelet plugin registration. CSINode has the same name as a node. If the object is missing, it means either there are no CSI Drivers available on the node, or the Kubelet version is low enough that it doesn't create this object. CSINode has an OwnerReference that points to the corresponding node object.",
"metadata": "metadata.name must be the Kubernetes node name.",
"spec": "spec is the specification of CSINode",
}
func (CSINode) SwaggerDoc() map[string]string {
return map_CSINode
}
var map_CSINodeDriver = map[string]string{
"": "CSINodeDriver holds information about the specification of one CSI driver installed on a node",
"name": "This is the name of the CSI driver that this object refers to. This MUST be the same name returned by the CSI GetPluginName() call for that driver.",
"nodeID": "nodeID of the node from the driver point of view. This field enables Kubernetes to communicate with storage systems that do not share the same nomenclature for nodes. For example, Kubernetes may refer to a given node as \"node1\", but the storage system may refer to the same node as \"nodeA\". When Kubernetes issues a command to the storage system to attach a volume to a specific node, it can use this field to refer to the node name using the ID that the storage system will understand, e.g. \"nodeA\" instead of \"node1\". This field is required.",
"topologyKeys": "topologyKeys is the list of keys supported by the driver. When a driver is initialized on a cluster, it provides a set of topology keys that it understands (e.g. \"company.com/zone\", \"company.com/region\"). When a driver is initialized on a node, it provides the same topology keys along with values. Kubelet will expose these topology keys as labels on its own node object. When Kubernetes does topology aware provisioning, it can use this list to determine which labels it should retrieve from the node object and pass back to the driver. It is possible for different nodes to use different topology keys. This can be empty if driver does not support topology.",
"allocatable": "allocatable represents the volume resources of a node that are available for scheduling. This field is beta.",
}
func (CSINodeDriver) SwaggerDoc() map[string]string {
return map_CSINodeDriver
}
var map_CSINodeList = map[string]string{
"": "CSINodeList is a collection of CSINode objects.",
"metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
"items": "items is the list of CSINode",
}
func (CSINodeList) SwaggerDoc() map[string]string {
return map_CSINodeList
}
var map_CSINodeSpec = map[string]string{
"": "CSINodeSpec holds information about the specification of all CSI drivers installed on a node",
"drivers": "drivers is a list of information of all CSI Drivers existing on a node. If all drivers in the list are uninstalled, this can become empty.",
}
func (CSINodeSpec) SwaggerDoc() map[string]string {
return map_CSINodeSpec
}
var map_StorageClass = map[string]string{
"": "StorageClass describes the parameters for a class of storage for which PersistentVolumes can be dynamically provisioned.\n\nStorageClasses are non-namespaced; the name of the storage class according to etcd is in ObjectMeta.Name.",
"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
@ -116,4 +157,13 @@ func (VolumeError) SwaggerDoc() map[string]string {
return map_VolumeError
}
var map_VolumeNodeResources = map[string]string{
"": "VolumeNodeResources is a set of resource limits for scheduling of volumes.",
"count": "Maximum number of unique volumes managed by the CSI driver that can be used on a node. A volume that is both attached and mounted on a node is considered to be used once, not twice. The same rule applies for a unique volume that is shared among multiple pods on the same node. If this field is not specified, then the supported number of volumes on this node is unbounded.",
}
func (VolumeNodeResources) SwaggerDoc() map[string]string {
return map_VolumeNodeResources
}
// AUTO-GENERATED FUNCTIONS END HERE

View File

@ -25,6 +25,115 @@ import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CSINode) DeepCopyInto(out *CSINode) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSINode.
func (in *CSINode) DeepCopy() *CSINode {
if in == nil {
return nil
}
out := new(CSINode)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CSINode) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CSINodeDriver) DeepCopyInto(out *CSINodeDriver) {
*out = *in
if in.TopologyKeys != nil {
in, out := &in.TopologyKeys, &out.TopologyKeys
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Allocatable != nil {
in, out := &in.Allocatable, &out.Allocatable
*out = new(VolumeNodeResources)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSINodeDriver.
func (in *CSINodeDriver) DeepCopy() *CSINodeDriver {
if in == nil {
return nil
}
out := new(CSINodeDriver)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CSINodeList) DeepCopyInto(out *CSINodeList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]CSINode, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSINodeList.
func (in *CSINodeList) DeepCopy() *CSINodeList {
if in == nil {
return nil
}
out := new(CSINodeList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CSINodeList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CSINodeSpec) DeepCopyInto(out *CSINodeSpec) {
*out = *in
if in.Drivers != nil {
in, out := &in.Drivers, &out.Drivers
*out = make([]CSINodeDriver, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSINodeSpec.
func (in *CSINodeSpec) DeepCopy() *CSINodeSpec {
if in == nil {
return nil
}
out := new(CSINodeSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StorageClass) DeepCopyInto(out *StorageClass) {
*out = *in
@ -271,3 +380,24 @@ func (in *VolumeError) DeepCopy() *VolumeError {
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VolumeNodeResources) DeepCopyInto(out *VolumeNodeResources) {
*out = *in
if in.Count != nil {
in, out := &in.Count, &out.Count
*out = new(int32)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeNodeResources.
func (in *VolumeNodeResources) DeepCopy() *VolumeNodeResources {
if in == nil {
return nil
}
out := new(VolumeNodeResources)
in.DeepCopyInto(out)
return out
}
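
Because CSINodeDriver.Allocatable and VolumeNodeResources.Count are pointers, the generated deepcopy functions above allocate fresh targets rather than sharing them. A quick sketch of the independence this buys, using a hypothetical driver name:

package main

import (
    "fmt"

    storagev1 "k8s.io/api/storage/v1"
)

func main() {
    count := int32(8)
    orig := &storagev1.CSINodeDriver{
        Name:        "csi.example.com", // hypothetical driver
        Allocatable: &storagev1.VolumeNodeResources{Count: &count},
    }
    // DeepCopy allocates a new VolumeNodeResources and a new int32,
    // so mutating the copy leaves the original untouched.
    cp := orig.DeepCopy()
    *cp.Allocatable.Count = 16
    fmt.Println(*orig.Allocatable.Count, *cp.Allocatable.Count) // 8 16
}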

View File

@ -121,6 +121,8 @@ message CSIDriverSpec {
repeated string volumeLifecycleModes = 3;
}
// DEPRECATED - This group version of CSINode is deprecated by storage/v1/CSINode.
// See the release notes for more information.
// CSINode holds information about all CSI drivers installed on a node.
// CSI drivers do not need to create the CSINode object directly. As long as
// they use the node-driver-registrar sidecar container, the kubelet will

View File

@ -348,6 +348,8 @@ const (
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// DEPRECATED - This group version of CSINode is deprecated by storage/v1/CSINode.
// See the release notes for more information.
// CSINode holds information about all CSI drivers installed on a node.
// CSI drivers do not need to create the CSINode object directly. As long as
// they use the node-driver-registrar sidecar container, the kubelet will

Some files were not shown because too many files have changed in this diff