add dependent vendor

Signed-off-by: changzhen <changzhen5@huawei.com>
This commit is contained in:
changzhen 2021-12-16 20:39:28 +08:00
parent 00f46c6452
commit 08d98c021e
144 changed files with 20059 additions and 0 deletions

1
go.mod
View File

@@ -36,4 +36,5 @@ require (
sigs.k8s.io/kind v0.11.1
sigs.k8s.io/mcs-api v0.1.0
sigs.k8s.io/yaml v1.3.0
sigs.k8s.io/structured-merge-diff/v4 v4.1.2
)

3
vendor/golang.org/x/crypto/AUTHORS generated vendored Normal file
View File

@@ -0,0 +1,3 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at https://tip.golang.org/AUTHORS.

3
vendor/golang.org/x/crypto/CONTRIBUTORS generated vendored Normal file
View File

@@ -0,0 +1,3 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at https://tip.golang.org/CONTRIBUTORS.

27
vendor/golang.org/x/crypto/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,27 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

22
vendor/golang.org/x/crypto/PATENTS generated vendored Normal file
View File

@@ -0,0 +1,22 @@
Additional IP Rights Grant (Patents)
"This implementation" means the copyrightable works distributed by
Google as part of the Go project.
Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.

804
vendor/golang.org/x/crypto/cryptobyte/asn1.go generated vendored Normal file
View File

@@ -0,0 +1,804 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cryptobyte
import (
encoding_asn1 "encoding/asn1"
"fmt"
"math/big"
"reflect"
"time"
"golang.org/x/crypto/cryptobyte/asn1"
)
// This file contains ASN.1-related methods for String and Builder.
// Builder
// AddASN1Int64 appends a DER-encoded ASN.1 INTEGER.
func (b *Builder) AddASN1Int64(v int64) {
b.addASN1Signed(asn1.INTEGER, v)
}
// AddASN1Int64WithTag appends a DER-encoded ASN.1 INTEGER with the
// given tag.
func (b *Builder) AddASN1Int64WithTag(v int64, tag asn1.Tag) {
b.addASN1Signed(tag, v)
}
// AddASN1Enum appends a DER-encoded ASN.1 ENUMERATION.
func (b *Builder) AddASN1Enum(v int64) {
b.addASN1Signed(asn1.ENUM, v)
}
func (b *Builder) addASN1Signed(tag asn1.Tag, v int64) {
b.AddASN1(tag, func(c *Builder) {
length := 1
for i := v; i >= 0x80 || i < -0x80; i >>= 8 {
length++
}
for ; length > 0; length-- {
i := v >> uint((length-1)*8) & 0xff
c.AddUint8(uint8(i))
}
})
}
// AddASN1Uint64 appends a DER-encoded ASN.1 INTEGER.
func (b *Builder) AddASN1Uint64(v uint64) {
b.AddASN1(asn1.INTEGER, func(c *Builder) {
length := 1
for i := v; i >= 0x80; i >>= 8 {
length++
}
for ; length > 0; length-- {
i := v >> uint((length-1)*8) & 0xff
c.AddUint8(uint8(i))
}
})
}
// AddASN1BigInt appends a DER-encoded ASN.1 INTEGER.
func (b *Builder) AddASN1BigInt(n *big.Int) {
if b.err != nil {
return
}
b.AddASN1(asn1.INTEGER, func(c *Builder) {
if n.Sign() < 0 {
// A negative number has to be converted to two's-complement form. So we
// invert and subtract 1. If the most-significant-bit isn't set then
// we'll need to pad the beginning with 0xff in order to keep the number
// negative.
nMinus1 := new(big.Int).Neg(n)
nMinus1.Sub(nMinus1, bigOne)
bytes := nMinus1.Bytes()
for i := range bytes {
bytes[i] ^= 0xff
}
if len(bytes) == 0 || bytes[0]&0x80 == 0 {
c.add(0xff)
}
c.add(bytes...)
} else if n.Sign() == 0 {
c.add(0)
} else {
bytes := n.Bytes()
if bytes[0]&0x80 != 0 {
c.add(0)
}
c.add(bytes...)
}
})
}
// AddASN1OctetString appends a DER-encoded ASN.1 OCTET STRING.
func (b *Builder) AddASN1OctetString(bytes []byte) {
b.AddASN1(asn1.OCTET_STRING, func(c *Builder) {
c.AddBytes(bytes)
})
}
const generalizedTimeFormatStr = "20060102150405Z0700"
// AddASN1GeneralizedTime appends a DER-encoded ASN.1 GENERALIZEDTIME.
func (b *Builder) AddASN1GeneralizedTime(t time.Time) {
if t.Year() < 0 || t.Year() > 9999 {
b.err = fmt.Errorf("cryptobyte: cannot represent %v as a GeneralizedTime", t)
return
}
b.AddASN1(asn1.GeneralizedTime, func(c *Builder) {
c.AddBytes([]byte(t.Format(generalizedTimeFormatStr)))
})
}
// AddASN1UTCTime appends a DER-encoded ASN.1 UTCTime.
func (b *Builder) AddASN1UTCTime(t time.Time) {
b.AddASN1(asn1.UTCTime, func(c *Builder) {
// As utilized by the X.509 profile, UTCTime can only
// represent the years 1950 through 2049.
if t.Year() < 1950 || t.Year() >= 2050 {
b.err = fmt.Errorf("cryptobyte: cannot represent %v as a UTCTime", t)
return
}
c.AddBytes([]byte(t.Format(defaultUTCTimeFormatStr)))
})
}
// AddASN1BitString appends a DER-encoded ASN.1 BIT STRING. This does not
// support BIT STRINGs that are not a whole number of bytes.
func (b *Builder) AddASN1BitString(data []byte) {
b.AddASN1(asn1.BIT_STRING, func(b *Builder) {
b.AddUint8(0)
b.AddBytes(data)
})
}
func (b *Builder) addBase128Int(n int64) {
var length int
if n == 0 {
length = 1
} else {
for i := n; i > 0; i >>= 7 {
length++
}
}
for i := length - 1; i >= 0; i-- {
o := byte(n >> uint(i*7))
o &= 0x7f
if i != 0 {
o |= 0x80
}
b.add(o)
}
}
func isValidOID(oid encoding_asn1.ObjectIdentifier) bool {
if len(oid) < 2 {
return false
}
if oid[0] > 2 || (oid[0] <= 1 && oid[1] >= 40) {
return false
}
for _, v := range oid {
if v < 0 {
return false
}
}
return true
}
func (b *Builder) AddASN1ObjectIdentifier(oid encoding_asn1.ObjectIdentifier) {
b.AddASN1(asn1.OBJECT_IDENTIFIER, func(b *Builder) {
if !isValidOID(oid) {
b.err = fmt.Errorf("cryptobyte: invalid OID: %v", oid)
return
}
b.addBase128Int(int64(oid[0])*40 + int64(oid[1]))
for _, v := range oid[2:] {
b.addBase128Int(int64(v))
}
})
}
func (b *Builder) AddASN1Boolean(v bool) {
b.AddASN1(asn1.BOOLEAN, func(b *Builder) {
if v {
b.AddUint8(0xff)
} else {
b.AddUint8(0)
}
})
}
func (b *Builder) AddASN1NULL() {
b.add(uint8(asn1.NULL), 0)
}
// MarshalASN1 calls encoding_asn1.Marshal on its input and appends the result if
// successful or records an error if one occurred.
func (b *Builder) MarshalASN1(v interface{}) {
// NOTE(martinkr): This is somewhat of a hack to allow propagation of
// encoding_asn1.Marshal errors into Builder.err. N.B. if you call MarshalASN1 with a
// value embedded into a struct, its tag information is lost.
if b.err != nil {
return
}
bytes, err := encoding_asn1.Marshal(v)
if err != nil {
b.err = err
return
}
b.AddBytes(bytes)
}
// AddASN1 appends an ASN.1 object. The object is prefixed with the given tag.
// Tags greater than 30 are not supported and result in an error (i.e.
// low-tag-number form only). The child builder passed to the
// BuilderContinuation can be used to build the content of the ASN.1 object.
func (b *Builder) AddASN1(tag asn1.Tag, f BuilderContinuation) {
if b.err != nil {
return
}
// Identifiers with the low five bits set indicate high-tag-number format
// (two or more octets), which we don't support.
if tag&0x1f == 0x1f {
b.err = fmt.Errorf("cryptobyte: high-tag number identifier octects not supported: 0x%x", tag)
return
}
b.AddUint8(uint8(tag))
b.addLengthPrefixed(1, true, f)
}
// String
// ReadASN1Boolean decodes an ASN.1 BOOLEAN and converts it to a boolean
// representation into out and advances. It reports whether the read
// was successful.
func (s *String) ReadASN1Boolean(out *bool) bool {
var bytes String
if !s.ReadASN1(&bytes, asn1.BOOLEAN) || len(bytes) != 1 {
return false
}
switch bytes[0] {
case 0:
*out = false
case 0xff:
*out = true
default:
return false
}
return true
}
var bigIntType = reflect.TypeOf((*big.Int)(nil)).Elem()
// ReadASN1Integer decodes an ASN.1 INTEGER into out and advances. If out does
// not point to an integer or to a big.Int, it panics. It reports whether the
// read was successful.
func (s *String) ReadASN1Integer(out interface{}) bool {
if reflect.TypeOf(out).Kind() != reflect.Ptr {
panic("out is not a pointer")
}
switch reflect.ValueOf(out).Elem().Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
var i int64
if !s.readASN1Int64(&i) || reflect.ValueOf(out).Elem().OverflowInt(i) {
return false
}
reflect.ValueOf(out).Elem().SetInt(i)
return true
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
var u uint64
if !s.readASN1Uint64(&u) || reflect.ValueOf(out).Elem().OverflowUint(u) {
return false
}
reflect.ValueOf(out).Elem().SetUint(u)
return true
case reflect.Struct:
if reflect.TypeOf(out).Elem() == bigIntType {
return s.readASN1BigInt(out.(*big.Int))
}
}
panic("out does not point to an integer type")
}
func checkASN1Integer(bytes []byte) bool {
if len(bytes) == 0 {
// An INTEGER is encoded with at least one octet.
return false
}
if len(bytes) == 1 {
return true
}
if bytes[0] == 0 && bytes[1]&0x80 == 0 || bytes[0] == 0xff && bytes[1]&0x80 == 0x80 {
// Value is not minimally encoded.
return false
}
return true
}
var bigOne = big.NewInt(1)
func (s *String) readASN1BigInt(out *big.Int) bool {
var bytes String
if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) {
return false
}
if bytes[0]&0x80 == 0x80 {
// Negative number.
neg := make([]byte, len(bytes))
for i, b := range bytes {
neg[i] = ^b
}
out.SetBytes(neg)
out.Add(out, bigOne)
out.Neg(out)
} else {
out.SetBytes(bytes)
}
return true
}
func (s *String) readASN1Int64(out *int64) bool {
var bytes String
if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) || !asn1Signed(out, bytes) {
return false
}
return true
}
func asn1Signed(out *int64, n []byte) bool {
length := len(n)
if length > 8 {
return false
}
for i := 0; i < length; i++ {
*out <<= 8
*out |= int64(n[i])
}
// Shift up and down in order to sign extend the result.
*out <<= 64 - uint8(length)*8
*out >>= 64 - uint8(length)*8
return true
}
func (s *String) readASN1Uint64(out *uint64) bool {
var bytes String
if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) || !asn1Unsigned(out, bytes) {
return false
}
return true
}
func asn1Unsigned(out *uint64, n []byte) bool {
length := len(n)
if length > 9 || length == 9 && n[0] != 0 {
// Too large for uint64.
return false
}
if n[0]&0x80 != 0 {
// Negative number.
return false
}
for i := 0; i < length; i++ {
*out <<= 8
*out |= uint64(n[i])
}
return true
}
// ReadASN1Int64WithTag decodes an ASN.1 INTEGER with the given tag into out
// and advances. It reports whether the read was successful and resulted in a
// value that can be represented in an int64.
func (s *String) ReadASN1Int64WithTag(out *int64, tag asn1.Tag) bool {
var bytes String
return s.ReadASN1(&bytes, tag) && checkASN1Integer(bytes) && asn1Signed(out, bytes)
}
// ReadASN1Enum decodes an ASN.1 ENUMERATION into out and advances. It reports
// whether the read was successful.
func (s *String) ReadASN1Enum(out *int) bool {
var bytes String
var i int64
if !s.ReadASN1(&bytes, asn1.ENUM) || !checkASN1Integer(bytes) || !asn1Signed(&i, bytes) {
return false
}
if int64(int(i)) != i {
return false
}
*out = int(i)
return true
}
func (s *String) readBase128Int(out *int) bool {
ret := 0
for i := 0; len(*s) > 0; i++ {
if i == 4 {
return false
}
ret <<= 7
b := s.read(1)[0]
ret |= int(b & 0x7f)
if b&0x80 == 0 {
*out = ret
return true
}
}
return false // truncated
}
// ReadASN1ObjectIdentifier decodes an ASN.1 OBJECT IDENTIFIER into out and
// advances. It reports whether the read was successful.
func (s *String) ReadASN1ObjectIdentifier(out *encoding_asn1.ObjectIdentifier) bool {
var bytes String
if !s.ReadASN1(&bytes, asn1.OBJECT_IDENTIFIER) || len(bytes) == 0 {
return false
}
// In the worst case, we get two elements from the first byte (which is
// encoded differently) and then every varint is a single byte long.
components := make([]int, len(bytes)+1)
// The first varint is 40*value1 + value2:
// According to this packing, value1 can take the values 0, 1 and 2 only.
// When value1 = 0 or value1 = 1, then value2 is <= 39. When value1 = 2,
// then there are no restrictions on value2.
var v int
if !bytes.readBase128Int(&v) {
return false
}
if v < 80 {
components[0] = v / 40
components[1] = v % 40
} else {
components[0] = 2
components[1] = v - 80
}
i := 2
for ; len(bytes) > 0; i++ {
if !bytes.readBase128Int(&v) {
return false
}
components[i] = v
}
*out = components[:i]
return true
}
// ReadASN1GeneralizedTime decodes an ASN.1 GENERALIZEDTIME into out and
// advances. It reports whether the read was successful.
func (s *String) ReadASN1GeneralizedTime(out *time.Time) bool {
var bytes String
if !s.ReadASN1(&bytes, asn1.GeneralizedTime) {
return false
}
t := string(bytes)
res, err := time.Parse(generalizedTimeFormatStr, t)
if err != nil {
return false
}
if serialized := res.Format(generalizedTimeFormatStr); serialized != t {
return false
}
*out = res
return true
}
const defaultUTCTimeFormatStr = "060102150405Z0700"
// ReadASN1UTCTime decodes an ASN.1 UTCTime into out and advances.
// It reports whether the read was successful.
func (s *String) ReadASN1UTCTime(out *time.Time) bool {
var bytes String
if !s.ReadASN1(&bytes, asn1.UTCTime) {
return false
}
t := string(bytes)
formatStr := defaultUTCTimeFormatStr
var err error
res, err := time.Parse(formatStr, t)
if err != nil {
// Fallback to minute precision if we can't parse second
// precision. If we are following X.509 or X.690 we shouldn't
// support this, but we do.
formatStr = "0601021504Z0700"
res, err = time.Parse(formatStr, t)
}
if err != nil {
return false
}
if serialized := res.Format(formatStr); serialized != t {
return false
}
if res.Year() >= 2050 {
// UTCTime interprets the low order digits 50-99 as 1950-99.
// This only applies to its use in the X.509 profile.
// See https://tools.ietf.org/html/rfc5280#section-4.1.2.5.1
res = res.AddDate(-100, 0, 0)
}
*out = res
return true
}
// ReadASN1BitString decodes an ASN.1 BIT STRING into out and advances.
// It reports whether the read was successful.
func (s *String) ReadASN1BitString(out *encoding_asn1.BitString) bool {
var bytes String
if !s.ReadASN1(&bytes, asn1.BIT_STRING) || len(bytes) == 0 ||
len(bytes)*8/8 != len(bytes) {
return false
}
paddingBits := uint8(bytes[0])
bytes = bytes[1:]
if paddingBits > 7 ||
len(bytes) == 0 && paddingBits != 0 ||
len(bytes) > 0 && bytes[len(bytes)-1]&(1<<paddingBits-1) != 0 {
return false
}
out.BitLength = len(bytes)*8 - int(paddingBits)
out.Bytes = bytes
return true
}
// ReadASN1BitStringAsBytes decodes an ASN.1 BIT STRING into out and advances. It is
// an error if the BIT STRING is not a whole number of bytes. It reports
// whether the read was successful.
func (s *String) ReadASN1BitStringAsBytes(out *[]byte) bool {
var bytes String
if !s.ReadASN1(&bytes, asn1.BIT_STRING) || len(bytes) == 0 {
return false
}
paddingBits := uint8(bytes[0])
if paddingBits != 0 {
return false
}
*out = bytes[1:]
return true
}
// ReadASN1Bytes reads the contents of a DER-encoded ASN.1 element (not including
// tag and length bytes) into out, and advances. The element must match the
// given tag. It reports whether the read was successful.
func (s *String) ReadASN1Bytes(out *[]byte, tag asn1.Tag) bool {
return s.ReadASN1((*String)(out), tag)
}
// ReadASN1 reads the contents of a DER-encoded ASN.1 element (not including
// tag and length bytes) into out, and advances. The element must match the
// given tag. It reports whether the read was successful.
//
// Tags greater than 30 are not supported (i.e. low-tag-number format only).
func (s *String) ReadASN1(out *String, tag asn1.Tag) bool {
var t asn1.Tag
if !s.ReadAnyASN1(out, &t) || t != tag {
return false
}
return true
}
// ReadASN1Element reads the contents of a DER-encoded ASN.1 element (including
// tag and length bytes) into out, and advances. The element must match the
// given tag. It reports whether the read was successful.
//
// Tags greater than 30 are not supported (i.e. low-tag-number format only).
func (s *String) ReadASN1Element(out *String, tag asn1.Tag) bool {
var t asn1.Tag
if !s.ReadAnyASN1Element(out, &t) || t != tag {
return false
}
return true
}
// ReadAnyASN1 reads the contents of a DER-encoded ASN.1 element (not including
// tag and length bytes) into out, sets outTag to its tag, and advances.
// It reports whether the read was successful.
//
// Tags greater than 30 are not supported (i.e. low-tag-number format only).
func (s *String) ReadAnyASN1(out *String, outTag *asn1.Tag) bool {
return s.readASN1(out, outTag, true /* skip header */)
}
// ReadAnyASN1Element reads the contents of a DER-encoded ASN.1 element
// (including tag and length bytes) into out, sets outTag to its tag, and
// advances. It reports whether the read was successful.
//
// Tags greater than 30 are not supported (i.e. low-tag-number format only).
func (s *String) ReadAnyASN1Element(out *String, outTag *asn1.Tag) bool {
return s.readASN1(out, outTag, false /* include header */)
}
// PeekASN1Tag reports whether the next ASN.1 value on the string starts with
// the given tag.
func (s String) PeekASN1Tag(tag asn1.Tag) bool {
if len(s) == 0 {
return false
}
return asn1.Tag(s[0]) == tag
}
// SkipASN1 reads and discards an ASN.1 element with the given tag. It
// reports whether the operation was successful.
func (s *String) SkipASN1(tag asn1.Tag) bool {
var unused String
return s.ReadASN1(&unused, tag)
}
// ReadOptionalASN1 attempts to read the contents of a DER-encoded ASN.1
// element (not including tag and length bytes) tagged with the given tag into
// out. It stores whether an element with the tag was found in outPresent,
// unless outPresent is nil. It reports whether the read was successful.
func (s *String) ReadOptionalASN1(out *String, outPresent *bool, tag asn1.Tag) bool {
present := s.PeekASN1Tag(tag)
if outPresent != nil {
*outPresent = present
}
if present && !s.ReadASN1(out, tag) {
return false
}
return true
}
// SkipOptionalASN1 advances s over an ASN.1 element with the given tag, or
// else leaves s unchanged. It reports whether the operation was successful.
func (s *String) SkipOptionalASN1(tag asn1.Tag) bool {
if !s.PeekASN1Tag(tag) {
return true
}
var unused String
return s.ReadASN1(&unused, tag)
}
// ReadOptionalASN1Integer attempts to read an optional ASN.1 INTEGER
// explicitly tagged with tag into out and advances. If no element with a
// matching tag is present, it writes defaultValue into out instead. If out
// does not point to an integer or to a big.Int, it panics. It reports
// whether the read was successful.
func (s *String) ReadOptionalASN1Integer(out interface{}, tag asn1.Tag, defaultValue interface{}) bool {
if reflect.TypeOf(out).Kind() != reflect.Ptr {
panic("out is not a pointer")
}
var present bool
var i String
if !s.ReadOptionalASN1(&i, &present, tag) {
return false
}
if !present {
switch reflect.ValueOf(out).Elem().Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
reflect.ValueOf(out).Elem().Set(reflect.ValueOf(defaultValue))
case reflect.Struct:
if reflect.TypeOf(out).Elem() != bigIntType {
panic("invalid integer type")
}
if reflect.TypeOf(defaultValue).Kind() != reflect.Ptr ||
reflect.TypeOf(defaultValue).Elem() != bigIntType {
panic("out points to big.Int, but defaultValue does not")
}
out.(*big.Int).Set(defaultValue.(*big.Int))
default:
panic("invalid integer type")
}
return true
}
if !i.ReadASN1Integer(out) || !i.Empty() {
return false
}
return true
}
// ReadOptionalASN1OctetString attempts to read an optional ASN.1 OCTET STRING
// explicitly tagged with tag into out and advances. If no element with a
// matching tag is present, it sets "out" to nil instead. It reports
// whether the read was successful.
func (s *String) ReadOptionalASN1OctetString(out *[]byte, outPresent *bool, tag asn1.Tag) bool {
var present bool
var child String
if !s.ReadOptionalASN1(&child, &present, tag) {
return false
}
if outPresent != nil {
*outPresent = present
}
if present {
var oct String
if !child.ReadASN1(&oct, asn1.OCTET_STRING) || !child.Empty() {
return false
}
*out = oct
} else {
*out = nil
}
return true
}
// ReadOptionalASN1Boolean sets *out to the value of the next ASN.1 BOOLEAN or,
// if the next bytes are not an ASN.1 BOOLEAN, to the value of defaultValue.
// It reports whether the operation was successful.
func (s *String) ReadOptionalASN1Boolean(out *bool, defaultValue bool) bool {
var present bool
var child String
if !s.ReadOptionalASN1(&child, &present, asn1.BOOLEAN) {
return false
}
if !present {
*out = defaultValue
return true
}
return s.ReadASN1Boolean(out)
}
func (s *String) readASN1(out *String, outTag *asn1.Tag, skipHeader bool) bool {
if len(*s) < 2 {
return false
}
tag, lenByte := (*s)[0], (*s)[1]
if tag&0x1f == 0x1f {
// ITU-T X.690 section 8.1.2
//
// An identifier octet with a tag part of 0x1f indicates a high-tag-number
// form identifier with two or more octets. We only support tags less than
// 31 (i.e. low-tag-number form, single octet identifier).
return false
}
if outTag != nil {
*outTag = asn1.Tag(tag)
}
// ITU-T X.690 section 8.1.3
//
// Bit 8 of the first length byte indicates whether the length is short- or
// long-form.
var length, headerLen uint32 // length includes headerLen
if lenByte&0x80 == 0 {
// Short-form length (section 8.1.3.4), encoded in bits 1-7.
length = uint32(lenByte) + 2
headerLen = 2
} else {
// Long-form length (section 8.1.3.5). Bits 1-7 encode the number of octets
// used to encode the length.
lenLen := lenByte & 0x7f
var len32 uint32
if lenLen == 0 || lenLen > 4 || len(*s) < int(2+lenLen) {
return false
}
lenBytes := String((*s)[2 : 2+lenLen])
if !lenBytes.readUnsigned(&len32, int(lenLen)) {
return false
}
// ITU-T X.690 section 10.1 (DER length forms) requires encoding the length
// with the minimum number of octets.
if len32 < 128 {
// Length should have used short-form encoding.
return false
}
if len32>>((lenLen-1)*8) == 0 {
// Leading octet is 0. Length should have been at least one byte shorter.
return false
}
headerLen = 2 + uint32(lenLen)
if headerLen+len32 < len32 {
// Overflow.
return false
}
length = headerLen + len32
}
if int(length) < 0 || !s.ReadBytes((*[]byte)(out), int(length)) {
return false
}
if skipHeader && !out.Skip(int(headerLen)) {
panic("cryptobyte: internal error")
}
return true
}
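
For orientation, a minimal usage sketch of the vendored cryptobyte ASN.1 helpers added above, building a DER SEQUENCE and parsing it back; the structure and values are illustrative only, not something this commit itself uses:

package main

import (
    "fmt"

    "golang.org/x/crypto/cryptobyte"
    "golang.org/x/crypto/cryptobyte/asn1"
)

func main() {
    // Build: SEQUENCE { INTEGER 42, OCTET STRING "hi" }.
    var b cryptobyte.Builder
    b.AddASN1(asn1.SEQUENCE, func(child *cryptobyte.Builder) {
        child.AddASN1Int64(42)
        child.AddASN1OctetString([]byte("hi"))
    })
    der, err := b.Bytes()
    if err != nil {
        panic(err)
    }

    // Parse the same bytes back with String.
    input := cryptobyte.String(der)
    var seq cryptobyte.String
    var n int64
    var data []byte
    if !input.ReadASN1(&seq, asn1.SEQUENCE) ||
        !seq.ReadASN1Integer(&n) ||
        !seq.ReadASN1Bytes(&data, asn1.OCTET_STRING) {
        panic("parse failed")
    }
    fmt.Println(n, string(data)) // 42 hi
}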

46
vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go generated vendored Normal file
View File

@@ -0,0 +1,46 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package asn1 contains supporting types for parsing and building ASN.1
// messages with the cryptobyte package.
package asn1 // import "golang.org/x/crypto/cryptobyte/asn1"
// Tag represents an ASN.1 identifier octet, consisting of a tag number
// (indicating a type) and class (such as context-specific or constructed).
//
// Methods in the cryptobyte package only support the low-tag-number form, i.e.
// a single identifier octet with bits 7-8 encoding the class and bits 1-6
// encoding the tag number.
type Tag uint8
const (
classConstructed = 0x20
classContextSpecific = 0x80
)
// Constructed returns t with the constructed class bit set.
func (t Tag) Constructed() Tag { return t | classConstructed }
// ContextSpecific returns t with the context-specific class bit set.
func (t Tag) ContextSpecific() Tag { return t | classContextSpecific }
// The following is a list of standard tag and class combinations.
const (
BOOLEAN = Tag(1)
INTEGER = Tag(2)
BIT_STRING = Tag(3)
OCTET_STRING = Tag(4)
NULL = Tag(5)
OBJECT_IDENTIFIER = Tag(6)
ENUM = Tag(10)
UTF8String = Tag(12)
SEQUENCE = Tag(16 | classConstructed)
SET = Tag(17 | classConstructed)
PrintableString = Tag(19)
T61String = Tag(20)
IA5String = Tag(22)
UTCTime = Tag(23)
GeneralizedTime = Tag(24)
GeneralString = Tag(27)
)
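
The class helpers compose with the raw tag numbers above; a small sketch (the tag number [1] is chosen arbitrarily for illustration):

package main

import (
    "fmt"

    "golang.org/x/crypto/cryptobyte"
    "golang.org/x/crypto/cryptobyte/asn1"
)

func main() {
    // A context-specific, constructed [1] tag: class bits OR-ed onto the tag number.
    tag := asn1.Tag(1).ContextSpecific().Constructed()
    fmt.Printf("%#x\n", tag) // 0xa1

    // Wrap an INTEGER in that explicitly tagged element.
    var b cryptobyte.Builder
    b.AddASN1(tag, func(child *cryptobyte.Builder) {
        child.AddASN1Int64(7)
    })
    der, _ := b.Bytes()
    fmt.Printf("% x\n", der) // a1 03 02 01 07
}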

337
vendor/golang.org/x/crypto/cryptobyte/builder.go generated vendored Normal file
View File

@@ -0,0 +1,337 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cryptobyte
import (
"errors"
"fmt"
)
// A Builder builds byte strings from fixed-length and length-prefixed values.
// Builders either allocate space as needed, or are fixed, which means that
// they write into a given buffer and produce an error if it's exhausted.
//
// The zero value is a usable Builder that allocates space as needed.
//
// Simple values are marshaled and appended to a Builder using methods on the
// Builder. Length-prefixed values are marshaled by providing a
// BuilderContinuation, which is a function that writes the inner contents of
// the value to a given Builder. See the documentation for BuilderContinuation
// for details.
type Builder struct {
err error
result []byte
fixedSize bool
child *Builder
offset int
pendingLenLen int
pendingIsASN1 bool
inContinuation *bool
}
// NewBuilder creates a Builder that appends its output to the given buffer.
// Like append(), the slice will be reallocated if its capacity is exceeded.
// Use Bytes to get the final buffer.
func NewBuilder(buffer []byte) *Builder {
return &Builder{
result: buffer,
}
}
// NewFixedBuilder creates a Builder that appends its output into the given
// buffer. This builder does not reallocate the output buffer. Writes that
// would exceed the buffer's capacity are treated as an error.
func NewFixedBuilder(buffer []byte) *Builder {
return &Builder{
result: buffer,
fixedSize: true,
}
}
// SetError sets the value to be returned as the error from Bytes. Writes
// performed after calling SetError are ignored.
func (b *Builder) SetError(err error) {
b.err = err
}
// Bytes returns the bytes written by the builder or an error if one has
// occurred during building.
func (b *Builder) Bytes() ([]byte, error) {
if b.err != nil {
return nil, b.err
}
return b.result[b.offset:], nil
}
// BytesOrPanic returns the bytes written by the builder or panics if an error
// has occurred during building.
func (b *Builder) BytesOrPanic() []byte {
if b.err != nil {
panic(b.err)
}
return b.result[b.offset:]
}
// AddUint8 appends an 8-bit value to the byte string.
func (b *Builder) AddUint8(v uint8) {
b.add(byte(v))
}
// AddUint16 appends a big-endian, 16-bit value to the byte string.
func (b *Builder) AddUint16(v uint16) {
b.add(byte(v>>8), byte(v))
}
// AddUint24 appends a big-endian, 24-bit value to the byte string. The highest
// byte of the 32-bit input value is silently truncated.
func (b *Builder) AddUint24(v uint32) {
b.add(byte(v>>16), byte(v>>8), byte(v))
}
// AddUint32 appends a big-endian, 32-bit value to the byte string.
func (b *Builder) AddUint32(v uint32) {
b.add(byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
}
// AddBytes appends a sequence of bytes to the byte string.
func (b *Builder) AddBytes(v []byte) {
b.add(v...)
}
// BuilderContinuation is a continuation-passing interface for building
// length-prefixed byte sequences. Builder methods for length-prefixed
// sequences (AddUint8LengthPrefixed etc) will invoke the BuilderContinuation
// supplied to them. The child builder passed to the continuation can be used
// to build the content of the length-prefixed sequence. For example:
//
// parent := cryptobyte.NewBuilder()
// parent.AddUint8LengthPrefixed(func (child *Builder) {
// child.AddUint8(42)
// child.AddUint8LengthPrefixed(func (grandchild *Builder) {
// grandchild.AddUint8(5)
// })
// })
//
// It is an error to write more bytes to the child than allowed by the reserved
// length prefix. After the continuation returns, the child must be considered
// invalid, i.e. users must not store any copies or references of the child
// that outlive the continuation.
//
// If the continuation panics with a value of type BuildError then the inner
// error will be returned as the error from Bytes. If the child panics
// otherwise then Bytes will repanic with the same value.
type BuilderContinuation func(child *Builder)
// BuildError wraps an error. If a BuilderContinuation panics with this value,
// the panic will be recovered and the inner error will be returned from
// Builder.Bytes.
type BuildError struct {
Err error
}
// AddUint8LengthPrefixed adds an 8-bit length-prefixed byte sequence.
func (b *Builder) AddUint8LengthPrefixed(f BuilderContinuation) {
b.addLengthPrefixed(1, false, f)
}
// AddUint16LengthPrefixed adds a big-endian, 16-bit length-prefixed byte sequence.
func (b *Builder) AddUint16LengthPrefixed(f BuilderContinuation) {
b.addLengthPrefixed(2, false, f)
}
// AddUint24LengthPrefixed adds a big-endian, 24-bit length-prefixed byte sequence.
func (b *Builder) AddUint24LengthPrefixed(f BuilderContinuation) {
b.addLengthPrefixed(3, false, f)
}
// AddUint32LengthPrefixed adds a big-endian, 32-bit length-prefixed byte sequence.
func (b *Builder) AddUint32LengthPrefixed(f BuilderContinuation) {
b.addLengthPrefixed(4, false, f)
}
func (b *Builder) callContinuation(f BuilderContinuation, arg *Builder) {
if !*b.inContinuation {
*b.inContinuation = true
defer func() {
*b.inContinuation = false
r := recover()
if r == nil {
return
}
if buildError, ok := r.(BuildError); ok {
b.err = buildError.Err
} else {
panic(r)
}
}()
}
f(arg)
}
func (b *Builder) addLengthPrefixed(lenLen int, isASN1 bool, f BuilderContinuation) {
// Subsequent writes can be ignored if the builder has encountered an error.
if b.err != nil {
return
}
offset := len(b.result)
b.add(make([]byte, lenLen)...)
if b.inContinuation == nil {
b.inContinuation = new(bool)
}
b.child = &Builder{
result: b.result,
fixedSize: b.fixedSize,
offset: offset,
pendingLenLen: lenLen,
pendingIsASN1: isASN1,
inContinuation: b.inContinuation,
}
b.callContinuation(f, b.child)
b.flushChild()
if b.child != nil {
panic("cryptobyte: internal error")
}
}
func (b *Builder) flushChild() {
if b.child == nil {
return
}
b.child.flushChild()
child := b.child
b.child = nil
if child.err != nil {
b.err = child.err
return
}
length := len(child.result) - child.pendingLenLen - child.offset
if length < 0 {
panic("cryptobyte: internal error") // result unexpectedly shrunk
}
if child.pendingIsASN1 {
// For ASN.1, we reserved a single byte for the length. If that turned out
// to be incorrect, we have to move the contents along in order to make
// space.
if child.pendingLenLen != 1 {
panic("cryptobyte: internal error")
}
var lenLen, lenByte uint8
if int64(length) > 0xfffffffe {
b.err = errors.New("pending ASN.1 child too long")
return
} else if length > 0xffffff {
lenLen = 5
lenByte = 0x80 | 4
} else if length > 0xffff {
lenLen = 4
lenByte = 0x80 | 3
} else if length > 0xff {
lenLen = 3
lenByte = 0x80 | 2
} else if length > 0x7f {
lenLen = 2
lenByte = 0x80 | 1
} else {
lenLen = 1
lenByte = uint8(length)
length = 0
}
// Insert the initial length byte, make space for successive length bytes,
// and adjust the offset.
child.result[child.offset] = lenByte
extraBytes := int(lenLen - 1)
if extraBytes != 0 {
child.add(make([]byte, extraBytes)...)
childStart := child.offset + child.pendingLenLen
copy(child.result[childStart+extraBytes:], child.result[childStart:])
}
child.offset++
child.pendingLenLen = extraBytes
}
l := length
for i := child.pendingLenLen - 1; i >= 0; i-- {
child.result[child.offset+i] = uint8(l)
l >>= 8
}
if l != 0 {
b.err = fmt.Errorf("cryptobyte: pending child length %d exceeds %d-byte length prefix", length, child.pendingLenLen)
return
}
if b.fixedSize && &b.result[0] != &child.result[0] {
panic("cryptobyte: BuilderContinuation reallocated a fixed-size buffer")
}
b.result = child.result
}
func (b *Builder) add(bytes ...byte) {
if b.err != nil {
return
}
if b.child != nil {
panic("cryptobyte: attempted write while child is pending")
}
if len(b.result)+len(bytes) < len(bytes) {
b.err = errors.New("cryptobyte: length overflow")
}
if b.fixedSize && len(b.result)+len(bytes) > cap(b.result) {
b.err = errors.New("cryptobyte: Builder is exceeding its fixed-size buffer")
return
}
b.result = append(b.result, bytes...)
}
// Unwrite rolls back n bytes written directly to the Builder. An attempt by a
// child builder passed to a continuation to unwrite bytes from its parent will
// panic.
func (b *Builder) Unwrite(n int) {
if b.err != nil {
return
}
if b.child != nil {
panic("cryptobyte: attempted unwrite while child is pending")
}
length := len(b.result) - b.pendingLenLen - b.offset
if length < 0 {
panic("cryptobyte: internal error")
}
if n > length {
panic("cryptobyte: attempted to unwrite more than was written")
}
b.result = b.result[:len(b.result)-n]
}
// A MarshalingValue marshals itself into a Builder.
type MarshalingValue interface {
// Marshal is called by Builder.AddValue. It receives a pointer to a builder
// to marshal itself into. It may return an error that occurred during
// marshaling, such as unset or invalid values.
Marshal(b *Builder) error
}
// AddValue calls Marshal on v, passing a pointer to the builder to append to.
// If Marshal returns an error, it is set on the Builder so that subsequent
// appends don't have an effect.
func (b *Builder) AddValue(v MarshalingValue) {
err := v.Marshal(b)
if err != nil {
b.err = err
}
}
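
A short sketch of the length-prefix machinery above: nested continuations, with the prefix bytes back-filled by flushChild once each continuation returns. The payload bytes are invented for illustration:

package main

import (
    "fmt"

    "golang.org/x/crypto/cryptobyte"
)

func main() {
    var b cryptobyte.Builder
    // An outer 16-bit length prefix wrapping a byte plus an inner 8-bit-prefixed blob.
    b.AddUint16LengthPrefixed(func(outer *cryptobyte.Builder) {
        outer.AddUint8(0x01)
        outer.AddUint8LengthPrefixed(func(inner *cryptobyte.Builder) {
            inner.AddBytes([]byte{0xAA, 0xBB})
        })
    })
    out, err := b.Bytes()
    if err != nil {
        panic(err)
    }
    fmt.Printf("% x\n", out) // 00 04 01 02 aa bb
}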

161
vendor/golang.org/x/crypto/cryptobyte/string.go generated vendored Normal file
View File

@@ -0,0 +1,161 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package cryptobyte contains types that help with parsing and constructing
// length-prefixed, binary messages, including ASN.1 DER. (The asn1 subpackage
// contains useful ASN.1 constants.)
//
// The String type is for parsing. It wraps a []byte slice and provides helper
// functions for consuming structures, value by value.
//
// The Builder type is for constructing messages. It provides helper functions
// for appending values and also for appending length-prefixed submessages
// without having to worry about calculating the length prefix ahead of time.
//
// See the documentation and examples for the Builder and String types to get
// started.
package cryptobyte // import "golang.org/x/crypto/cryptobyte"
// String represents a string of bytes. It provides methods for parsing
// fixed-length and length-prefixed values from it.
type String []byte
// read advances a String by n bytes and returns them. If less than n bytes
// remain, it returns nil.
func (s *String) read(n int) []byte {
if len(*s) < n || n < 0 {
return nil
}
v := (*s)[:n]
*s = (*s)[n:]
return v
}
// Skip advances the String by n bytes and reports whether it was successful.
func (s *String) Skip(n int) bool {
return s.read(n) != nil
}
// ReadUint8 decodes an 8-bit value into out and advances over it.
// It reports whether the read was successful.
func (s *String) ReadUint8(out *uint8) bool {
v := s.read(1)
if v == nil {
return false
}
*out = uint8(v[0])
return true
}
// ReadUint16 decodes a big-endian, 16-bit value into out and advances over it.
// It reports whether the read was successful.
func (s *String) ReadUint16(out *uint16) bool {
v := s.read(2)
if v == nil {
return false
}
*out = uint16(v[0])<<8 | uint16(v[1])
return true
}
// ReadUint24 decodes a big-endian, 24-bit value into out and advances over it.
// It reports whether the read was successful.
func (s *String) ReadUint24(out *uint32) bool {
v := s.read(3)
if v == nil {
return false
}
*out = uint32(v[0])<<16 | uint32(v[1])<<8 | uint32(v[2])
return true
}
// ReadUint32 decodes a big-endian, 32-bit value into out and advances over it.
// It reports whether the read was successful.
func (s *String) ReadUint32(out *uint32) bool {
v := s.read(4)
if v == nil {
return false
}
*out = uint32(v[0])<<24 | uint32(v[1])<<16 | uint32(v[2])<<8 | uint32(v[3])
return true
}
func (s *String) readUnsigned(out *uint32, length int) bool {
v := s.read(length)
if v == nil {
return false
}
var result uint32
for i := 0; i < length; i++ {
result <<= 8
result |= uint32(v[i])
}
*out = result
return true
}
func (s *String) readLengthPrefixed(lenLen int, outChild *String) bool {
lenBytes := s.read(lenLen)
if lenBytes == nil {
return false
}
var length uint32
for _, b := range lenBytes {
length = length << 8
length = length | uint32(b)
}
v := s.read(int(length))
if v == nil {
return false
}
*outChild = v
return true
}
// ReadUint8LengthPrefixed reads the content of an 8-bit length-prefixed value
// into out and advances over it. It reports whether the read was successful.
func (s *String) ReadUint8LengthPrefixed(out *String) bool {
return s.readLengthPrefixed(1, out)
}
// ReadUint16LengthPrefixed reads the content of a big-endian, 16-bit
// length-prefixed value into out and advances over it. It reports whether the
// read was successful.
func (s *String) ReadUint16LengthPrefixed(out *String) bool {
return s.readLengthPrefixed(2, out)
}
// ReadUint24LengthPrefixed reads the content of a big-endian, 24-bit
// length-prefixed value into out and advances over it. It reports whether
// the read was successful.
func (s *String) ReadUint24LengthPrefixed(out *String) bool {
return s.readLengthPrefixed(3, out)
}
// ReadBytes reads n bytes into out and advances over them. It reports
// whether the read was successful.
func (s *String) ReadBytes(out *[]byte, n int) bool {
v := s.read(n)
if v == nil {
return false
}
*out = v
return true
}
// CopyBytes copies len(out) bytes into out and advances over them. It reports
// whether the copy operation was successful
func (s *String) CopyBytes(out []byte) bool {
n := len(out)
v := s.read(n)
if v == nil {
return false
}
return copy(out, v) == n
}
// Empty reports whether the string does not contain any bytes.
func (s String) Empty() bool {
return len(s) == 0
}
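
And the matching parse-side sketch for String, consuming a length-prefixed message; the wire bytes are invented for illustration:

package main

import (
    "fmt"

    "golang.org/x/crypto/cryptobyte"
)

func main() {
    // 16-bit big-endian length prefix (0x0003), three payload bytes, then a trailing uint8.
    msg := cryptobyte.String([]byte{0x00, 0x03, 0x01, 0x02, 0x03, 0xFF})

    var payload cryptobyte.String
    var trailer uint8
    if !msg.ReadUint16LengthPrefixed(&payload) ||
        !msg.ReadUint8(&trailer) ||
        !msg.Empty() {
        panic("malformed message")
    }
    fmt.Printf("payload=% x trailer=%#x\n", []byte(payload), trailer)
}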

33
vendor/golang.org/x/crypto/internal/subtle/aliasing.go generated vendored Normal file
View File

@@ -0,0 +1,33 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !purego
// +build !purego
// Package subtle implements functions that are often useful in cryptographic
// code but require careful thought to use correctly.
package subtle // import "golang.org/x/crypto/internal/subtle"
import "unsafe"
// AnyOverlap reports whether x and y share memory at any (not necessarily
// corresponding) index. The memory beyond the slice length is ignored.
func AnyOverlap(x, y []byte) bool {
return len(x) > 0 && len(y) > 0 &&
uintptr(unsafe.Pointer(&x[0])) <= uintptr(unsafe.Pointer(&y[len(y)-1])) &&
uintptr(unsafe.Pointer(&y[0])) <= uintptr(unsafe.Pointer(&x[len(x)-1]))
}
// InexactOverlap reports whether x and y share memory at any non-corresponding
// index. The memory beyond the slice length is ignored. Note that x and y can
// have different lengths and still not have any inexact overlap.
//
// InexactOverlap can be used to implement the requirements of the crypto/cipher
// AEAD, Block, BlockMode and Stream interfaces.
func InexactOverlap(x, y []byte) bool {
if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] {
return false
}
return AnyOverlap(x, y)
}
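
A small sketch of what AnyOverlap and InexactOverlap report for aliased slices. Note that internal/subtle can only be imported from inside golang.org/x/crypto itself (the secretbox file below does exactly that), so this snippet only illustrates the semantics:

package main

// Illustrative only: internal/subtle is not importable outside golang.org/x/crypto.
import (
    "fmt"

    "golang.org/x/crypto/internal/subtle"
)

func main() {
    buf := make([]byte, 8)
    a := buf[0:4]
    b := buf[2:6]

    fmt.Println(subtle.AnyOverlap(a, b))     // true: same backing array, shifted ranges
    fmt.Println(subtle.InexactOverlap(a, a)) // false: exact in-place aliasing is allowed
    fmt.Println(subtle.InexactOverlap(a, b)) // true: shifted overlap would corrupt output
}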

View File

@@ -0,0 +1,36 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build purego
// +build purego
// Package subtle implements functions that are often useful in cryptographic
// code but require careful thought to use correctly.
package subtle // import "golang.org/x/crypto/internal/subtle"
// This is the Google App Engine standard variant based on reflect
// because the unsafe package and cgo are disallowed.
import "reflect"
// AnyOverlap reports whether x and y share memory at any (not necessarily
// corresponding) index. The memory beyond the slice length is ignored.
func AnyOverlap(x, y []byte) bool {
return len(x) > 0 && len(y) > 0 &&
reflect.ValueOf(&x[0]).Pointer() <= reflect.ValueOf(&y[len(y)-1]).Pointer() &&
reflect.ValueOf(&y[0]).Pointer() <= reflect.ValueOf(&x[len(x)-1]).Pointer()
}
// InexactOverlap reports whether x and y share memory at any non-corresponding
// index. The memory beyond the slice length is ignored. Note that x and y can
// have different lengths and still not have any inexact overlap.
//
// InexactOverlap can be used to implement the requirements of the crypto/cipher
// AEAD, Block, BlockMode and Stream interfaces.
func InexactOverlap(x, y []byte) bool {
if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] {
return false
}
return AnyOverlap(x, y)
}

173
vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go generated vendored Normal file
View File

@@ -0,0 +1,173 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Package secretbox encrypts and authenticates small messages.
Secretbox uses XSalsa20 and Poly1305 to encrypt and authenticate messages with
secret-key cryptography. The length of messages is not hidden.
It is the caller's responsibility to ensure the uniqueness of nonces; for
example, by using nonce 1 for the first message, nonce 2 for the second
message, etc. Nonces are long enough that randomly generated nonces have
negligible risk of collision.
Messages should be small because:
1. The whole message needs to be held in memory to be processed.
2. Using large messages pressures implementations on small machines to decrypt
and process plaintext before authenticating it. This is very dangerous, and
this API does not allow it, but a protocol that uses excessive message sizes
might present some implementations with no other choice.
3. Fixed overheads will be sufficiently amortised by messages as small as 8KB.
4. Performance may be improved by working with messages that fit into data caches.
Thus large amounts of data should be chunked so that each message is small.
(Each message still needs a unique nonce.) If in doubt, 16KB is a reasonable
chunk size.
This package is interoperable with NaCl: https://nacl.cr.yp.to/secretbox.html.
*/
package secretbox // import "golang.org/x/crypto/nacl/secretbox"
import (
"golang.org/x/crypto/internal/subtle"
"golang.org/x/crypto/poly1305"
"golang.org/x/crypto/salsa20/salsa"
)
// Overhead is the number of bytes of overhead when boxing a message.
const Overhead = poly1305.TagSize
// setup produces a sub-key and Salsa20 counter given a nonce and key.
func setup(subKey *[32]byte, counter *[16]byte, nonce *[24]byte, key *[32]byte) {
// We use XSalsa20 for encryption so first we need to generate a
// key and nonce with HSalsa20.
var hNonce [16]byte
copy(hNonce[:], nonce[:])
salsa.HSalsa20(subKey, &hNonce, key, &salsa.Sigma)
// The final 8 bytes of the original nonce form the new nonce.
copy(counter[:], nonce[16:])
}
// sliceForAppend takes a slice and a requested number of bytes. It returns a
// slice with the contents of the given slice followed by that many bytes and a
// second slice that aliases into it and contains only the extra bytes. If the
// original slice has sufficient capacity then no allocation is performed.
func sliceForAppend(in []byte, n int) (head, tail []byte) {
if total := len(in) + n; cap(in) >= total {
head = in[:total]
} else {
head = make([]byte, total)
copy(head, in)
}
tail = head[len(in):]
return
}
// Seal appends an encrypted and authenticated copy of message to out, which
// must not overlap message. The key and nonce pair must be unique for each
// distinct message and the output will be Overhead bytes longer than message.
func Seal(out, message []byte, nonce *[24]byte, key *[32]byte) []byte {
var subKey [32]byte
var counter [16]byte
setup(&subKey, &counter, nonce, key)
// The Poly1305 key is generated by encrypting 32 bytes of zeros. Since
// Salsa20 works with 64-byte blocks, we also generate 32 bytes of
// keystream as a side effect.
var firstBlock [64]byte
salsa.XORKeyStream(firstBlock[:], firstBlock[:], &counter, &subKey)
var poly1305Key [32]byte
copy(poly1305Key[:], firstBlock[:])
ret, out := sliceForAppend(out, len(message)+poly1305.TagSize)
if subtle.AnyOverlap(out, message) {
panic("nacl: invalid buffer overlap")
}
// We XOR up to 32 bytes of message with the keystream generated from
// the first block.
firstMessageBlock := message
if len(firstMessageBlock) > 32 {
firstMessageBlock = firstMessageBlock[:32]
}
tagOut := out
out = out[poly1305.TagSize:]
for i, x := range firstMessageBlock {
out[i] = firstBlock[32+i] ^ x
}
message = message[len(firstMessageBlock):]
ciphertext := out
out = out[len(firstMessageBlock):]
// Now encrypt the rest.
counter[8] = 1
salsa.XORKeyStream(out, message, &counter, &subKey)
var tag [poly1305.TagSize]byte
poly1305.Sum(&tag, ciphertext, &poly1305Key)
copy(tagOut, tag[:])
return ret
}
// Open authenticates and decrypts a box produced by Seal and appends the
// message to out, which must not overlap box. The output will be Overhead
// bytes smaller than box.
func Open(out, box []byte, nonce *[24]byte, key *[32]byte) ([]byte, bool) {
if len(box) < Overhead {
return nil, false
}
var subKey [32]byte
var counter [16]byte
setup(&subKey, &counter, nonce, key)
// The Poly1305 key is generated by encrypting 32 bytes of zeros. Since
// Salsa20 works with 64-byte blocks, we also generate 32 bytes of
// keystream as a side effect.
var firstBlock [64]byte
salsa.XORKeyStream(firstBlock[:], firstBlock[:], &counter, &subKey)
var poly1305Key [32]byte
copy(poly1305Key[:], firstBlock[:])
var tag [poly1305.TagSize]byte
copy(tag[:], box)
if !poly1305.Verify(&tag, box[poly1305.TagSize:], &poly1305Key) {
return nil, false
}
ret, out := sliceForAppend(out, len(box)-Overhead)
if subtle.AnyOverlap(out, box) {
panic("nacl: invalid buffer overlap")
}
// We XOR up to 32 bytes of box with the keystream generated from
// the first block.
box = box[Overhead:]
firstMessageBlock := box
if len(firstMessageBlock) > 32 {
firstMessageBlock = firstMessageBlock[:32]
}
for i, x := range firstMessageBlock {
out[i] = firstBlock[32+i] ^ x
}
box = box[len(firstMessageBlock):]
out = out[len(firstMessageBlock):]
// Now decrypt the rest.
counter[8] = 1
salsa.XORKeyStream(out, box, &counter, &subKey)
return ret, true
}
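
A minimal Seal/Open round trip matching the package comment above; generating the key and nonce with crypto/rand is the usual pattern but is an assumption of this sketch, not something the commit prescribes:

package main

import (
    "crypto/rand"
    "fmt"

    "golang.org/x/crypto/nacl/secretbox"
)

func main() {
    var key [32]byte
    var nonce [24]byte
    if _, err := rand.Read(key[:]); err != nil {
        panic(err)
    }
    if _, err := rand.Read(nonce[:]); err != nil {
        panic(err)
    }

    // Seal appends the boxed message (tag + ciphertext) to its first argument;
    // nil means allocate a new slice.
    box := secretbox.Seal(nil, []byte("hello"), &nonce, &key)

    // Open needs the same nonce and key; the bool reports authentication success.
    plain, ok := secretbox.Open(nil, box, &nonce, &key)
    if !ok {
        panic("decryption or authentication failed")
    }
    fmt.Println(string(plain)) // hello
}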

40
vendor/golang.org/x/crypto/poly1305/bits_compat.go generated vendored Normal file
View File

@@ -0,0 +1,40 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !go1.13
// +build !go1.13
package poly1305
// Generic fallbacks for the math/bits intrinsics, copied from
// src/math/bits/bits.go. They were added in Go 1.12, but Add64 and Sub64 had
// variable time fallbacks until Go 1.13.
func bitsAdd64(x, y, carry uint64) (sum, carryOut uint64) {
sum = x + y + carry
carryOut = ((x & y) | ((x | y) &^ sum)) >> 63
return
}
func bitsSub64(x, y, borrow uint64) (diff, borrowOut uint64) {
diff = x - y - borrow
borrowOut = ((^x & y) | (^(x ^ y) & diff)) >> 63
return
}
func bitsMul64(x, y uint64) (hi, lo uint64) {
const mask32 = 1<<32 - 1
x0 := x & mask32
x1 := x >> 32
y0 := y & mask32
y1 := y >> 32
w0 := x0 * y0
t := x1*y0 + w0>>32
w1 := t & mask32
w2 := t >> 32
w1 += x0 * y1
hi = x1*y1 + w2 + w1>>32
lo = x * y
return
}

22
vendor/golang.org/x/crypto/poly1305/bits_go1.13.go generated vendored Normal file
View File

@@ -0,0 +1,22 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.13
// +build go1.13
package poly1305
import "math/bits"
func bitsAdd64(x, y, carry uint64) (sum, carryOut uint64) {
return bits.Add64(x, y, carry)
}
func bitsSub64(x, y, borrow uint64) (diff, borrowOut uint64) {
return bits.Sub64(x, y, borrow)
}
func bitsMul64(x, y uint64) (hi, lo uint64) {
return bits.Mul64(x, y)
}

10
vendor/golang.org/x/crypto/poly1305/mac_noasm.go generated vendored Normal file
View File

@@ -0,0 +1,10 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build (!amd64 && !ppc64le && !s390x) || !gc || purego
// +build !amd64,!ppc64le,!s390x !gc purego
package poly1305
type mac struct{ macGeneric }

99
vendor/golang.org/x/crypto/poly1305/poly1305.go generated vendored Normal file
View File

@@ -0,0 +1,99 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package poly1305 implements Poly1305 one-time message authentication code as
// specified in https://cr.yp.to/mac/poly1305-20050329.pdf.
//
// Poly1305 is a fast, one-time authentication function. It is infeasible for an
// attacker to generate an authenticator for a message without the key. However, a
// key must only be used for a single message. Authenticating two different
// messages with the same key allows an attacker to forge authenticators for other
// messages with the same key.
//
// Poly1305 was originally coupled with AES in order to make Poly1305-AES. AES was
// used with a fixed key in order to generate one-time keys from a nonce.
// However, in this package AES isn't used and the one-time key is specified
// directly.
package poly1305 // import "golang.org/x/crypto/poly1305"
import "crypto/subtle"
// TagSize is the size, in bytes, of a poly1305 authenticator.
const TagSize = 16
// Sum generates an authenticator for msg using a one-time key and puts the
// 16-byte result into out. Authenticating two different messages with the same
// key allows an attacker to forge messages at will.
func Sum(out *[16]byte, m []byte, key *[32]byte) {
h := New(key)
h.Write(m)
h.Sum(out[:0])
}
// Verify returns true if mac is a valid authenticator for m with the given key.
func Verify(mac *[16]byte, m []byte, key *[32]byte) bool {
var tmp [16]byte
Sum(&tmp, m, key)
return subtle.ConstantTimeCompare(tmp[:], mac[:]) == 1
}
// New returns a new MAC computing an authentication
// tag of all data written to it with the given key.
// This allows writing the message progressively instead
// of passing it as a single slice. Common users should use
// the Sum function instead.
//
// The key must be unique for each message, as authenticating
// two different messages with the same key allows an attacker
// to forge messages at will.
func New(key *[32]byte) *MAC {
m := &MAC{}
initialize(key, &m.macState)
return m
}
// MAC is an io.Writer computing an authentication tag
// of the data written to it.
//
// MAC cannot be used like common hash.Hash implementations,
// because using a poly1305 key twice breaks its security.
// Therefore writing data to a running MAC after calling
// Sum or Verify causes it to panic.
type MAC struct {
mac // platform-dependent implementation
finalized bool
}
// Size returns the number of bytes Sum will return.
func (h *MAC) Size() int { return TagSize }
// Write adds more data to the running message authentication code.
// It never returns an error.
//
// It must not be called after the first call of Sum or Verify.
func (h *MAC) Write(p []byte) (n int, err error) {
if h.finalized {
panic("poly1305: write to MAC after Sum or Verify")
}
return h.mac.Write(p)
}
// Sum computes the authenticator of all data written to the
// message authentication code.
func (h *MAC) Sum(b []byte) []byte {
var mac [TagSize]byte
h.mac.Sum(&mac)
h.finalized = true
return append(b, mac[:]...)
}
// Verify returns whether the authenticator of all data written to
// the message authentication code matches the expected value.
func (h *MAC) Verify(expected []byte) bool {
var mac [TagSize]byte
h.mac.Sum(&mac)
h.finalized = true
return subtle.ConstantTimeCompare(expected, mac[:]) == 1
}
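
A brief sketch of both the one-shot and streaming poly1305 APIs above; the random key is purely for illustration and, per the package comment, must never be reused for a second message:

package main

import (
    "crypto/rand"
    "fmt"

    "golang.org/x/crypto/poly1305"
)

func main() {
    var key [32]byte
    if _, err := rand.Read(key[:]); err != nil {
        panic(err)
    }

    msg := []byte("attack at dawn")

    // One-shot API.
    var tag [16]byte
    poly1305.Sum(&tag, msg, &key)
    fmt.Println(poly1305.Verify(&tag, msg, &key)) // true

    // Streaming API for incremental writes.
    h := poly1305.New(&key)
    h.Write(msg[:6])
    h.Write(msg[6:])
    fmt.Println(h.Verify(tag[:])) // true
}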

48
vendor/golang.org/x/crypto/poly1305/sum_amd64.go generated vendored Normal file
View File

@@ -0,0 +1,48 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build gc && !purego
// +build gc,!purego
package poly1305
//go:noescape
func update(state *macState, msg []byte)
// mac is a wrapper for macGeneric that redirects calls that would have gone to
// updateGeneric to update.
//
// Its Write and Sum methods are otherwise identical to the macGeneric ones, but
// using function pointers would carry a major performance cost.
type mac struct{ macGeneric }
func (h *mac) Write(p []byte) (int, error) {
nn := len(p)
if h.offset > 0 {
n := copy(h.buffer[h.offset:], p)
if h.offset+n < TagSize {
h.offset += n
return nn, nil
}
p = p[n:]
h.offset = 0
update(&h.macState, h.buffer[:])
}
if n := len(p) - (len(p) % TagSize); n > 0 {
update(&h.macState, p[:n])
p = p[n:]
}
if len(p) > 0 {
h.offset += copy(h.buffer[h.offset:], p)
}
return nn, nil
}
func (h *mac) Sum(out *[16]byte) {
state := h.macState
if h.offset > 0 {
update(&state, h.buffer[:h.offset])
}
finalize(out, &state.h, &state.s)
}

109
vendor/golang.org/x/crypto/poly1305/sum_amd64.s generated vendored Normal file
View File

@@ -0,0 +1,109 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build gc && !purego
// +build gc,!purego
#include "textflag.h"
#define POLY1305_ADD(msg, h0, h1, h2) \
ADDQ 0(msg), h0; \
ADCQ 8(msg), h1; \
ADCQ $1, h2; \
LEAQ 16(msg), msg
#define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3) \
MOVQ r0, AX; \
MULQ h0; \
MOVQ AX, t0; \
MOVQ DX, t1; \
MOVQ r0, AX; \
MULQ h1; \
ADDQ AX, t1; \
ADCQ $0, DX; \
MOVQ r0, t2; \
IMULQ h2, t2; \
ADDQ DX, t2; \
\
MOVQ r1, AX; \
MULQ h0; \
ADDQ AX, t1; \
ADCQ $0, DX; \
MOVQ DX, h0; \
MOVQ r1, t3; \
IMULQ h2, t3; \
MOVQ r1, AX; \
MULQ h1; \
ADDQ AX, t2; \
ADCQ DX, t3; \
ADDQ h0, t2; \
ADCQ $0, t3; \
\
MOVQ t0, h0; \
MOVQ t1, h1; \
MOVQ t2, h2; \
ANDQ $3, h2; \
MOVQ t2, t0; \
ANDQ $0xFFFFFFFFFFFFFFFC, t0; \
ADDQ t0, h0; \
ADCQ t3, h1; \
ADCQ $0, h2; \
SHRQ $2, t3, t2; \
SHRQ $2, t3; \
ADDQ t2, h0; \
ADCQ t3, h1; \
ADCQ $0, h2
// func update(state *[7]uint64, msg []byte)
TEXT ·update(SB), $0-32
MOVQ state+0(FP), DI
MOVQ msg_base+8(FP), SI
MOVQ msg_len+16(FP), R15
MOVQ 0(DI), R8 // h0
MOVQ 8(DI), R9 // h1
MOVQ 16(DI), R10 // h2
MOVQ 24(DI), R11 // r0
MOVQ 32(DI), R12 // r1
CMPQ R15, $16
JB bytes_between_0_and_15
loop:
POLY1305_ADD(SI, R8, R9, R10)
multiply:
POLY1305_MUL(R8, R9, R10, R11, R12, BX, CX, R13, R14)
SUBQ $16, R15
CMPQ R15, $16
JAE loop
bytes_between_0_and_15:
TESTQ R15, R15
JZ done
MOVQ $1, BX
XORQ CX, CX
XORQ R13, R13
ADDQ R15, SI
flush_buffer:
SHLQ $8, BX, CX
SHLQ $8, BX
MOVB -1(SI), R13
XORQ R13, BX
DECQ SI
DECQ R15
JNZ flush_buffer
ADDQ BX, R8
ADCQ CX, R9
ADCQ $0, R10
MOVQ $16, R15
JMP multiply
done:
MOVQ R8, 0(DI)
MOVQ R9, 8(DI)
MOVQ R10, 16(DI)
RET

310
vendor/golang.org/x/crypto/poly1305/sum_generic.go generated vendored Normal file
View File

@ -0,0 +1,310 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file provides the generic implementation of Sum and MAC. Other files
// might provide optimized assembly implementations of some of this code.
package poly1305
import "encoding/binary"
// Poly1305 [RFC 7539] is a relatively simple algorithm: the authentication tag
// for a 64-byte message is approximately
//
// s + m[0:16] * r⁴ + m[16:32] * r³ + m[32:48] * r² + m[48:64] * r mod 2¹³⁰ - 5
//
// for some secret r and s. It can be computed sequentially like
//
// for len(msg) > 0:
// h += read(msg, 16)
// h *= r
// h %= 2¹³⁰ - 5
// return h + s
//
// All the complexity is about doing performant constant-time math on numbers
// larger than any available numeric type.
func sumGeneric(out *[TagSize]byte, msg []byte, key *[32]byte) {
h := newMACGeneric(key)
h.Write(msg)
h.Sum(out)
}
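
For readers working through the comment above, here is a non-constant-time sketch of the same sequential loop written against math/big, purely for illustration (referenceTag and leToInt are invented names and are not part of this package; real implementations must be constant time):

package main

import (
	"fmt"
	"math/big"
)

// referenceTag mirrors the loop sketched above: absorb 16-byte chunks,
// multiply by r, reduce mod 2¹³⁰ - 5, then add s mod 2¹²⁸.
func referenceTag(msg []byte, key *[32]byte) [16]byte {
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 130), big.NewInt(5))

	// r is the low half of the key with the standard clamping applied.
	rb := append([]byte(nil), key[:16]...)
	for _, i := range []int{3, 7, 11, 15} {
		rb[i] &= 15
	}
	for _, i := range []int{4, 8, 12} {
		rb[i] &= 252
	}
	r, s := leToInt(rb), leToInt(key[16:32])

	h := new(big.Int)
	for len(msg) > 0 {
		n := len(msg)
		if n > 16 {
			n = 16
		}
		block := make([]byte, n+1)
		copy(block, msg[:n])
		block[n] = 1 // the 1 bit set just above the chunk
		h.Add(h, leToInt(block))
		h.Mul(h, r)
		h.Mod(h, p)
		msg = msg[n:]
	}
	h.Add(h, s)
	h.And(h, new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 128), big.NewInt(1)))

	var tag [16]byte
	be := h.FillBytes(make([]byte, 16))
	for i := range tag {
		tag[i] = be[15-i] // serialize little-endian
	}
	return tag
}

// leToInt interprets b as a little-endian integer.
func leToInt(b []byte) *big.Int {
	be := make([]byte, len(b))
	for i := range b {
		be[len(b)-1-i] = b[i]
	}
	return new(big.Int).SetBytes(be)
}

func main() {
	var key [32]byte
	copy(key[:], "this is 32 bytes of one-time key")
	fmt.Printf("%x\n", referenceTag([]byte("a sample message"), &key))
}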
func newMACGeneric(key *[32]byte) macGeneric {
m := macGeneric{}
initialize(key, &m.macState)
return m
}
// macState holds numbers in saturated 64-bit little-endian limbs. That is,
// the value of [x0, x1, x2] is x[0] + x[1] * 2⁶⁴ + x[2] * 2¹²⁸.
type macState struct {
// h is the main accumulator. It is to be interpreted modulo 2¹³⁰ - 5, but
// can grow larger during and after rounds. It must, however, remain below
// 2 * (2¹³⁰ - 5).
h [3]uint64
// r and s are the private key components.
r [2]uint64
s [2]uint64
}
type macGeneric struct {
macState
buffer [TagSize]byte
offset int
}
// Write splits the incoming message into TagSize chunks, and passes them to
// update. It buffers incomplete chunks.
func (h *macGeneric) Write(p []byte) (int, error) {
nn := len(p)
if h.offset > 0 {
n := copy(h.buffer[h.offset:], p)
if h.offset+n < TagSize {
h.offset += n
return nn, nil
}
p = p[n:]
h.offset = 0
updateGeneric(&h.macState, h.buffer[:])
}
if n := len(p) - (len(p) % TagSize); n > 0 {
updateGeneric(&h.macState, p[:n])
p = p[n:]
}
if len(p) > 0 {
h.offset += copy(h.buffer[h.offset:], p)
}
return nn, nil
}
// Sum flushes the last incomplete chunk from the buffer, if any, and generates
// the MAC output. It does not modify its state, in order to allow for multiple
// calls to Sum, even if no Write is allowed after Sum.
func (h *macGeneric) Sum(out *[TagSize]byte) {
state := h.macState
if h.offset > 0 {
updateGeneric(&state, h.buffer[:h.offset])
}
finalize(out, &state.h, &state.s)
}
// [rMask0, rMask1] is the specified Poly1305 clamping mask in little-endian. It
// clears some bits of the secret coefficient to make it possible to implement
// multiplication more efficiently.
const (
rMask0 = 0x0FFFFFFC0FFFFFFF
rMask1 = 0x0FFFFFFC0FFFFFFC
)
// initialize loads the 256-bit key into the two 128-bit secret values r and s.
func initialize(key *[32]byte, m *macState) {
m.r[0] = binary.LittleEndian.Uint64(key[0:8]) & rMask0
m.r[1] = binary.LittleEndian.Uint64(key[8:16]) & rMask1
m.s[0] = binary.LittleEndian.Uint64(key[16:24])
m.s[1] = binary.LittleEndian.Uint64(key[24:32])
}
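
A quick illustrative cross-check (not part of the package) that the limb masks above perform the same operation as the usual byte-wise RFC 8439 clamp; the key bytes below are an arbitrary fixed example:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

const (
	rMask0 = 0x0FFFFFFC0FFFFFFF
	rMask1 = 0x0FFFFFFC0FFFFFFC
)

func main() {
	// Any 16 bytes of key material; fixed here for illustration.
	r := []byte{0x85, 0xd6, 0xbe, 0x78, 0x57, 0x55, 0x6d, 0x33,
		0x7f, 0x44, 0x52, 0xfe, 0x42, 0xd5, 0x06, 0xa8}

	// Limb view, as initialize does above.
	r0 := binary.LittleEndian.Uint64(r[0:8]) & rMask0
	r1 := binary.LittleEndian.Uint64(r[8:16]) & rMask1

	// Byte view, as the clamp is usually written.
	c := append([]byte(nil), r...)
	c[3] &= 15
	c[7] &= 15
	c[11] &= 15
	c[15] &= 15
	c[4] &= 252
	c[8] &= 252
	c[12] &= 252

	var limbBytes [16]byte
	binary.LittleEndian.PutUint64(limbBytes[0:8], r0)
	binary.LittleEndian.PutUint64(limbBytes[8:16], r1)
	fmt.Println(bytes.Equal(limbBytes[:], c)) // true
}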
// uint128 holds a 128-bit number as two 64-bit limbs, for use with the
// bits.Mul64 and bits.Add64 intrinsics.
type uint128 struct {
lo, hi uint64
}
func mul64(a, b uint64) uint128 {
hi, lo := bitsMul64(a, b)
return uint128{lo, hi}
}
func add128(a, b uint128) uint128 {
lo, c := bitsAdd64(a.lo, b.lo, 0)
hi, c := bitsAdd64(a.hi, b.hi, c)
if c != 0 {
panic("poly1305: unexpected overflow")
}
return uint128{lo, hi}
}
func shiftRightBy2(a uint128) uint128 {
a.lo = a.lo>>2 | (a.hi&3)<<62
a.hi = a.hi >> 2
return a
}
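
These helpers are thin wrappers over 64-bit intrinsics (bitsMul64 and bitsAdd64 are expected to resolve to math/bits on current toolchains; that mapping is an assumption of this note, not something stated in this file). A standalone sketch cross-checking the two-limb product against math/big:

package main

import (
	"fmt"
	"math/big"
	"math/bits"
)

type uint128 struct{ lo, hi uint64 }

// mul64 returns the full 128-bit product of a and b.
func mul64(a, b uint64) uint128 {
	hi, lo := bits.Mul64(a, b)
	return uint128{lo, hi}
}

// toBig converts the two-limb value to a big.Int for checking.
func toBig(x uint128) *big.Int {
	n := new(big.Int).SetUint64(x.hi)
	n.Lsh(n, 64)
	return n.Add(n, new(big.Int).SetUint64(x.lo))
}

func main() {
	a, b := uint64(0x0FFFFFFC0FFFFFFF), uint64(0x123456789ABCDEF0)
	p := mul64(a, b)
	want := new(big.Int).Mul(new(big.Int).SetUint64(a), new(big.Int).SetUint64(b))
	fmt.Println(toBig(p).Cmp(want) == 0) // true
}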
// updateGeneric absorbs msg into the state.h accumulator. For each chunk m of
// 128 bits of message, it computes
//
// h₊ = (h + m) * r mod 2¹³⁰ - 5
//
// If the msg length is not a multiple of TagSize, it assumes the last
// incomplete chunk is the final one.
func updateGeneric(state *macState, msg []byte) {
h0, h1, h2 := state.h[0], state.h[1], state.h[2]
r0, r1 := state.r[0], state.r[1]
for len(msg) > 0 {
var c uint64
// For the first step, h + m, we use a chain of bits.Add64 intrinsics.
// The resulting value of h might exceed 2¹³⁰ - 5, but will be partially
// reduced at the end of the multiplication below.
//
// The spec requires us to set a bit just above the message size, not to
// hide leading zeroes. For full chunks, that's 1 << 128, so we can just
// add 1 to the most significant (2¹²⁸) limb, h2.
if len(msg) >= TagSize {
h0, c = bitsAdd64(h0, binary.LittleEndian.Uint64(msg[0:8]), 0)
h1, c = bitsAdd64(h1, binary.LittleEndian.Uint64(msg[8:16]), c)
h2 += c + 1
msg = msg[TagSize:]
} else {
var buf [TagSize]byte
copy(buf[:], msg)
buf[len(msg)] = 1
h0, c = bitsAdd64(h0, binary.LittleEndian.Uint64(buf[0:8]), 0)
h1, c = bitsAdd64(h1, binary.LittleEndian.Uint64(buf[8:16]), c)
h2 += c
msg = nil
}
// Multiplication of big number limbs is similar to elementary school
// columnar multiplication. Instead of digits, there are 64-bit limbs.
//
// We are multiplying a 3 limbs number, h, by a 2 limbs number, r.
//
// h2 h1 h0 x
// r1 r0 =
// ----------------
// h2r0 h1r0 h0r0 <-- individual 128-bit products
// + h2r1 h1r1 h0r1
// ------------------------
// m3 m2 m1 m0 <-- result in 128-bit overlapping limbs
// ------------------------
// m3.hi m2.hi m1.hi m0.hi <-- carry propagation
// + m3.lo m2.lo m1.lo m0.lo
// -------------------------------
// t4 t3 t2 t1 t0 <-- final result in 64-bit limbs
//
// The main difference from pen-and-paper multiplication is that we do
// carry propagation in a separate step, as if we wrote two digit sums
// at first (the 128-bit limbs), and then carried the tens all at once.
h0r0 := mul64(h0, r0)
h1r0 := mul64(h1, r0)
h2r0 := mul64(h2, r0)
h0r1 := mul64(h0, r1)
h1r1 := mul64(h1, r1)
h2r1 := mul64(h2, r1)
// Since h2 is known to be at most 7 (5 + 1 + 1), and r0 and r1 have their
// top 4 bits cleared by rMask{0,1}, we know that their product is not going
// to overflow 64 bits, so we can ignore the high part of the products.
//
// This also means that the product doesn't have a fifth limb (t4).
if h2r0.hi != 0 {
panic("poly1305: unexpected overflow")
}
if h2r1.hi != 0 {
panic("poly1305: unexpected overflow")
}
m0 := h0r0
m1 := add128(h1r0, h0r1) // These two additions don't overflow thanks again
m2 := add128(h2r0, h1r1) // to the 4 masked bits at the top of r0 and r1.
m3 := h2r1
t0 := m0.lo
t1, c := bitsAdd64(m1.lo, m0.hi, 0)
t2, c := bitsAdd64(m2.lo, m1.hi, c)
t3, _ := bitsAdd64(m3.lo, m2.hi, c)
// Now we have the result as 4 64-bit limbs, and we need to reduce it
// modulo 2¹³⁰ - 5. The special shape of this Crandall prime lets us do
// a cheap partial reduction according to the reduction identity
//
// c * 2¹³⁰ + n = c * 5 + n mod 2¹³⁰ - 5
//
// because 2¹³⁰ = 5 mod 2¹³⁰ - 5. Partial reduction since the result is
// likely to be larger than 2¹³⁰ - 5, but still small enough to fit the
// assumptions we make about h in the rest of the code.
//
// See also https://speakerdeck.com/gtank/engineering-prime-numbers?slide=23
// We split the final result at the 2¹³⁰ mark into h and cc, the carry.
// Note that the carry bits are effectively shifted left by 2, in other
// words, cc = c * 4 for the c in the reduction identity.
h0, h1, h2 = t0, t1, t2&maskLow2Bits
cc := uint128{t2 & maskNotLow2Bits, t3}
// To add c * 5 to h, we first add cc = c * 4, and then add (cc >> 2) = c.
h0, c = bitsAdd64(h0, cc.lo, 0)
h1, c = bitsAdd64(h1, cc.hi, c)
h2 += c
cc = shiftRightBy2(cc)
h0, c = bitsAdd64(h0, cc.lo, 0)
h1, c = bitsAdd64(h1, cc.hi, c)
h2 += c
// h2 is at most 3 + 1 + 1 = 5, making the whole of h at most
//
// 5 * 2¹²⁸ + (2¹²⁸ - 1) = 6 * 2¹²⁸ - 1
}
state.h[0], state.h[1], state.h[2] = h0, h1, h2
}
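
The reduction identity used above can be sanity-checked numerically; an illustrative math/big snippet with arbitrary stand-in values:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 130), big.NewInt(5)) // 2¹³⁰ - 5

	// Arbitrary carry c and low part n, fixed here for illustration.
	c := big.NewInt(0x1234)
	n := new(big.Int).Lsh(big.NewInt(0xCAFEBABE), 90)

	// c * 2¹³⁰ + n  ≡  c * 5 + n  (mod 2¹³⁰ - 5), because 2¹³⁰ ≡ 5.
	lhs := new(big.Int).Add(new(big.Int).Lsh(c, 130), n)
	rhs := new(big.Int).Add(new(big.Int).Mul(c, big.NewInt(5)), n)
	fmt.Println(new(big.Int).Mod(lhs, p).Cmp(new(big.Int).Mod(rhs, p)) == 0) // true
}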
const (
maskLow2Bits uint64 = 0x0000000000000003
maskNotLow2Bits uint64 = ^maskLow2Bits
)
// select64 returns x if v == 1 and y if v == 0, in constant time.
func select64(v, x, y uint64) uint64 { return ^(v-1)&x | (v-1)&y }
// [p0, p1, p2] is 2¹³⁰ - 5 in little endian order.
const (
p0 = 0xFFFFFFFFFFFFFFFB
p1 = 0xFFFFFFFFFFFFFFFF
p2 = 0x0000000000000003
)
// finalize completes the modular reduction of h and computes
//
// out = h + s mod 2¹²⁸
//
func finalize(out *[TagSize]byte, h *[3]uint64, s *[2]uint64) {
h0, h1, h2 := h[0], h[1], h[2]
// After the partial reduction in updateGeneric, h might be more than
// 2¹³⁰ - 5, but will be less than 2 * (2¹³⁰ - 5). To complete the reduction
// in constant time, we compute t = h - (2¹³⁰ - 5), and select h as the
// result if the subtraction underflows, and t otherwise.
hMinusP0, b := bitsSub64(h0, p0, 0)
hMinusP1, b := bitsSub64(h1, p1, b)
_, b = bitsSub64(h2, p2, b)
// h = h if h < p else h - p
h0 = select64(b, h0, hMinusP0)
h1 = select64(b, h1, hMinusP1)
// Finally, we compute the last Poly1305 step
//
// tag = h + s mod 2¹²⁸
//
// by just doing a wide addition with the 128 low bits of h and discarding
// the overflow.
h0, c := bitsAdd64(h0, s[0], 0)
h1, _ = bitsAdd64(h1, s[1], c)
binary.LittleEndian.PutUint64(out[0:8], h0)
binary.LittleEndian.PutUint64(out[8:16], h1)
}
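
select64 and the borrow returned by bitsSub64 are what make this final reduction branch-free: the borrow is 1 exactly when h < p, and select64 turns that bit into a constant-time choice. A tiny illustrative check of the selector on its own:

package main

import "fmt"

// select64 returns x if v == 1 and y if v == 0, without branching,
// mirroring the helper above.
func select64(v, x, y uint64) uint64 { return ^(v-1)&x | (v-1)&y }

func main() {
	fmt.Println(select64(1, 10, 20)) // 10
	fmt.Println(select64(0, 10, 20)) // 20
}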

48
vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go generated vendored Normal file
View File

@ -0,0 +1,48 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build gc && !purego
// +build gc,!purego
package poly1305
//go:noescape
func update(state *macState, msg []byte)
// mac is a wrapper for macGeneric that redirects calls that would have gone to
// updateGeneric to update.
//
// Its Write and Sum methods are otherwise identical to the macGeneric ones, but
// using function pointers would carry a major performance cost.
type mac struct{ macGeneric }
func (h *mac) Write(p []byte) (int, error) {
nn := len(p)
if h.offset > 0 {
n := copy(h.buffer[h.offset:], p)
if h.offset+n < TagSize {
h.offset += n
return nn, nil
}
p = p[n:]
h.offset = 0
update(&h.macState, h.buffer[:])
}
if n := len(p) - (len(p) % TagSize); n > 0 {
update(&h.macState, p[:n])
p = p[n:]
}
if len(p) > 0 {
h.offset += copy(h.buffer[h.offset:], p)
}
return nn, nil
}
func (h *mac) Sum(out *[16]byte) {
state := h.macState
if h.offset > 0 {
update(&state, h.buffer[:h.offset])
}
finalize(out, &state.h, &state.s)
}

182
vendor/golang.org/x/crypto/poly1305/sum_ppc64le.s generated vendored Normal file
View File

@ -0,0 +1,182 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build gc && !purego
// +build gc,!purego
#include "textflag.h"
// This was ported from the amd64 implementation.
#define POLY1305_ADD(msg, h0, h1, h2, t0, t1, t2) \
MOVD (msg), t0; \
MOVD 8(msg), t1; \
MOVD $1, t2; \
ADDC t0, h0, h0; \
ADDE t1, h1, h1; \
ADDE t2, h2; \
ADD $16, msg
#define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3, t4, t5) \
MULLD r0, h0, t0; \
MULLD r0, h1, t4; \
MULHDU r0, h0, t1; \
MULHDU r0, h1, t5; \
ADDC t4, t1, t1; \
MULLD r0, h2, t2; \
ADDZE t5; \
MULHDU r1, h0, t4; \
MULLD r1, h0, h0; \
ADD t5, t2, t2; \
ADDC h0, t1, t1; \
MULLD h2, r1, t3; \
ADDZE t4, h0; \
MULHDU r1, h1, t5; \
MULLD r1, h1, t4; \
ADDC t4, t2, t2; \
ADDE t5, t3, t3; \
ADDC h0, t2, t2; \
MOVD $-4, t4; \
MOVD t0, h0; \
MOVD t1, h1; \
ADDZE t3; \
ANDCC $3, t2, h2; \
AND t2, t4, t0; \
ADDC t0, h0, h0; \
ADDE t3, h1, h1; \
SLD $62, t3, t4; \
SRD $2, t2; \
ADDZE h2; \
OR t4, t2, t2; \
SRD $2, t3; \
ADDC t2, h0, h0; \
ADDE t3, h1, h1; \
ADDZE h2
DATA ·poly1305Mask<>+0x00(SB)/8, $0x0FFFFFFC0FFFFFFF
DATA ·poly1305Mask<>+0x08(SB)/8, $0x0FFFFFFC0FFFFFFC
GLOBL ·poly1305Mask<>(SB), RODATA, $16
// func update(state *[7]uint64, msg []byte)
TEXT ·update(SB), $0-32
MOVD state+0(FP), R3
MOVD msg_base+8(FP), R4
MOVD msg_len+16(FP), R5
MOVD 0(R3), R8 // h0
MOVD 8(R3), R9 // h1
MOVD 16(R3), R10 // h2
MOVD 24(R3), R11 // r0
MOVD 32(R3), R12 // r1
CMP R5, $16
BLT bytes_between_0_and_15
loop:
POLY1305_ADD(R4, R8, R9, R10, R20, R21, R22)
multiply:
POLY1305_MUL(R8, R9, R10, R11, R12, R16, R17, R18, R14, R20, R21)
ADD $-16, R5
CMP R5, $16
BGE loop
bytes_between_0_and_15:
CMP R5, $0
BEQ done
MOVD $0, R16 // h0
MOVD $0, R17 // h1
flush_buffer:
CMP R5, $8
BLE just1
MOVD $8, R21
SUB R21, R5, R21
// Greater than 8 -- load the rightmost remaining bytes in msg
// and put into R17 (h1)
MOVD (R4)(R21), R17
MOVD $16, R22
// Find the offset to those bytes
SUB R5, R22, R22
SLD $3, R22
// Shift to get only the bytes in msg
SRD R22, R17, R17
// Put 1 at high end
MOVD $1, R23
SLD $3, R21
SLD R21, R23, R23
OR R23, R17, R17
// Remainder is 8
MOVD $8, R5
just1:
CMP R5, $8
BLT less8
// Exactly 8
MOVD (R4), R16
CMP R17, $0
// Check if we've already set R17; if not
// set 1 to indicate end of msg.
BNE carry
MOVD $1, R17
BR carry
less8:
MOVD $0, R16 // h0
MOVD $0, R22 // shift count
CMP R5, $4
BLT less4
MOVWZ (R4), R16
ADD $4, R4
ADD $-4, R5
MOVD $32, R22
less4:
CMP R5, $2
BLT less2
MOVHZ (R4), R21
SLD R22, R21, R21
OR R16, R21, R16
ADD $16, R22
ADD $-2, R5
ADD $2, R4
less2:
CMP R5, $0
BEQ insert1
MOVBZ (R4), R21
SLD R22, R21, R21
OR R16, R21, R16
ADD $8, R22
insert1:
// Insert 1 at end of msg
MOVD $1, R21
SLD R22, R21, R21
OR R16, R21, R16
carry:
// Add new values to h0, h1, h2
ADDC R16, R8
ADDE R17, R9
ADDZE R10, R10
MOVD $16, R5
ADD R5, R4
BR multiply
done:
// Save h0, h1, h2 in state
MOVD R8, 0(R3)
MOVD R9, 8(R3)
MOVD R10, 16(R3)
RET

76
vendor/golang.org/x/crypto/poly1305/sum_s390x.go generated vendored Normal file
View File

@ -0,0 +1,76 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build gc && !purego
// +build gc,!purego
package poly1305
import (
"golang.org/x/sys/cpu"
)
// updateVX is an assembly implementation of Poly1305 that uses vector
// instructions. It must only be called if the vector facility (vx) is
// available.
//go:noescape
func updateVX(state *macState, msg []byte)
// mac is a replacement for macGeneric that uses a larger buffer and redirects
// calls that would have gone to updateGeneric to updateVX if the vector
// facility is installed.
//
// A larger buffer is required for good performance because the vector
// implementation has a higher fixed cost per call than the generic
// implementation.
type mac struct {
macState
buffer [16 * TagSize]byte // size must be a multiple of block size (16)
offset int
}
func (h *mac) Write(p []byte) (int, error) {
nn := len(p)
if h.offset > 0 {
n := copy(h.buffer[h.offset:], p)
if h.offset+n < len(h.buffer) {
h.offset += n
return nn, nil
}
p = p[n:]
h.offset = 0
if cpu.S390X.HasVX {
updateVX(&h.macState, h.buffer[:])
} else {
updateGeneric(&h.macState, h.buffer[:])
}
}
tail := len(p) % len(h.buffer) // number of bytes to copy into buffer
body := len(p) - tail // number of bytes to process now
if body > 0 {
if cpu.S390X.HasVX {
updateVX(&h.macState, p[:body])
} else {
updateGeneric(&h.macState, p[:body])
}
}
h.offset = copy(h.buffer[:], p[body:]) // copy tail bytes - can be 0
return nn, nil
}
func (h *mac) Sum(out *[TagSize]byte) {
state := h.macState
remainder := h.buffer[:h.offset]
// Use the generic implementation if we have 2 or fewer blocks left
// to sum. The vector implementation has a higher startup time.
if cpu.S390X.HasVX && len(remainder) > 2*TagSize {
updateVX(&state, remainder)
} else if len(remainder) > 0 {
updateGeneric(&state, remainder)
}
finalize(out, &state.h, &state.s)
}

504
vendor/golang.org/x/crypto/poly1305/sum_s390x.s generated vendored Normal file
View File

@ -0,0 +1,504 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build gc && !purego
// +build gc,!purego
#include "textflag.h"
// This implementation of Poly1305 uses the vector facility (vx)
// to process up to 2 blocks (32 bytes) per iteration using an
// algorithm based on the one described in:
//
// NEON crypto, Daniel J. Bernstein & Peter Schwabe
// https://cryptojedi.org/papers/neoncrypto-20120320.pdf
//
// This algorithm uses 5 26-bit limbs to represent a 130-bit
// value. These limbs are, for the most part, zero extended and
// placed into 64-bit vector register elements. Each vector
// register is 128-bits wide and so holds 2 of these elements.
// Using 26-bit limbs allows us plenty of headroom to accommodate
// accumulations before and after multiplication without
// overflowing either 32-bits (before multiplication) or 64-bits
// (after multiplication).
//
// In order to parallelise the operations required to calculate
// the sum we use two separate accumulators and then sum those
// in an extra final step. For compatibility with the generic
// implementation we perform this summation at the end of every
// updateVX call.
//
// To use two accumulators we must multiply the message blocks
// by r² rather than r. Only the final message block should be
// multiplied by r.
//
// Example:
//
// We want to calculate the sum (h) for a 64 byte message (m):
//
// h = m[0:16]r⁴ + m[16:32]r³ + m[32:48]r² + m[48:64]r
//
// To do this we split the calculation into the even indices
// and odd indices of the message. These form our SIMD 'lanes':
//
// h = m[ 0:16]r⁴ + m[32:48]r² + <- lane 0
// m[16:32]r³ + m[48:64]r <- lane 1
//
// To calculate this iteratively we refactor so that both lanes
// are written in terms of r² and r:
//
// h = (m[ 0:16]r² + m[32:48])r² + <- lane 0
// (m[16:32]r² + m[48:64])r <- lane 1
// ^ ^
// | coefficients for second iteration
// coefficients for first iteration
//
// So in this case we would have two iterations. In the first
// both lanes are multiplied by r². In the second only the
// first lane is multiplied by r² and the second lane is
// instead multiplied by r. This gives us the odd and even
// powers of r that we need from the original equation.
//
// Notation:
//
// h - accumulator
// r - key
// m - message
//
// [a, b] - SIMD register holding two 64-bit values
// [a, b, c, d] - SIMD register holding four 32-bit values
// xⁱ[n] - limb n of variable x with bit width i
//
// Limbs are expressed in little endian order, so for 26-bit
// limbs x²⁶[4] will be the most significant limb and x²⁶[0]
// will be the least significant limb.
// masking constants
#define MOD24 V0 // [0x0000000000ffffff, 0x0000000000ffffff] - mask low 24-bits
#define MOD26 V1 // [0x0000000003ffffff, 0x0000000003ffffff] - mask low 26-bits
// expansion constants (see EXPAND macro)
#define EX0 V2
#define EX1 V3
#define EX2 V4
// key (r², r or 1 depending on context)
#define R_0 V5
#define R_1 V6
#define R_2 V7
#define R_3 V8
#define R_4 V9
// precalculated coefficients (5r², 5r or 0 depending on context)
#define R5_1 V10
#define R5_2 V11
#define R5_3 V12
#define R5_4 V13
// message block (m)
#define M_0 V14
#define M_1 V15
#define M_2 V16
#define M_3 V17
#define M_4 V18
// accumulator (h)
#define H_0 V19
#define H_1 V20
#define H_2 V21
#define H_3 V22
#define H_4 V23
// temporary registers (for short-lived values)
#define T_0 V24
#define T_1 V25
#define T_2 V26
#define T_3 V27
#define T_4 V28
GLOBL ·constants<>(SB), RODATA, $0x30
// EX0
DATA ·constants<>+0x00(SB)/8, $0x0006050403020100
DATA ·constants<>+0x08(SB)/8, $0x1016151413121110
// EX1
DATA ·constants<>+0x10(SB)/8, $0x060c0b0a09080706
DATA ·constants<>+0x18(SB)/8, $0x161c1b1a19181716
// EX2
DATA ·constants<>+0x20(SB)/8, $0x0d0d0d0d0d0f0e0d
DATA ·constants<>+0x28(SB)/8, $0x1d1d1d1d1d1f1e1d
// MULTIPLY multiplies each lane of f and g, partially reduced
// modulo 2¹³⁰ - 5. The result, h, consists of partial products
// in each lane that need to be reduced further to produce the
// final result.
//
// h = (fg) % 2¹³⁰ + (5fg) / 2¹³⁰
//
// Note that the multiplication by 5 of the high bits is
// achieved by precalculating the multiplication of four of the
// g coefficients by 5. These are g51-g54.
#define MULTIPLY(f0, f1, f2, f3, f4, g0, g1, g2, g3, g4, g51, g52, g53, g54, h0, h1, h2, h3, h4) \
VMLOF f0, g0, h0 \
VMLOF f0, g3, h3 \
VMLOF f0, g1, h1 \
VMLOF f0, g4, h4 \
VMLOF f0, g2, h2 \
VMLOF f1, g54, T_0 \
VMLOF f1, g2, T_3 \
VMLOF f1, g0, T_1 \
VMLOF f1, g3, T_4 \
VMLOF f1, g1, T_2 \
VMALOF f2, g53, h0, h0 \
VMALOF f2, g1, h3, h3 \
VMALOF f2, g54, h1, h1 \
VMALOF f2, g2, h4, h4 \
VMALOF f2, g0, h2, h2 \
VMALOF f3, g52, T_0, T_0 \
VMALOF f3, g0, T_3, T_3 \
VMALOF f3, g53, T_1, T_1 \
VMALOF f3, g1, T_4, T_4 \
VMALOF f3, g54, T_2, T_2 \
VMALOF f4, g51, h0, h0 \
VMALOF f4, g54, h3, h3 \
VMALOF f4, g52, h1, h1 \
VMALOF f4, g0, h4, h4 \
VMALOF f4, g53, h2, h2 \
VAG T_0, h0, h0 \
VAG T_3, h3, h3 \
VAG T_1, h1, h1 \
VAG T_4, h4, h4 \
VAG T_2, h2, h2
// REDUCE performs the following carry operations in four
// stages, as specified in Bernstein & Schwabe:
//
// 1: h[0]->h[1] h[3]->h[4]
// 2: h[1]->h[2] h[4]->h[0]
// 3: h[0]->h[1] h[2]->h[3]
// 4: h[3]->h[4]
//
// The result is that all of the limbs are limited to 26-bits
// except for h[1] and h[4] which are limited to 27-bits.
//
// Note that although each limb is aligned at 26-bit intervals
// they may contain values that exceed 2²⁶ - 1, hence the need
// to carry the excess bits in each limb.
#define REDUCE(h0, h1, h2, h3, h4) \
VESRLG $26, h0, T_0 \
VESRLG $26, h3, T_1 \
VN MOD26, h0, h0 \
VN MOD26, h3, h3 \
VAG T_0, h1, h1 \
VAG T_1, h4, h4 \
VESRLG $26, h1, T_2 \
VESRLG $26, h4, T_3 \
VN MOD26, h1, h1 \
VN MOD26, h4, h4 \
VESLG $2, T_3, T_4 \
VAG T_3, T_4, T_4 \
VAG T_2, h2, h2 \
VAG T_4, h0, h0 \
VESRLG $26, h2, T_0 \
VESRLG $26, h0, T_1 \
VN MOD26, h2, h2 \
VN MOD26, h0, h0 \
VAG T_0, h3, h3 \
VAG T_1, h1, h1 \
VESRLG $26, h3, T_2 \
VN MOD26, h3, h3 \
VAG T_2, h4, h4
// EXPAND splits the 128-bit little-endian values in0 and in1
// into 26-bit big-endian limbs and places the results into
// the first and second lane of d[0:4] respectively.
//
// The EX0, EX1 and EX2 constants are arrays of byte indices
// for permutation. The permutation both reverses the bytes
// in the input and ensures the bytes are copied into the
// destination limb ready to be shifted into their final
// position.
#define EXPAND(in0, in1, d0, d1, d2, d3, d4) \
VPERM in0, in1, EX0, d0 \
VPERM in0, in1, EX1, d2 \
VPERM in0, in1, EX2, d4 \
VESRLG $26, d0, d1 \
VESRLG $30, d2, d3 \
VESRLG $4, d2, d2 \
VN MOD26, d0, d0 \ // [in0[0], in1[0]]
VN MOD26, d3, d3 \ // [in0[3], in1[3]]
VN MOD26, d1, d1 \ // [in0[1], in1[1]]
VN MOD24, d4, d4 \ // [in0[4], in1[4]]
VN MOD26, d2, d2 // [in0[2], in1[2]]
// func updateVX(state *macState, msg []byte)
TEXT ·updateVX(SB), NOSPLIT, $0
MOVD state+0(FP), R1
LMG msg+8(FP), R2, R3 // R2=msg_base, R3=msg_len
// load EX0, EX1 and EX2
MOVD $·constants<>(SB), R5
VLM (R5), EX0, EX2
// generate masks
VGMG $(64-24), $63, MOD24 // [0x00ffffff, 0x00ffffff]
VGMG $(64-26), $63, MOD26 // [0x03ffffff, 0x03ffffff]
// load h (accumulator) and r (key) from state
VZERO T_1 // [0, 0]
VL 0(R1), T_0 // [h[0], h[1]]
VLEG $0, 16(R1), T_1 // [h[2], 0]
VL 24(R1), T_2 // [r[0], r[1]]
VPDI $0, T_0, T_2, T_3 // [h[0], r[0]]
VPDI $5, T_0, T_2, T_4 // [h[1], r[1]]
// unpack h and r into 26-bit limbs
// note: h[2] may have the low 3 bits set, so h[4] is a 27-bit value
VN MOD26, T_3, H_0 // [h[0], r[0]]
VZERO H_1 // [0, 0]
VZERO H_3 // [0, 0]
VGMG $(64-12-14), $(63-12), T_0 // [0x03fff000, 0x03fff000] - 26-bit mask with low 12 bits masked out
VESLG $24, T_1, T_1 // [h[2]<<24, 0]
VERIMG $-26&63, T_3, MOD26, H_1 // [h[1], r[1]]
VESRLG $+52&63, T_3, H_2 // [h[2], r[2]] - low 12 bits only
VERIMG $-14&63, T_4, MOD26, H_3 // [h[3], r[3]]
VESRLG $40, T_4, H_4 // [h[4], r[4]] - low 24 bits only
VERIMG $+12&63, T_4, T_0, H_2 // [h[2], r[2]] - complete
VO T_1, H_4, H_4 // [h[4], r[4]] - complete
// replicate r across all 4 vector elements
VREPF $3, H_0, R_0 // [r[0], r[0], r[0], r[0]]
VREPF $3, H_1, R_1 // [r[1], r[1], r[1], r[1]]
VREPF $3, H_2, R_2 // [r[2], r[2], r[2], r[2]]
VREPF $3, H_3, R_3 // [r[3], r[3], r[3], r[3]]
VREPF $3, H_4, R_4 // [r[4], r[4], r[4], r[4]]
// zero out lane 1 of h
VLEIG $1, $0, H_0 // [h[0], 0]
VLEIG $1, $0, H_1 // [h[1], 0]
VLEIG $1, $0, H_2 // [h[2], 0]
VLEIG $1, $0, H_3 // [h[3], 0]
VLEIG $1, $0, H_4 // [h[4], 0]
// calculate 5r (ignore least significant limb)
VREPIF $5, T_0
VMLF T_0, R_1, R5_1 // [5r[1], 5r[1], 5r[1], 5r[1]]
VMLF T_0, R_2, R5_2 // [5r[2], 5r[2], 5r[2], 5r[2]]
VMLF T_0, R_3, R5_3 // [5r[3], 5r[3], 5r[3], 5r[3]]
VMLF T_0, R_4, R5_4 // [5r[4], 5r[4], 5r[4], 5r[4]]
// skip r² calculation if we are only calculating one block
CMPBLE R3, $16, skip
// calculate r²
MULTIPLY(R_0, R_1, R_2, R_3, R_4, R_0, R_1, R_2, R_3, R_4, R5_1, R5_2, R5_3, R5_4, M_0, M_1, M_2, M_3, M_4)
REDUCE(M_0, M_1, M_2, M_3, M_4)
VGBM $0x0f0f, T_0
VERIMG $0, M_0, T_0, R_0 // [r[0], r²[0], r[0], r²[0]]
VERIMG $0, M_1, T_0, R_1 // [r[1], r²[1], r[1], r²[1]]
VERIMG $0, M_2, T_0, R_2 // [r[2], r²[2], r[2], r²[2]]
VERIMG $0, M_3, T_0, R_3 // [r[3], r²[3], r[3], r²[3]]
VERIMG $0, M_4, T_0, R_4 // [r[4], r²[4], r[4], r²[4]]
// calculate 5r² (ignore least significant limb)
VREPIF $5, T_0
VMLF T_0, R_1, R5_1 // [5r[1], 5r²[1], 5r[1], 5r²[1]]
VMLF T_0, R_2, R5_2 // [5r[2], 5r²[2], 5r[2], 5r²[2]]
VMLF T_0, R_3, R5_3 // [5r[3], 5r²[3], 5r[3], 5r²[3]]
VMLF T_0, R_4, R5_4 // [5r[4], 5r²[4], 5r[4], 5r²[4]]
loop:
CMPBLE R3, $32, b2 // 2 or fewer blocks remaining, need to change key coefficients
// load next 2 blocks from message
VLM (R2), T_0, T_1
// update message slice
SUB $32, R3
MOVD $32(R2), R2
// unpack message blocks into 26-bit big-endian limbs
EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4)
// add 2¹²⁸ to each message block value
VLEIB $4, $1, M_4
VLEIB $12, $1, M_4
multiply:
// accumulate the incoming message
VAG H_0, M_0, M_0
VAG H_3, M_3, M_3
VAG H_1, M_1, M_1
VAG H_4, M_4, M_4
VAG H_2, M_2, M_2
// multiply the accumulator by the key coefficient
MULTIPLY(M_0, M_1, M_2, M_3, M_4, R_0, R_1, R_2, R_3, R_4, R5_1, R5_2, R5_3, R5_4, H_0, H_1, H_2, H_3, H_4)
// carry and partially reduce the partial products
REDUCE(H_0, H_1, H_2, H_3, H_4)
CMPBNE R3, $0, loop
finish:
// sum lane 0 and lane 1 and put the result in lane 1
VZERO T_0
VSUMQG H_0, T_0, H_0
VSUMQG H_3, T_0, H_3
VSUMQG H_1, T_0, H_1
VSUMQG H_4, T_0, H_4
VSUMQG H_2, T_0, H_2
// reduce again after summation
// TODO(mundaym): there might be a more efficient way to do this
// now that we only have 1 active lane. For example, we could
// simultaneously pack the values as we reduce them.
REDUCE(H_0, H_1, H_2, H_3, H_4)
// carry h[1] through to h[4] so that only h[4] can exceed 2²⁶ - 1
// TODO(mundaym): in testing this final carry was unnecessary.
// Needs a proof before it can be removed though.
VESRLG $26, H_1, T_1
VN MOD26, H_1, H_1
VAQ T_1, H_2, H_2
VESRLG $26, H_2, T_2
VN MOD26, H_2, H_2
VAQ T_2, H_3, H_3
VESRLG $26, H_3, T_3
VN MOD26, H_3, H_3
VAQ T_3, H_4, H_4
// h is now < 2(2¹³⁰ - 5)
// Pack each lane in h[0:4] into h[0:1].
VESLG $26, H_1, H_1
VESLG $26, H_3, H_3
VO H_0, H_1, H_0
VO H_2, H_3, H_2
VESLG $4, H_2, H_2
VLEIB $7, $48, H_1
VSLB H_1, H_2, H_2
VO H_0, H_2, H_0
VLEIB $7, $104, H_1
VSLB H_1, H_4, H_3
VO H_3, H_0, H_0
VLEIB $7, $24, H_1
VSRLB H_1, H_4, H_1
// update state
VSTEG $1, H_0, 0(R1)
VSTEG $0, H_0, 8(R1)
VSTEG $1, H_1, 16(R1)
RET
b2: // 2 or fewer blocks remaining
CMPBLE R3, $16, b1
// Load the 2 remaining blocks (17-32 bytes remaining).
MOVD $-17(R3), R0 // index of final byte to load modulo 16
VL (R2), T_0 // load full 16 byte block
VLL R0, 16(R2), T_1 // load final (possibly partial) block and pad with zeros to 16 bytes
// The Poly1305 algorithm requires that a 1 bit be appended to
// each message block. If the final block is less than 16 bytes
// long then it is easiest to insert the 1 before the message
// block is split into 26-bit limbs. If, on the other hand, the
// final message block is 16 bytes long then we append the 1 bit
// after expansion as normal.
MOVBZ $1, R0
MOVD $-16(R3), R3 // index of byte in last block to insert 1 at (could be 16)
CMPBEQ R3, $16, 2(PC) // skip the insertion if the final block is 16 bytes long
VLVGB R3, R0, T_1 // insert 1 into the byte at index R3
// Split both blocks into 26-bit limbs in the appropriate lanes.
EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4)
// Append a 1 byte to the end of the second to last block.
VLEIB $4, $1, M_4
// Append a 1 byte to the end of the last block only if it is a
// full 16 byte block.
CMPBNE R3, $16, 2(PC)
VLEIB $12, $1, M_4
// Finally, set up the coefficients for the final multiplication.
// We have previously saved r and 5r in the 32-bit even indexes
// of the R_[0-4] and R5_[1-4] coefficient registers.
//
// We want lane 0 to be multiplied by r² so that it can be kept the
// same. We want lane 1 to be multiplied by r so we need to move
// the saved r value into the 32-bit odd index in lane 1 by
// rotating the 64-bit lane by 32.
VGBM $0x00ff, T_0 // [0, 0xffffffffffffffff] - mask lane 1 only
VERIMG $32, R_0, T_0, R_0 // [_, r²[0], _, r[0]]
VERIMG $32, R_1, T_0, R_1 // [_, r²[1], _, r[1]]
VERIMG $32, R_2, T_0, R_2 // [_, r²[2], _, r[2]]
VERIMG $32, R_3, T_0, R_3 // [_, r²[3], _, r[3]]
VERIMG $32, R_4, T_0, R_4 // [_, r²[4], _, r[4]]
VERIMG $32, R5_1, T_0, R5_1 // [_, 5r²[1], _, 5r[1]]
VERIMG $32, R5_2, T_0, R5_2 // [_, 5r²[2], _, 5r[2]]
VERIMG $32, R5_3, T_0, R5_3 // [_, 5r²[3], _, 5r[3]]
VERIMG $32, R5_4, T_0, R5_4 // [_, 5r²[4], _, 5r[4]]
MOVD $0, R3
BR multiply
skip:
CMPBEQ R3, $0, finish
b1: // 1 block remaining
// Load the final block (1-16 bytes). This will be placed into
// lane 0.
MOVD $-1(R3), R0
VLL R0, (R2), T_0 // pad to 16 bytes with zeros
// The Poly1305 algorithm requires that a 1 bit be appended to
// each message block. If the final block is less than 16 bytes
// long then it is easiest to insert the 1 before the message
// block is split into 26-bit limbs. If, on the other hand, the
// final message block is 16 bytes long then we append the 1 bit
// after expansion as normal.
MOVBZ $1, R0
CMPBEQ R3, $16, 2(PC)
VLVGB R3, R0, T_0
// Set the message block in lane 1 to the value 0 so that it
// can be accumulated without affecting the final result.
VZERO T_1
// Split the final message block into 26-bit limbs in lane 0.
// Lane 1 will contain 0.
EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4)
// Append a 1 byte to the end of the last block only if it is a
// full 16 byte block.
CMPBNE R3, $16, 2(PC)
VLEIB $4, $1, M_4
// We have previously saved r and 5r in the 32-bit even indexes
// of the R_[0-4] and R5_[1-4] coefficient registers.
//
// We want lane 0 to be multiplied by r so we need to move the
// saved r value into the 32-bit odd index in lane 0. We want
// lane 1 to be set to the value 1. This makes multiplication
// a no-op. We do this by setting lane 1 in every register to 0
// and then just setting the 32-bit index 3 in R_0 to 1.
VZERO T_0
MOVD $0, R0
MOVD $0x10111213, R12
VLVGP R12, R0, T_1 // [_, 0x10111213, _, 0x00000000]
VPERM T_0, R_0, T_1, R_0 // [_, r[0], _, 0]
VPERM T_0, R_1, T_1, R_1 // [_, r[1], _, 0]
VPERM T_0, R_2, T_1, R_2 // [_, r[2], _, 0]
VPERM T_0, R_3, T_1, R_3 // [_, r[3], _, 0]
VPERM T_0, R_4, T_1, R_4 // [_, r[4], _, 0]
VPERM T_0, R5_1, T_1, R5_1 // [_, 5r[1], _, 0]
VPERM T_0, R5_2, T_1, R5_2 // [_, 5r[2], _, 0]
VPERM T_0, R5_3, T_1, R5_3 // [_, 5r[3], _, 0]
VPERM T_0, R5_4, T_1, R5_4 // [_, 5r[4], _, 0]
// Set the value of lane 1 to be 1.
VLEIF $3, $1, R_0 // [_, r[0], _, 1]
MOVD $0, R3
BR multiply
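
The two-accumulator refactoring described in the header comment of this file can be checked with plain math/big arithmetic. The following sketch uses arbitrary stand-in values and invented names, purely to illustrate why multiplying both lanes by r² each iteration (and the second lane by r at the end) reproduces the sequential sum:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 130), big.NewInt(5))

	// Arbitrary values standing in for r and four message blocks.
	r := big.NewInt(0x1DEA)
	m := []*big.Int{big.NewInt(11), big.NewInt(22), big.NewInt(33), big.NewInt(44)}

	mod := func(x *big.Int) *big.Int { return new(big.Int).Mod(x, p) }
	mul := func(a, b *big.Int) *big.Int { return mod(new(big.Int).Mul(a, b)) }
	add := func(a, b *big.Int) *big.Int { return mod(new(big.Int).Add(a, b)) }
	pow := func(x *big.Int, e int64) *big.Int { return new(big.Int).Exp(x, big.NewInt(e), p) }

	// Sequential form: h = m0·r⁴ + m1·r³ + m2·r² + m3·r (mod p).
	seq := new(big.Int)
	for i, mi := range m {
		seq = add(seq, mul(mi, pow(r, int64(len(m)-i))))
	}

	// Two-lane form: both lanes absorb a block and are multiplied by r² per
	// iteration, except that lane 1 uses r for its final step.
	r2 := mul(r, r)
	lane0 := mul(add(mul(m[0], r2), m[2]), r2) // (m0·r² + m2)·r²
	lane1 := mul(add(mul(m[1], r2), m[3]), r)  // (m1·r² + m3)·r
	fmt.Println(add(lane0, lane1).Cmp(seq) == 0) // true
}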

144
vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go generated vendored Normal file
View File

@ -0,0 +1,144 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package salsa provides low-level access to functions in the Salsa family.
package salsa // import "golang.org/x/crypto/salsa20/salsa"
// Sigma is the Salsa20 constant for 256-bit keys.
var Sigma = [16]byte{'e', 'x', 'p', 'a', 'n', 'd', ' ', '3', '2', '-', 'b', 'y', 't', 'e', ' ', 'k'}
// HSalsa20 applies the HSalsa20 core function to a 16-byte input in, 32-byte
// key k, and 16-byte constant c, and puts the result into the 32-byte array
// out.
func HSalsa20(out *[32]byte, in *[16]byte, k *[32]byte, c *[16]byte) {
x0 := uint32(c[0]) | uint32(c[1])<<8 | uint32(c[2])<<16 | uint32(c[3])<<24
x1 := uint32(k[0]) | uint32(k[1])<<8 | uint32(k[2])<<16 | uint32(k[3])<<24
x2 := uint32(k[4]) | uint32(k[5])<<8 | uint32(k[6])<<16 | uint32(k[7])<<24
x3 := uint32(k[8]) | uint32(k[9])<<8 | uint32(k[10])<<16 | uint32(k[11])<<24
x4 := uint32(k[12]) | uint32(k[13])<<8 | uint32(k[14])<<16 | uint32(k[15])<<24
x5 := uint32(c[4]) | uint32(c[5])<<8 | uint32(c[6])<<16 | uint32(c[7])<<24
x6 := uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24
x7 := uint32(in[4]) | uint32(in[5])<<8 | uint32(in[6])<<16 | uint32(in[7])<<24
x8 := uint32(in[8]) | uint32(in[9])<<8 | uint32(in[10])<<16 | uint32(in[11])<<24
x9 := uint32(in[12]) | uint32(in[13])<<8 | uint32(in[14])<<16 | uint32(in[15])<<24
x10 := uint32(c[8]) | uint32(c[9])<<8 | uint32(c[10])<<16 | uint32(c[11])<<24
x11 := uint32(k[16]) | uint32(k[17])<<8 | uint32(k[18])<<16 | uint32(k[19])<<24
x12 := uint32(k[20]) | uint32(k[21])<<8 | uint32(k[22])<<16 | uint32(k[23])<<24
x13 := uint32(k[24]) | uint32(k[25])<<8 | uint32(k[26])<<16 | uint32(k[27])<<24
x14 := uint32(k[28]) | uint32(k[29])<<8 | uint32(k[30])<<16 | uint32(k[31])<<24
x15 := uint32(c[12]) | uint32(c[13])<<8 | uint32(c[14])<<16 | uint32(c[15])<<24
for i := 0; i < 20; i += 2 {
u := x0 + x12
x4 ^= u<<7 | u>>(32-7)
u = x4 + x0
x8 ^= u<<9 | u>>(32-9)
u = x8 + x4
x12 ^= u<<13 | u>>(32-13)
u = x12 + x8
x0 ^= u<<18 | u>>(32-18)
u = x5 + x1
x9 ^= u<<7 | u>>(32-7)
u = x9 + x5
x13 ^= u<<9 | u>>(32-9)
u = x13 + x9
x1 ^= u<<13 | u>>(32-13)
u = x1 + x13
x5 ^= u<<18 | u>>(32-18)
u = x10 + x6
x14 ^= u<<7 | u>>(32-7)
u = x14 + x10
x2 ^= u<<9 | u>>(32-9)
u = x2 + x14
x6 ^= u<<13 | u>>(32-13)
u = x6 + x2
x10 ^= u<<18 | u>>(32-18)
u = x15 + x11
x3 ^= u<<7 | u>>(32-7)
u = x3 + x15
x7 ^= u<<9 | u>>(32-9)
u = x7 + x3
x11 ^= u<<13 | u>>(32-13)
u = x11 + x7
x15 ^= u<<18 | u>>(32-18)
u = x0 + x3
x1 ^= u<<7 | u>>(32-7)
u = x1 + x0
x2 ^= u<<9 | u>>(32-9)
u = x2 + x1
x3 ^= u<<13 | u>>(32-13)
u = x3 + x2
x0 ^= u<<18 | u>>(32-18)
u = x5 + x4
x6 ^= u<<7 | u>>(32-7)
u = x6 + x5
x7 ^= u<<9 | u>>(32-9)
u = x7 + x6
x4 ^= u<<13 | u>>(32-13)
u = x4 + x7
x5 ^= u<<18 | u>>(32-18)
u = x10 + x9
x11 ^= u<<7 | u>>(32-7)
u = x11 + x10
x8 ^= u<<9 | u>>(32-9)
u = x8 + x11
x9 ^= u<<13 | u>>(32-13)
u = x9 + x8
x10 ^= u<<18 | u>>(32-18)
u = x15 + x14
x12 ^= u<<7 | u>>(32-7)
u = x12 + x15
x13 ^= u<<9 | u>>(32-9)
u = x13 + x12
x14 ^= u<<13 | u>>(32-13)
u = x14 + x13
x15 ^= u<<18 | u>>(32-18)
}
out[0] = byte(x0)
out[1] = byte(x0 >> 8)
out[2] = byte(x0 >> 16)
out[3] = byte(x0 >> 24)
out[4] = byte(x5)
out[5] = byte(x5 >> 8)
out[6] = byte(x5 >> 16)
out[7] = byte(x5 >> 24)
out[8] = byte(x10)
out[9] = byte(x10 >> 8)
out[10] = byte(x10 >> 16)
out[11] = byte(x10 >> 24)
out[12] = byte(x15)
out[13] = byte(x15 >> 8)
out[14] = byte(x15 >> 16)
out[15] = byte(x15 >> 24)
out[16] = byte(x6)
out[17] = byte(x6 >> 8)
out[18] = byte(x6 >> 16)
out[19] = byte(x6 >> 24)
out[20] = byte(x7)
out[21] = byte(x7 >> 8)
out[22] = byte(x7 >> 16)
out[23] = byte(x7 >> 24)
out[24] = byte(x8)
out[25] = byte(x8 >> 8)
out[26] = byte(x8 >> 16)
out[27] = byte(x8 >> 24)
out[28] = byte(x9)
out[29] = byte(x9 >> 8)
out[30] = byte(x9 >> 16)
out[31] = byte(x9 >> 24)
}
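
For orientation, HSalsa20 is the key-derivation step of XSalsa20-style constructions: the first 16 nonce bytes and the key are run through HSalsa20 with the Sigma constant to obtain a subkey. A hedged usage sketch (key and nonce literals are placeholders only):

package main

import (
	"fmt"

	"golang.org/x/crypto/salsa20/salsa"
)

func main() {
	var key [32]byte
	var nonce24 [24]byte
	copy(key[:], "an example 32-byte secret key!!!")
	copy(nonce24[:], "a 24-byte example nonce!")

	// Derive a subkey from the key and the first 16 nonce bytes.
	var hNonce [16]byte
	copy(hNonce[:], nonce24[:16])
	var subKey [32]byte
	salsa.HSalsa20(&subKey, &hNonce, &key, &salsa.Sigma)
	fmt.Printf("subkey: %x\n", subKey)
}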

199
vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go generated vendored Normal file
View File

@ -0,0 +1,199 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package salsa
// Core208 applies the Salsa20/8 core function to the 64-byte array in and puts
// the result into the 64-byte array out. The input and output may be the same array.
func Core208(out *[64]byte, in *[64]byte) {
j0 := uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24
j1 := uint32(in[4]) | uint32(in[5])<<8 | uint32(in[6])<<16 | uint32(in[7])<<24
j2 := uint32(in[8]) | uint32(in[9])<<8 | uint32(in[10])<<16 | uint32(in[11])<<24
j3 := uint32(in[12]) | uint32(in[13])<<8 | uint32(in[14])<<16 | uint32(in[15])<<24
j4 := uint32(in[16]) | uint32(in[17])<<8 | uint32(in[18])<<16 | uint32(in[19])<<24
j5 := uint32(in[20]) | uint32(in[21])<<8 | uint32(in[22])<<16 | uint32(in[23])<<24
j6 := uint32(in[24]) | uint32(in[25])<<8 | uint32(in[26])<<16 | uint32(in[27])<<24
j7 := uint32(in[28]) | uint32(in[29])<<8 | uint32(in[30])<<16 | uint32(in[31])<<24
j8 := uint32(in[32]) | uint32(in[33])<<8 | uint32(in[34])<<16 | uint32(in[35])<<24
j9 := uint32(in[36]) | uint32(in[37])<<8 | uint32(in[38])<<16 | uint32(in[39])<<24
j10 := uint32(in[40]) | uint32(in[41])<<8 | uint32(in[42])<<16 | uint32(in[43])<<24
j11 := uint32(in[44]) | uint32(in[45])<<8 | uint32(in[46])<<16 | uint32(in[47])<<24
j12 := uint32(in[48]) | uint32(in[49])<<8 | uint32(in[50])<<16 | uint32(in[51])<<24
j13 := uint32(in[52]) | uint32(in[53])<<8 | uint32(in[54])<<16 | uint32(in[55])<<24
j14 := uint32(in[56]) | uint32(in[57])<<8 | uint32(in[58])<<16 | uint32(in[59])<<24
j15 := uint32(in[60]) | uint32(in[61])<<8 | uint32(in[62])<<16 | uint32(in[63])<<24
x0, x1, x2, x3, x4, x5, x6, x7, x8 := j0, j1, j2, j3, j4, j5, j6, j7, j8
x9, x10, x11, x12, x13, x14, x15 := j9, j10, j11, j12, j13, j14, j15
for i := 0; i < 8; i += 2 {
u := x0 + x12
x4 ^= u<<7 | u>>(32-7)
u = x4 + x0
x8 ^= u<<9 | u>>(32-9)
u = x8 + x4
x12 ^= u<<13 | u>>(32-13)
u = x12 + x8
x0 ^= u<<18 | u>>(32-18)
u = x5 + x1
x9 ^= u<<7 | u>>(32-7)
u = x9 + x5
x13 ^= u<<9 | u>>(32-9)
u = x13 + x9
x1 ^= u<<13 | u>>(32-13)
u = x1 + x13
x5 ^= u<<18 | u>>(32-18)
u = x10 + x6
x14 ^= u<<7 | u>>(32-7)
u = x14 + x10
x2 ^= u<<9 | u>>(32-9)
u = x2 + x14
x6 ^= u<<13 | u>>(32-13)
u = x6 + x2
x10 ^= u<<18 | u>>(32-18)
u = x15 + x11
x3 ^= u<<7 | u>>(32-7)
u = x3 + x15
x7 ^= u<<9 | u>>(32-9)
u = x7 + x3
x11 ^= u<<13 | u>>(32-13)
u = x11 + x7
x15 ^= u<<18 | u>>(32-18)
u = x0 + x3
x1 ^= u<<7 | u>>(32-7)
u = x1 + x0
x2 ^= u<<9 | u>>(32-9)
u = x2 + x1
x3 ^= u<<13 | u>>(32-13)
u = x3 + x2
x0 ^= u<<18 | u>>(32-18)
u = x5 + x4
x6 ^= u<<7 | u>>(32-7)
u = x6 + x5
x7 ^= u<<9 | u>>(32-9)
u = x7 + x6
x4 ^= u<<13 | u>>(32-13)
u = x4 + x7
x5 ^= u<<18 | u>>(32-18)
u = x10 + x9
x11 ^= u<<7 | u>>(32-7)
u = x11 + x10
x8 ^= u<<9 | u>>(32-9)
u = x8 + x11
x9 ^= u<<13 | u>>(32-13)
u = x9 + x8
x10 ^= u<<18 | u>>(32-18)
u = x15 + x14
x12 ^= u<<7 | u>>(32-7)
u = x12 + x15
x13 ^= u<<9 | u>>(32-9)
u = x13 + x12
x14 ^= u<<13 | u>>(32-13)
u = x14 + x13
x15 ^= u<<18 | u>>(32-18)
}
x0 += j0
x1 += j1
x2 += j2
x3 += j3
x4 += j4
x5 += j5
x6 += j6
x7 += j7
x8 += j8
x9 += j9
x10 += j10
x11 += j11
x12 += j12
x13 += j13
x14 += j14
x15 += j15
out[0] = byte(x0)
out[1] = byte(x0 >> 8)
out[2] = byte(x0 >> 16)
out[3] = byte(x0 >> 24)
out[4] = byte(x1)
out[5] = byte(x1 >> 8)
out[6] = byte(x1 >> 16)
out[7] = byte(x1 >> 24)
out[8] = byte(x2)
out[9] = byte(x2 >> 8)
out[10] = byte(x2 >> 16)
out[11] = byte(x2 >> 24)
out[12] = byte(x3)
out[13] = byte(x3 >> 8)
out[14] = byte(x3 >> 16)
out[15] = byte(x3 >> 24)
out[16] = byte(x4)
out[17] = byte(x4 >> 8)
out[18] = byte(x4 >> 16)
out[19] = byte(x4 >> 24)
out[20] = byte(x5)
out[21] = byte(x5 >> 8)
out[22] = byte(x5 >> 16)
out[23] = byte(x5 >> 24)
out[24] = byte(x6)
out[25] = byte(x6 >> 8)
out[26] = byte(x6 >> 16)
out[27] = byte(x6 >> 24)
out[28] = byte(x7)
out[29] = byte(x7 >> 8)
out[30] = byte(x7 >> 16)
out[31] = byte(x7 >> 24)
out[32] = byte(x8)
out[33] = byte(x8 >> 8)
out[34] = byte(x8 >> 16)
out[35] = byte(x8 >> 24)
out[36] = byte(x9)
out[37] = byte(x9 >> 8)
out[38] = byte(x9 >> 16)
out[39] = byte(x9 >> 24)
out[40] = byte(x10)
out[41] = byte(x10 >> 8)
out[42] = byte(x10 >> 16)
out[43] = byte(x10 >> 24)
out[44] = byte(x11)
out[45] = byte(x11 >> 8)
out[46] = byte(x11 >> 16)
out[47] = byte(x11 >> 24)
out[48] = byte(x12)
out[49] = byte(x12 >> 8)
out[50] = byte(x12 >> 16)
out[51] = byte(x12 >> 24)
out[52] = byte(x13)
out[53] = byte(x13 >> 8)
out[54] = byte(x13 >> 16)
out[55] = byte(x13 >> 24)
out[56] = byte(x14)
out[57] = byte(x14 >> 8)
out[58] = byte(x14 >> 16)
out[59] = byte(x14 >> 24)
out[60] = byte(x15)
out[61] = byte(x15 >> 8)
out[62] = byte(x15 >> 16)
out[63] = byte(x15 >> 24)
}
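
Core208 is the reduced-round Salsa20/8 permutation, used for example as the mixing function in scrypt. A minimal call sketch (the input literal is arbitrary):

package main

import (
	"fmt"

	"golang.org/x/crypto/salsa20/salsa"
)

func main() {
	// Core208 permutes a 64-byte block; in and out may be the same array.
	var block [64]byte
	copy(block[:], []byte("sixty-four bytes of input data for the Salsa20/8 core function!"))
	salsa.Core208(&block, &block)
	fmt.Printf("%x\n", block)
}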

View File

@ -0,0 +1,24 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build amd64 && !purego && gc
// +build amd64,!purego,gc
package salsa
//go:noescape
// salsa2020XORKeyStream is implemented in salsa20_amd64.s.
func salsa2020XORKeyStream(out, in *byte, n uint64, nonce, key *byte)
// XORKeyStream crypts bytes from in to out using the given key and counters.
// In and out must overlap entirely or not at all. Counter
// contains the raw salsa20 counter bytes (both nonce and block counter).
func XORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) {
if len(in) == 0 {
return
}
_ = out[len(in)-1]
salsa2020XORKeyStream(&out[0], &in[0], uint64(len(in)), &counter[0], &key[0])
}
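
A usage sketch of this package-level XORKeyStream (placeholder key and nonce). Here counter[0:8] carries the nonce and counter[8:16] the little-endian block counter, which the reference implementation later in this diff increments:

package main

import (
	"fmt"

	"golang.org/x/crypto/salsa20/salsa"
)

func main() {
	var key [32]byte
	copy(key[:], "an example 32-byte secret key!!!")

	var counter [16]byte
	copy(counter[:8], "8bytenon") // nonce in the low 8 bytes, block counter starts at 0

	msg := []byte("attack at dawn")
	ct := make([]byte, len(msg))
	salsa.XORKeyStream(ct, msg, &counter, &key)

	// Applying the same keystream again decrypts.
	pt := make([]byte, len(ct))
	salsa.XORKeyStream(pt, ct, &counter, &key)
	fmt.Println(string(pt)) // attack at dawn
}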

View File

@ -0,0 +1,881 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build amd64 && !purego && gc
// +build amd64,!purego,gc
// This code was translated into a form compatible with 6a from the public
// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html
// func salsa2020XORKeyStream(out, in *byte, n uint64, nonce, key *byte)
// This needs up to 64 bytes at 360(R12); hence the non-obvious frame size.
TEXT ·salsa2020XORKeyStream(SB),0,$456-40 // frame = 424 + 32 byte alignment
MOVQ out+0(FP),DI
MOVQ in+8(FP),SI
MOVQ n+16(FP),DX
MOVQ nonce+24(FP),CX
MOVQ key+32(FP),R8
MOVQ SP,R12
ADDQ $31, R12
ANDQ $~31, R12
MOVQ DX,R9
MOVQ CX,DX
MOVQ R8,R10
CMPQ R9,$0
JBE DONE
START:
MOVL 20(R10),CX
MOVL 0(R10),R8
MOVL 0(DX),AX
MOVL 16(R10),R11
MOVL CX,0(R12)
MOVL R8, 4 (R12)
MOVL AX, 8 (R12)
MOVL R11, 12 (R12)
MOVL 8(DX),CX
MOVL 24(R10),R8
MOVL 4(R10),AX
MOVL 4(DX),R11
MOVL CX,16(R12)
MOVL R8, 20 (R12)
MOVL AX, 24 (R12)
MOVL R11, 28 (R12)
MOVL 12(DX),CX
MOVL 12(R10),DX
MOVL 28(R10),R8
MOVL 8(R10),AX
MOVL DX,32(R12)
MOVL CX, 36 (R12)
MOVL R8, 40 (R12)
MOVL AX, 44 (R12)
MOVQ $1634760805,DX
MOVQ $857760878,CX
MOVQ $2036477234,R8
MOVQ $1797285236,AX
MOVL DX,48(R12)
MOVL CX, 52 (R12)
MOVL R8, 56 (R12)
MOVL AX, 60 (R12)
CMPQ R9,$256
JB BYTESBETWEEN1AND255
MOVOA 48(R12),X0
PSHUFL $0X55,X0,X1
PSHUFL $0XAA,X0,X2
PSHUFL $0XFF,X0,X3
PSHUFL $0X00,X0,X0
MOVOA X1,64(R12)
MOVOA X2,80(R12)
MOVOA X3,96(R12)
MOVOA X0,112(R12)
MOVOA 0(R12),X0
PSHUFL $0XAA,X0,X1
PSHUFL $0XFF,X0,X2
PSHUFL $0X00,X0,X3
PSHUFL $0X55,X0,X0
MOVOA X1,128(R12)
MOVOA X2,144(R12)
MOVOA X3,160(R12)
MOVOA X0,176(R12)
MOVOA 16(R12),X0
PSHUFL $0XFF,X0,X1
PSHUFL $0X55,X0,X2
PSHUFL $0XAA,X0,X0
MOVOA X1,192(R12)
MOVOA X2,208(R12)
MOVOA X0,224(R12)
MOVOA 32(R12),X0
PSHUFL $0X00,X0,X1
PSHUFL $0XAA,X0,X2
PSHUFL $0XFF,X0,X0
MOVOA X1,240(R12)
MOVOA X2,256(R12)
MOVOA X0,272(R12)
BYTESATLEAST256:
MOVL 16(R12),DX
MOVL 36 (R12),CX
MOVL DX,288(R12)
MOVL CX,304(R12)
SHLQ $32,CX
ADDQ CX,DX
ADDQ $1,DX
MOVQ DX,CX
SHRQ $32,CX
MOVL DX, 292 (R12)
MOVL CX, 308 (R12)
ADDQ $1,DX
MOVQ DX,CX
SHRQ $32,CX
MOVL DX, 296 (R12)
MOVL CX, 312 (R12)
ADDQ $1,DX
MOVQ DX,CX
SHRQ $32,CX
MOVL DX, 300 (R12)
MOVL CX, 316 (R12)
ADDQ $1,DX
MOVQ DX,CX
SHRQ $32,CX
MOVL DX,16(R12)
MOVL CX, 36 (R12)
MOVQ R9,352(R12)
MOVQ $20,DX
MOVOA 64(R12),X0
MOVOA 80(R12),X1
MOVOA 96(R12),X2
MOVOA 256(R12),X3
MOVOA 272(R12),X4
MOVOA 128(R12),X5
MOVOA 144(R12),X6
MOVOA 176(R12),X7
MOVOA 192(R12),X8
MOVOA 208(R12),X9
MOVOA 224(R12),X10
MOVOA 304(R12),X11
MOVOA 112(R12),X12
MOVOA 160(R12),X13
MOVOA 240(R12),X14
MOVOA 288(R12),X15
MAINLOOP1:
MOVOA X1,320(R12)
MOVOA X2,336(R12)
MOVOA X13,X1
PADDL X12,X1
MOVOA X1,X2
PSLLL $7,X1
PXOR X1,X14
PSRLL $25,X2
PXOR X2,X14
MOVOA X7,X1
PADDL X0,X1
MOVOA X1,X2
PSLLL $7,X1
PXOR X1,X11
PSRLL $25,X2
PXOR X2,X11
MOVOA X12,X1
PADDL X14,X1
MOVOA X1,X2
PSLLL $9,X1
PXOR X1,X15
PSRLL $23,X2
PXOR X2,X15
MOVOA X0,X1
PADDL X11,X1
MOVOA X1,X2
PSLLL $9,X1
PXOR X1,X9
PSRLL $23,X2
PXOR X2,X9
MOVOA X14,X1
PADDL X15,X1
MOVOA X1,X2
PSLLL $13,X1
PXOR X1,X13
PSRLL $19,X2
PXOR X2,X13
MOVOA X11,X1
PADDL X9,X1
MOVOA X1,X2
PSLLL $13,X1
PXOR X1,X7
PSRLL $19,X2
PXOR X2,X7
MOVOA X15,X1
PADDL X13,X1
MOVOA X1,X2
PSLLL $18,X1
PXOR X1,X12
PSRLL $14,X2
PXOR X2,X12
MOVOA 320(R12),X1
MOVOA X12,320(R12)
MOVOA X9,X2
PADDL X7,X2
MOVOA X2,X12
PSLLL $18,X2
PXOR X2,X0
PSRLL $14,X12
PXOR X12,X0
MOVOA X5,X2
PADDL X1,X2
MOVOA X2,X12
PSLLL $7,X2
PXOR X2,X3
PSRLL $25,X12
PXOR X12,X3
MOVOA 336(R12),X2
MOVOA X0,336(R12)
MOVOA X6,X0
PADDL X2,X0
MOVOA X0,X12
PSLLL $7,X0
PXOR X0,X4
PSRLL $25,X12
PXOR X12,X4
MOVOA X1,X0
PADDL X3,X0
MOVOA X0,X12
PSLLL $9,X0
PXOR X0,X10
PSRLL $23,X12
PXOR X12,X10
MOVOA X2,X0
PADDL X4,X0
MOVOA X0,X12
PSLLL $9,X0
PXOR X0,X8
PSRLL $23,X12
PXOR X12,X8
MOVOA X3,X0
PADDL X10,X0
MOVOA X0,X12
PSLLL $13,X0
PXOR X0,X5
PSRLL $19,X12
PXOR X12,X5
MOVOA X4,X0
PADDL X8,X0
MOVOA X0,X12
PSLLL $13,X0
PXOR X0,X6
PSRLL $19,X12
PXOR X12,X6
MOVOA X10,X0
PADDL X5,X0
MOVOA X0,X12
PSLLL $18,X0
PXOR X0,X1
PSRLL $14,X12
PXOR X12,X1
MOVOA 320(R12),X0
MOVOA X1,320(R12)
MOVOA X4,X1
PADDL X0,X1
MOVOA X1,X12
PSLLL $7,X1
PXOR X1,X7
PSRLL $25,X12
PXOR X12,X7
MOVOA X8,X1
PADDL X6,X1
MOVOA X1,X12
PSLLL $18,X1
PXOR X1,X2
PSRLL $14,X12
PXOR X12,X2
MOVOA 336(R12),X12
MOVOA X2,336(R12)
MOVOA X14,X1
PADDL X12,X1
MOVOA X1,X2
PSLLL $7,X1
PXOR X1,X5
PSRLL $25,X2
PXOR X2,X5
MOVOA X0,X1
PADDL X7,X1
MOVOA X1,X2
PSLLL $9,X1
PXOR X1,X10
PSRLL $23,X2
PXOR X2,X10
MOVOA X12,X1
PADDL X5,X1
MOVOA X1,X2
PSLLL $9,X1
PXOR X1,X8
PSRLL $23,X2
PXOR X2,X8
MOVOA X7,X1
PADDL X10,X1
MOVOA X1,X2
PSLLL $13,X1
PXOR X1,X4
PSRLL $19,X2
PXOR X2,X4
MOVOA X5,X1
PADDL X8,X1
MOVOA X1,X2
PSLLL $13,X1
PXOR X1,X14
PSRLL $19,X2
PXOR X2,X14
MOVOA X10,X1
PADDL X4,X1
MOVOA X1,X2
PSLLL $18,X1
PXOR X1,X0
PSRLL $14,X2
PXOR X2,X0
MOVOA 320(R12),X1
MOVOA X0,320(R12)
MOVOA X8,X0
PADDL X14,X0
MOVOA X0,X2
PSLLL $18,X0
PXOR X0,X12
PSRLL $14,X2
PXOR X2,X12
MOVOA X11,X0
PADDL X1,X0
MOVOA X0,X2
PSLLL $7,X0
PXOR X0,X6
PSRLL $25,X2
PXOR X2,X6
MOVOA 336(R12),X2
MOVOA X12,336(R12)
MOVOA X3,X0
PADDL X2,X0
MOVOA X0,X12
PSLLL $7,X0
PXOR X0,X13
PSRLL $25,X12
PXOR X12,X13
MOVOA X1,X0
PADDL X6,X0
MOVOA X0,X12
PSLLL $9,X0
PXOR X0,X15
PSRLL $23,X12
PXOR X12,X15
MOVOA X2,X0
PADDL X13,X0
MOVOA X0,X12
PSLLL $9,X0
PXOR X0,X9
PSRLL $23,X12
PXOR X12,X9
MOVOA X6,X0
PADDL X15,X0
MOVOA X0,X12
PSLLL $13,X0
PXOR X0,X11
PSRLL $19,X12
PXOR X12,X11
MOVOA X13,X0
PADDL X9,X0
MOVOA X0,X12
PSLLL $13,X0
PXOR X0,X3
PSRLL $19,X12
PXOR X12,X3
MOVOA X15,X0
PADDL X11,X0
MOVOA X0,X12
PSLLL $18,X0
PXOR X0,X1
PSRLL $14,X12
PXOR X12,X1
MOVOA X9,X0
PADDL X3,X0
MOVOA X0,X12
PSLLL $18,X0
PXOR X0,X2
PSRLL $14,X12
PXOR X12,X2
MOVOA 320(R12),X12
MOVOA 336(R12),X0
SUBQ $2,DX
JA MAINLOOP1
PADDL 112(R12),X12
PADDL 176(R12),X7
PADDL 224(R12),X10
PADDL 272(R12),X4
MOVD X12,DX
MOVD X7,CX
MOVD X10,R8
MOVD X4,R9
PSHUFL $0X39,X12,X12
PSHUFL $0X39,X7,X7
PSHUFL $0X39,X10,X10
PSHUFL $0X39,X4,X4
XORL 0(SI),DX
XORL 4(SI),CX
XORL 8(SI),R8
XORL 12(SI),R9
MOVL DX,0(DI)
MOVL CX,4(DI)
MOVL R8,8(DI)
MOVL R9,12(DI)
MOVD X12,DX
MOVD X7,CX
MOVD X10,R8
MOVD X4,R9
PSHUFL $0X39,X12,X12
PSHUFL $0X39,X7,X7
PSHUFL $0X39,X10,X10
PSHUFL $0X39,X4,X4
XORL 64(SI),DX
XORL 68(SI),CX
XORL 72(SI),R8
XORL 76(SI),R9
MOVL DX,64(DI)
MOVL CX,68(DI)
MOVL R8,72(DI)
MOVL R9,76(DI)
MOVD X12,DX
MOVD X7,CX
MOVD X10,R8
MOVD X4,R9
PSHUFL $0X39,X12,X12
PSHUFL $0X39,X7,X7
PSHUFL $0X39,X10,X10
PSHUFL $0X39,X4,X4
XORL 128(SI),DX
XORL 132(SI),CX
XORL 136(SI),R8
XORL 140(SI),R9
MOVL DX,128(DI)
MOVL CX,132(DI)
MOVL R8,136(DI)
MOVL R9,140(DI)
MOVD X12,DX
MOVD X7,CX
MOVD X10,R8
MOVD X4,R9
XORL 192(SI),DX
XORL 196(SI),CX
XORL 200(SI),R8
XORL 204(SI),R9
MOVL DX,192(DI)
MOVL CX,196(DI)
MOVL R8,200(DI)
MOVL R9,204(DI)
PADDL 240(R12),X14
PADDL 64(R12),X0
PADDL 128(R12),X5
PADDL 192(R12),X8
MOVD X14,DX
MOVD X0,CX
MOVD X5,R8
MOVD X8,R9
PSHUFL $0X39,X14,X14
PSHUFL $0X39,X0,X0
PSHUFL $0X39,X5,X5
PSHUFL $0X39,X8,X8
XORL 16(SI),DX
XORL 20(SI),CX
XORL 24(SI),R8
XORL 28(SI),R9
MOVL DX,16(DI)
MOVL CX,20(DI)
MOVL R8,24(DI)
MOVL R9,28(DI)
MOVD X14,DX
MOVD X0,CX
MOVD X5,R8
MOVD X8,R9
PSHUFL $0X39,X14,X14
PSHUFL $0X39,X0,X0
PSHUFL $0X39,X5,X5
PSHUFL $0X39,X8,X8
XORL 80(SI),DX
XORL 84(SI),CX
XORL 88(SI),R8
XORL 92(SI),R9
MOVL DX,80(DI)
MOVL CX,84(DI)
MOVL R8,88(DI)
MOVL R9,92(DI)
MOVD X14,DX
MOVD X0,CX
MOVD X5,R8
MOVD X8,R9
PSHUFL $0X39,X14,X14
PSHUFL $0X39,X0,X0
PSHUFL $0X39,X5,X5
PSHUFL $0X39,X8,X8
XORL 144(SI),DX
XORL 148(SI),CX
XORL 152(SI),R8
XORL 156(SI),R9
MOVL DX,144(DI)
MOVL CX,148(DI)
MOVL R8,152(DI)
MOVL R9,156(DI)
MOVD X14,DX
MOVD X0,CX
MOVD X5,R8
MOVD X8,R9
XORL 208(SI),DX
XORL 212(SI),CX
XORL 216(SI),R8
XORL 220(SI),R9
MOVL DX,208(DI)
MOVL CX,212(DI)
MOVL R8,216(DI)
MOVL R9,220(DI)
PADDL 288(R12),X15
PADDL 304(R12),X11
PADDL 80(R12),X1
PADDL 144(R12),X6
MOVD X15,DX
MOVD X11,CX
MOVD X1,R8
MOVD X6,R9
PSHUFL $0X39,X15,X15
PSHUFL $0X39,X11,X11
PSHUFL $0X39,X1,X1
PSHUFL $0X39,X6,X6
XORL 32(SI),DX
XORL 36(SI),CX
XORL 40(SI),R8
XORL 44(SI),R9
MOVL DX,32(DI)
MOVL CX,36(DI)
MOVL R8,40(DI)
MOVL R9,44(DI)
MOVD X15,DX
MOVD X11,CX
MOVD X1,R8
MOVD X6,R9
PSHUFL $0X39,X15,X15
PSHUFL $0X39,X11,X11
PSHUFL $0X39,X1,X1
PSHUFL $0X39,X6,X6
XORL 96(SI),DX
XORL 100(SI),CX
XORL 104(SI),R8
XORL 108(SI),R9
MOVL DX,96(DI)
MOVL CX,100(DI)
MOVL R8,104(DI)
MOVL R9,108(DI)
MOVD X15,DX
MOVD X11,CX
MOVD X1,R8
MOVD X6,R9
PSHUFL $0X39,X15,X15
PSHUFL $0X39,X11,X11
PSHUFL $0X39,X1,X1
PSHUFL $0X39,X6,X6
XORL 160(SI),DX
XORL 164(SI),CX
XORL 168(SI),R8
XORL 172(SI),R9
MOVL DX,160(DI)
MOVL CX,164(DI)
MOVL R8,168(DI)
MOVL R9,172(DI)
MOVD X15,DX
MOVD X11,CX
MOVD X1,R8
MOVD X6,R9
XORL 224(SI),DX
XORL 228(SI),CX
XORL 232(SI),R8
XORL 236(SI),R9
MOVL DX,224(DI)
MOVL CX,228(DI)
MOVL R8,232(DI)
MOVL R9,236(DI)
PADDL 160(R12),X13
PADDL 208(R12),X9
PADDL 256(R12),X3
PADDL 96(R12),X2
MOVD X13,DX
MOVD X9,CX
MOVD X3,R8
MOVD X2,R9
PSHUFL $0X39,X13,X13
PSHUFL $0X39,X9,X9
PSHUFL $0X39,X3,X3
PSHUFL $0X39,X2,X2
XORL 48(SI),DX
XORL 52(SI),CX
XORL 56(SI),R8
XORL 60(SI),R9
MOVL DX,48(DI)
MOVL CX,52(DI)
MOVL R8,56(DI)
MOVL R9,60(DI)
MOVD X13,DX
MOVD X9,CX
MOVD X3,R8
MOVD X2,R9
PSHUFL $0X39,X13,X13
PSHUFL $0X39,X9,X9
PSHUFL $0X39,X3,X3
PSHUFL $0X39,X2,X2
XORL 112(SI),DX
XORL 116(SI),CX
XORL 120(SI),R8
XORL 124(SI),R9
MOVL DX,112(DI)
MOVL CX,116(DI)
MOVL R8,120(DI)
MOVL R9,124(DI)
MOVD X13,DX
MOVD X9,CX
MOVD X3,R8
MOVD X2,R9
PSHUFL $0X39,X13,X13
PSHUFL $0X39,X9,X9
PSHUFL $0X39,X3,X3
PSHUFL $0X39,X2,X2
XORL 176(SI),DX
XORL 180(SI),CX
XORL 184(SI),R8
XORL 188(SI),R9
MOVL DX,176(DI)
MOVL CX,180(DI)
MOVL R8,184(DI)
MOVL R9,188(DI)
MOVD X13,DX
MOVD X9,CX
MOVD X3,R8
MOVD X2,R9
XORL 240(SI),DX
XORL 244(SI),CX
XORL 248(SI),R8
XORL 252(SI),R9
MOVL DX,240(DI)
MOVL CX,244(DI)
MOVL R8,248(DI)
MOVL R9,252(DI)
MOVQ 352(R12),R9
SUBQ $256,R9
ADDQ $256,SI
ADDQ $256,DI
CMPQ R9,$256
JAE BYTESATLEAST256
CMPQ R9,$0
JBE DONE
BYTESBETWEEN1AND255:
CMPQ R9,$64
JAE NOCOPY
MOVQ DI,DX
LEAQ 360(R12),DI
MOVQ R9,CX
REP; MOVSB
LEAQ 360(R12),DI
LEAQ 360(R12),SI
NOCOPY:
MOVQ R9,352(R12)
MOVOA 48(R12),X0
MOVOA 0(R12),X1
MOVOA 16(R12),X2
MOVOA 32(R12),X3
MOVOA X1,X4
MOVQ $20,CX
MAINLOOP2:
PADDL X0,X4
MOVOA X0,X5
MOVOA X4,X6
PSLLL $7,X4
PSRLL $25,X6
PXOR X4,X3
PXOR X6,X3
PADDL X3,X5
MOVOA X3,X4
MOVOA X5,X6
PSLLL $9,X5
PSRLL $23,X6
PXOR X5,X2
PSHUFL $0X93,X3,X3
PXOR X6,X2
PADDL X2,X4
MOVOA X2,X5
MOVOA X4,X6
PSLLL $13,X4
PSRLL $19,X6
PXOR X4,X1
PSHUFL $0X4E,X2,X2
PXOR X6,X1
PADDL X1,X5
MOVOA X3,X4
MOVOA X5,X6
PSLLL $18,X5
PSRLL $14,X6
PXOR X5,X0
PSHUFL $0X39,X1,X1
PXOR X6,X0
PADDL X0,X4
MOVOA X0,X5
MOVOA X4,X6
PSLLL $7,X4
PSRLL $25,X6
PXOR X4,X1
PXOR X6,X1
PADDL X1,X5
MOVOA X1,X4
MOVOA X5,X6
PSLLL $9,X5
PSRLL $23,X6
PXOR X5,X2
PSHUFL $0X93,X1,X1
PXOR X6,X2
PADDL X2,X4
MOVOA X2,X5
MOVOA X4,X6
PSLLL $13,X4
PSRLL $19,X6
PXOR X4,X3
PSHUFL $0X4E,X2,X2
PXOR X6,X3
PADDL X3,X5
MOVOA X1,X4
MOVOA X5,X6
PSLLL $18,X5
PSRLL $14,X6
PXOR X5,X0
PSHUFL $0X39,X3,X3
PXOR X6,X0
PADDL X0,X4
MOVOA X0,X5
MOVOA X4,X6
PSLLL $7,X4
PSRLL $25,X6
PXOR X4,X3
PXOR X6,X3
PADDL X3,X5
MOVOA X3,X4
MOVOA X5,X6
PSLLL $9,X5
PSRLL $23,X6
PXOR X5,X2
PSHUFL $0X93,X3,X3
PXOR X6,X2
PADDL X2,X4
MOVOA X2,X5
MOVOA X4,X6
PSLLL $13,X4
PSRLL $19,X6
PXOR X4,X1
PSHUFL $0X4E,X2,X2
PXOR X6,X1
PADDL X1,X5
MOVOA X3,X4
MOVOA X5,X6
PSLLL $18,X5
PSRLL $14,X6
PXOR X5,X0
PSHUFL $0X39,X1,X1
PXOR X6,X0
PADDL X0,X4
MOVOA X0,X5
MOVOA X4,X6
PSLLL $7,X4
PSRLL $25,X6
PXOR X4,X1
PXOR X6,X1
PADDL X1,X5
MOVOA X1,X4
MOVOA X5,X6
PSLLL $9,X5
PSRLL $23,X6
PXOR X5,X2
PSHUFL $0X93,X1,X1
PXOR X6,X2
PADDL X2,X4
MOVOA X2,X5
MOVOA X4,X6
PSLLL $13,X4
PSRLL $19,X6
PXOR X4,X3
PSHUFL $0X4E,X2,X2
PXOR X6,X3
SUBQ $4,CX
PADDL X3,X5
MOVOA X1,X4
MOVOA X5,X6
PSLLL $18,X5
PXOR X7,X7
PSRLL $14,X6
PXOR X5,X0
PSHUFL $0X39,X3,X3
PXOR X6,X0
JA MAINLOOP2
PADDL 48(R12),X0
PADDL 0(R12),X1
PADDL 16(R12),X2
PADDL 32(R12),X3
MOVD X0,CX
MOVD X1,R8
MOVD X2,R9
MOVD X3,AX
PSHUFL $0X39,X0,X0
PSHUFL $0X39,X1,X1
PSHUFL $0X39,X2,X2
PSHUFL $0X39,X3,X3
XORL 0(SI),CX
XORL 48(SI),R8
XORL 32(SI),R9
XORL 16(SI),AX
MOVL CX,0(DI)
MOVL R8,48(DI)
MOVL R9,32(DI)
MOVL AX,16(DI)
MOVD X0,CX
MOVD X1,R8
MOVD X2,R9
MOVD X3,AX
PSHUFL $0X39,X0,X0
PSHUFL $0X39,X1,X1
PSHUFL $0X39,X2,X2
PSHUFL $0X39,X3,X3
XORL 20(SI),CX
XORL 4(SI),R8
XORL 52(SI),R9
XORL 36(SI),AX
MOVL CX,20(DI)
MOVL R8,4(DI)
MOVL R9,52(DI)
MOVL AX,36(DI)
MOVD X0,CX
MOVD X1,R8
MOVD X2,R9
MOVD X3,AX
PSHUFL $0X39,X0,X0
PSHUFL $0X39,X1,X1
PSHUFL $0X39,X2,X2
PSHUFL $0X39,X3,X3
XORL 40(SI),CX
XORL 24(SI),R8
XORL 8(SI),R9
XORL 56(SI),AX
MOVL CX,40(DI)
MOVL R8,24(DI)
MOVL R9,8(DI)
MOVL AX,56(DI)
MOVD X0,CX
MOVD X1,R8
MOVD X2,R9
MOVD X3,AX
XORL 60(SI),CX
XORL 44(SI),R8
XORL 28(SI),R9
XORL 12(SI),AX
MOVL CX,60(DI)
MOVL R8,44(DI)
MOVL R9,28(DI)
MOVL AX,12(DI)
MOVQ 352(R12),R9
MOVL 16(R12),CX
MOVL 36 (R12),R8
ADDQ $1,CX
SHLQ $32,R8
ADDQ R8,CX
MOVQ CX,R8
SHRQ $32,R8
MOVL CX,16(R12)
MOVL R8, 36 (R12)
CMPQ R9,$64
JA BYTESATLEAST65
JAE BYTESATLEAST64
MOVQ DI,SI
MOVQ DX,DI
MOVQ R9,CX
REP; MOVSB
BYTESATLEAST64:
DONE:
RET
BYTESATLEAST65:
SUBQ $64,R9
ADDQ $64,DI
ADDQ $64,SI
JMP BYTESBETWEEN1AND255

View File

@ -0,0 +1,15 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !amd64 || purego || !gc
// +build !amd64 purego !gc
package salsa
// XORKeyStream crypts bytes from in to out using the given key and counters.
// In and out must overlap entirely or not at all. Counter
// contains the raw salsa20 counter bytes (both nonce and block counter).
func XORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) {
genericXORKeyStream(out, in, counter, key)
}

231
vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go generated vendored Normal file
View File

@ -0,0 +1,231 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package salsa
const rounds = 20
// core applies the Salsa20 core function to 16-byte input in, 32-byte key k,
// and 16-byte constant c, and puts the result into 64-byte array out.
func core(out *[64]byte, in *[16]byte, k *[32]byte, c *[16]byte) {
j0 := uint32(c[0]) | uint32(c[1])<<8 | uint32(c[2])<<16 | uint32(c[3])<<24
j1 := uint32(k[0]) | uint32(k[1])<<8 | uint32(k[2])<<16 | uint32(k[3])<<24
j2 := uint32(k[4]) | uint32(k[5])<<8 | uint32(k[6])<<16 | uint32(k[7])<<24
j3 := uint32(k[8]) | uint32(k[9])<<8 | uint32(k[10])<<16 | uint32(k[11])<<24
j4 := uint32(k[12]) | uint32(k[13])<<8 | uint32(k[14])<<16 | uint32(k[15])<<24
j5 := uint32(c[4]) | uint32(c[5])<<8 | uint32(c[6])<<16 | uint32(c[7])<<24
j6 := uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24
j7 := uint32(in[4]) | uint32(in[5])<<8 | uint32(in[6])<<16 | uint32(in[7])<<24
j8 := uint32(in[8]) | uint32(in[9])<<8 | uint32(in[10])<<16 | uint32(in[11])<<24
j9 := uint32(in[12]) | uint32(in[13])<<8 | uint32(in[14])<<16 | uint32(in[15])<<24
j10 := uint32(c[8]) | uint32(c[9])<<8 | uint32(c[10])<<16 | uint32(c[11])<<24
j11 := uint32(k[16]) | uint32(k[17])<<8 | uint32(k[18])<<16 | uint32(k[19])<<24
j12 := uint32(k[20]) | uint32(k[21])<<8 | uint32(k[22])<<16 | uint32(k[23])<<24
j13 := uint32(k[24]) | uint32(k[25])<<8 | uint32(k[26])<<16 | uint32(k[27])<<24
j14 := uint32(k[28]) | uint32(k[29])<<8 | uint32(k[30])<<16 | uint32(k[31])<<24
j15 := uint32(c[12]) | uint32(c[13])<<8 | uint32(c[14])<<16 | uint32(c[15])<<24
x0, x1, x2, x3, x4, x5, x6, x7, x8 := j0, j1, j2, j3, j4, j5, j6, j7, j8
x9, x10, x11, x12, x13, x14, x15 := j9, j10, j11, j12, j13, j14, j15
for i := 0; i < rounds; i += 2 {
u := x0 + x12
x4 ^= u<<7 | u>>(32-7)
u = x4 + x0
x8 ^= u<<9 | u>>(32-9)
u = x8 + x4
x12 ^= u<<13 | u>>(32-13)
u = x12 + x8
x0 ^= u<<18 | u>>(32-18)
u = x5 + x1
x9 ^= u<<7 | u>>(32-7)
u = x9 + x5
x13 ^= u<<9 | u>>(32-9)
u = x13 + x9
x1 ^= u<<13 | u>>(32-13)
u = x1 + x13
x5 ^= u<<18 | u>>(32-18)
u = x10 + x6
x14 ^= u<<7 | u>>(32-7)
u = x14 + x10
x2 ^= u<<9 | u>>(32-9)
u = x2 + x14
x6 ^= u<<13 | u>>(32-13)
u = x6 + x2
x10 ^= u<<18 | u>>(32-18)
u = x15 + x11
x3 ^= u<<7 | u>>(32-7)
u = x3 + x15
x7 ^= u<<9 | u>>(32-9)
u = x7 + x3
x11 ^= u<<13 | u>>(32-13)
u = x11 + x7
x15 ^= u<<18 | u>>(32-18)
u = x0 + x3
x1 ^= u<<7 | u>>(32-7)
u = x1 + x0
x2 ^= u<<9 | u>>(32-9)
u = x2 + x1
x3 ^= u<<13 | u>>(32-13)
u = x3 + x2
x0 ^= u<<18 | u>>(32-18)
u = x5 + x4
x6 ^= u<<7 | u>>(32-7)
u = x6 + x5
x7 ^= u<<9 | u>>(32-9)
u = x7 + x6
x4 ^= u<<13 | u>>(32-13)
u = x4 + x7
x5 ^= u<<18 | u>>(32-18)
u = x10 + x9
x11 ^= u<<7 | u>>(32-7)
u = x11 + x10
x8 ^= u<<9 | u>>(32-9)
u = x8 + x11
x9 ^= u<<13 | u>>(32-13)
u = x9 + x8
x10 ^= u<<18 | u>>(32-18)
u = x15 + x14
x12 ^= u<<7 | u>>(32-7)
u = x12 + x15
x13 ^= u<<9 | u>>(32-9)
u = x13 + x12
x14 ^= u<<13 | u>>(32-13)
u = x14 + x13
x15 ^= u<<18 | u>>(32-18)
}
x0 += j0
x1 += j1
x2 += j2
x3 += j3
x4 += j4
x5 += j5
x6 += j6
x7 += j7
x8 += j8
x9 += j9
x10 += j10
x11 += j11
x12 += j12
x13 += j13
x14 += j14
x15 += j15
out[0] = byte(x0)
out[1] = byte(x0 >> 8)
out[2] = byte(x0 >> 16)
out[3] = byte(x0 >> 24)
out[4] = byte(x1)
out[5] = byte(x1 >> 8)
out[6] = byte(x1 >> 16)
out[7] = byte(x1 >> 24)
out[8] = byte(x2)
out[9] = byte(x2 >> 8)
out[10] = byte(x2 >> 16)
out[11] = byte(x2 >> 24)
out[12] = byte(x3)
out[13] = byte(x3 >> 8)
out[14] = byte(x3 >> 16)
out[15] = byte(x3 >> 24)
out[16] = byte(x4)
out[17] = byte(x4 >> 8)
out[18] = byte(x4 >> 16)
out[19] = byte(x4 >> 24)
out[20] = byte(x5)
out[21] = byte(x5 >> 8)
out[22] = byte(x5 >> 16)
out[23] = byte(x5 >> 24)
out[24] = byte(x6)
out[25] = byte(x6 >> 8)
out[26] = byte(x6 >> 16)
out[27] = byte(x6 >> 24)
out[28] = byte(x7)
out[29] = byte(x7 >> 8)
out[30] = byte(x7 >> 16)
out[31] = byte(x7 >> 24)
out[32] = byte(x8)
out[33] = byte(x8 >> 8)
out[34] = byte(x8 >> 16)
out[35] = byte(x8 >> 24)
out[36] = byte(x9)
out[37] = byte(x9 >> 8)
out[38] = byte(x9 >> 16)
out[39] = byte(x9 >> 24)
out[40] = byte(x10)
out[41] = byte(x10 >> 8)
out[42] = byte(x10 >> 16)
out[43] = byte(x10 >> 24)
out[44] = byte(x11)
out[45] = byte(x11 >> 8)
out[46] = byte(x11 >> 16)
out[47] = byte(x11 >> 24)
out[48] = byte(x12)
out[49] = byte(x12 >> 8)
out[50] = byte(x12 >> 16)
out[51] = byte(x12 >> 24)
out[52] = byte(x13)
out[53] = byte(x13 >> 8)
out[54] = byte(x13 >> 16)
out[55] = byte(x13 >> 24)
out[56] = byte(x14)
out[57] = byte(x14 >> 8)
out[58] = byte(x14 >> 16)
out[59] = byte(x14 >> 24)
out[60] = byte(x15)
out[61] = byte(x15 >> 8)
out[62] = byte(x15 >> 16)
out[63] = byte(x15 >> 24)
}
// genericXORKeyStream is the generic implementation of XORKeyStream to be used
// when no assembly implementation is available.
func genericXORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) {
var block [64]byte
var counterCopy [16]byte
copy(counterCopy[:], counter[:])
for len(in) >= 64 {
core(&block, &counterCopy, key, &Sigma)
for i, x := range block {
out[i] = in[i] ^ x
}
u := uint32(1)
for i := 8; i < 16; i++ {
u += uint32(counterCopy[i])
counterCopy[i] = byte(u)
u >>= 8
}
in = in[64:]
out = out[64:]
}
if len(in) > 0 {
core(&block, &counterCopy, key, &Sigma)
for i, v := range in {
out[i] = v ^ block[i]
}
}
}
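
The per-block counter update above is an 8-byte little-endian increment with carry over bytes 8..15 of the counter copy. A standalone sketch of just that loop (same logic as the lines above, restated for illustration):

package main

import "fmt"

// bump replicates the counter increment applied after each 64-byte block:
// add 1 to the little-endian integer stored in counter[8:16].
func bump(counter *[16]byte) {
    u := uint32(1)
    for i := 8; i < 16; i++ {
        u += uint32(counter[i])
        counter[i] = byte(u)
        u >>= 8
    }
}

func main() {
    var c [16]byte
    c[8] = 0xff // the next increment carries into byte 9
    bump(&c)
    fmt.Printf("% x\n", c[8:16]) // prints: 00 01 00 00 00 00 00 00
}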

18
vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s generated vendored Normal file
View File

@ -0,0 +1,18 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build gc
// +build gc
#include "textflag.h"
//
// System calls for ppc64, AIX are implemented in runtime/syscall_aix.go
//
TEXT ·syscall6(SB),NOSPLIT,$0-88
JMP syscall·syscall6(SB)
TEXT ·rawSyscall6(SB),NOSPLIT,$0-88
JMP syscall·rawSyscall6(SB)

65
vendor/golang.org/x/sys/cpu/byteorder.go generated vendored Normal file
View File

@ -0,0 +1,65 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cpu
import (
"runtime"
)
// byteOrder is a subset of encoding/binary.ByteOrder.
type byteOrder interface {
Uint32([]byte) uint32
Uint64([]byte) uint64
}
type littleEndian struct{}
type bigEndian struct{}
func (littleEndian) Uint32(b []byte) uint32 {
_ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
}
func (littleEndian) Uint64(b []byte) uint64 {
_ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
}
func (bigEndian) Uint32(b []byte) uint32 {
_ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
return uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
}
func (bigEndian) Uint64(b []byte) uint64 {
_ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 |
uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56
}
// hostByteOrder returns littleEndian on little-endian machines and
// bigEndian on big-endian machines.
func hostByteOrder() byteOrder {
switch runtime.GOARCH {
case "386", "amd64", "amd64p32",
"alpha",
"arm", "arm64",
"mipsle", "mips64le", "mips64p32le",
"nios2",
"ppc64le",
"riscv", "riscv64",
"sh":
return littleEndian{}
case "armbe", "arm64be",
"m68k",
"mips", "mips64", "mips64p32",
"ppc", "ppc64",
"s390", "s390x",
"shbe",
"sparc", "sparc64":
return bigEndian{}
}
panic("unknown architecture")
}
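
For reference, the little-endian decode above is the same computation the standard library performs; a tiny standalone check, illustrative only:

package main

import (
    "encoding/binary"
    "fmt"
)

func main() {
    b := []byte{0x78, 0x56, 0x34, 0x12}
    // Same expression as littleEndian.Uint32 above.
    v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
    fmt.Printf("%#x\n", v)                          // 0x12345678
    fmt.Println(v == binary.LittleEndian.Uint32(b)) // true
}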

287
vendor/golang.org/x/sys/cpu/cpu.go generated vendored Normal file
View File

@ -0,0 +1,287 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package cpu implements processor feature detection for
// various CPU architectures.
package cpu
import (
"os"
"strings"
)
// Initialized reports whether the CPU features were initialized.
//
// For some GOOS/GOARCH combinations initialization of the CPU features depends
// on reading an operating-system-specific file, e.g. /proc/self/auxv on linux/arm.
// Initialized will report false if reading the file fails.
var Initialized bool
// CacheLinePad is used to pad structs to avoid false sharing.
type CacheLinePad struct{ _ [cacheLineSize]byte }
// X86 contains the supported CPU features of the
// current X86/AMD64 platform. If the current platform
// is not X86/AMD64 then all feature flags are false.
//
// X86 is padded to avoid false sharing. Further, HasAVX
// and HasAVX2 are only set if the OS supports XMM and YMM
// registers in addition to the CPUID feature bit being set.
var X86 struct {
_ CacheLinePad
HasAES bool // AES hardware implementation (AES NI)
HasADX bool // Multi-precision add-carry instruction extensions
HasAVX bool // Advanced vector extension
HasAVX2 bool // Advanced vector extension 2
HasAVX512 bool // Advanced vector extension 512
HasAVX512F bool // Advanced vector extension 512 Foundation Instructions
HasAVX512CD bool // Advanced vector extension 512 Conflict Detection Instructions
HasAVX512ER bool // Advanced vector extension 512 Exponential and Reciprocal Instructions
HasAVX512PF bool // Advanced vector extension 512 Prefetch Instructions
HasAVX512VL bool // Advanced vector extension 512 Vector Length Extensions
HasAVX512BW bool // Advanced vector extension 512 Byte and Word Instructions
HasAVX512DQ bool // Advanced vector extension 512 Doubleword and Quadword Instructions
HasAVX512IFMA bool // Advanced vector extension 512 Integer Fused Multiply Add
HasAVX512VBMI bool // Advanced vector extension 512 Vector Byte Manipulation Instructions
HasAVX5124VNNIW bool // Advanced vector extension 512 Vector Neural Network Instructions Word variable precision
HasAVX5124FMAPS bool // Advanced vector extension 512 Fused Multiply Accumulation Packed Single precision
HasAVX512VPOPCNTDQ bool // Advanced vector extension 512 Double and quad word population count instructions
HasAVX512VPCLMULQDQ bool // Advanced vector extension 512 Vector carry-less multiply operations
HasAVX512VNNI bool // Advanced vector extension 512 Vector Neural Network Instructions
HasAVX512GFNI bool // Advanced vector extension 512 Galois field New Instructions
HasAVX512VAES bool // Advanced vector extension 512 Vector AES instructions
HasAVX512VBMI2 bool // Advanced vector extension 512 Vector Byte Manipulation Instructions 2
HasAVX512BITALG bool // Advanced vector extension 512 Bit Algorithms
HasAVX512BF16 bool // Advanced vector extension 512 BFloat16 Instructions
HasBMI1 bool // Bit manipulation instruction set 1
HasBMI2 bool // Bit manipulation instruction set 2
HasCX16 bool // Compare and exchange 16 Bytes
HasERMS bool // Enhanced REP for MOVSB and STOSB
HasFMA bool // Fused-multiply-add instructions
HasOSXSAVE bool // OS supports XSAVE/XRESTOR for saving/restoring XMM registers.
HasPCLMULQDQ bool // PCLMULQDQ instruction - most often used for AES-GCM
HasPOPCNT bool // Hamming weight instruction POPCNT.
HasRDRAND bool // RDRAND instruction (on-chip random number generator)
HasRDSEED bool // RDSEED instruction (on-chip random number generator)
HasSSE2 bool // Streaming SIMD extension 2 (always available on amd64)
HasSSE3 bool // Streaming SIMD extension 3
HasSSSE3 bool // Supplemental streaming SIMD extension 3
HasSSE41 bool // Streaming SIMD extension 4 and 4.1
HasSSE42 bool // Streaming SIMD extension 4 and 4.2
_ CacheLinePad
}
// ARM64 contains the supported CPU features of the
// current ARMv8(aarch64) platform. If the current platform
// is not arm64 then all feature flags are false.
var ARM64 struct {
_ CacheLinePad
HasFP bool // Floating-point instruction set (always available)
HasASIMD bool // Advanced SIMD (always available)
HasEVTSTRM bool // Event stream support
HasAES bool // AES hardware implementation
HasPMULL bool // Polynomial multiplication instruction set
HasSHA1 bool // SHA1 hardware implementation
HasSHA2 bool // SHA2 hardware implementation
HasCRC32 bool // CRC32 hardware implementation
HasATOMICS bool // Atomic memory operation instruction set
HasFPHP bool // Half precision floating-point instruction set
HasASIMDHP bool // Advanced SIMD half precision instruction set
HasCPUID bool // CPUID identification scheme registers
HasASIMDRDM bool // Rounding double multiply add/subtract instruction set
HasJSCVT bool // Javascript conversion from floating-point to integer
HasFCMA bool // Floating-point multiplication and addition of complex numbers
HasLRCPC bool // Release Consistent processor consistent support
HasDCPOP bool // Persistent memory support
HasSHA3 bool // SHA3 hardware implementation
HasSM3 bool // SM3 hardware implementation
HasSM4 bool // SM4 hardware implementation
HasASIMDDP bool // Advanced SIMD double precision instruction set
HasSHA512 bool // SHA512 hardware implementation
HasSVE bool // Scalable Vector Extensions
HasASIMDFHM bool // Advanced SIMD multiplication FP16 to FP32
_ CacheLinePad
}
// ARM contains the supported CPU features of the current ARM (32-bit) platform.
// All feature flags are false if:
// 1. the current platform is not arm, or
// 2. the current operating system is not Linux.
var ARM struct {
_ CacheLinePad
HasSWP bool // SWP instruction support
HasHALF bool // Half-word load and store support
HasTHUMB bool // ARM Thumb instruction set
Has26BIT bool // Address space limited to 26-bits
HasFASTMUL bool // 32-bit operand, 64-bit result multiplication support
HasFPA bool // Floating point arithmetic support
HasVFP bool // Vector floating point support
HasEDSP bool // DSP Extensions support
HasJAVA bool // Java instruction set
HasIWMMXT bool // Intel Wireless MMX technology support
HasCRUNCH bool // MaverickCrunch context switching and handling
HasTHUMBEE bool // Thumb EE instruction set
HasNEON bool // NEON instruction set
HasVFPv3 bool // Vector floating point version 3 support
HasVFPv3D16 bool // Vector floating point version 3 D8-D15
HasTLS bool // Thread local storage support
HasVFPv4 bool // Vector floating point version 4 support
HasIDIVA bool // Integer divide instruction support in ARM mode
HasIDIVT bool // Integer divide instruction support in Thumb mode
HasVFPD32 bool // Vector floating point version 3 D15-D31
HasLPAE bool // Large Physical Address Extensions
HasEVTSTRM bool // Event stream support
HasAES bool // AES hardware implementation
HasPMULL bool // Polynomial multiplication instruction set
HasSHA1 bool // SHA1 hardware implementation
HasSHA2 bool // SHA2 hardware implementation
HasCRC32 bool // CRC32 hardware implementation
_ CacheLinePad
}
// MIPS64X contains the supported CPU features of the current mips64/mips64le
// platforms. If the current platform is not mips64/mips64le or the current
// operating system is not Linux then all feature flags are false.
var MIPS64X struct {
_ CacheLinePad
HasMSA bool // MIPS SIMD architecture
_ CacheLinePad
}
// PPC64 contains the supported CPU features of the current ppc64/ppc64le platforms.
// If the current platform is not ppc64/ppc64le then all feature flags are false.
//
// For ppc64/ppc64le, it is safe to check only for ISA level starting on ISA v3.00,
// since there are no optional categories. There are some exceptions that also
// require kernel support to work (DARN, SCV), so there are feature bits for
// those as well. The struct is padded to avoid false sharing.
var PPC64 struct {
_ CacheLinePad
HasDARN bool // Hardware random number generator (requires kernel enablement)
HasSCV bool // Syscall vectored (requires kernel enablement)
IsPOWER8 bool // ISA v2.07 (POWER8)
IsPOWER9 bool // ISA v3.00 (POWER9), implies IsPOWER8
_ CacheLinePad
}
// S390X contains the supported CPU features of the current IBM Z
// (s390x) platform. If the current platform is not IBM Z then all
// feature flags are false.
//
// S390X is padded to avoid false sharing. Further HasVX is only set
// if the OS supports vector registers in addition to the STFLE
// feature bit being set.
var S390X struct {
_ CacheLinePad
HasZARCH bool // z/Architecture mode is active [mandatory]
HasSTFLE bool // store facility list extended
HasLDISP bool // long (20-bit) displacements
HasEIMM bool // 32-bit immediates
HasDFP bool // decimal floating point
HasETF3EH bool // ETF-3 enhanced
HasMSA bool // message security assist (CPACF)
HasAES bool // KM-AES{128,192,256} functions
HasAESCBC bool // KMC-AES{128,192,256} functions
HasAESCTR bool // KMCTR-AES{128,192,256} functions
HasAESGCM bool // KMA-GCM-AES{128,192,256} functions
HasGHASH bool // KIMD-GHASH function
HasSHA1 bool // K{I,L}MD-SHA-1 functions
HasSHA256 bool // K{I,L}MD-SHA-256 functions
HasSHA512 bool // K{I,L}MD-SHA-512 functions
HasSHA3 bool // K{I,L}MD-SHA3-{224,256,384,512} and K{I,L}MD-SHAKE-{128,256} functions
HasVX bool // vector facility
HasVXE bool // vector-enhancements facility 1
_ CacheLinePad
}
func init() {
archInit()
initOptions()
processOptions()
}
// options contains the cpu debug options that can be used in GODEBUG.
// Options are arch dependent and are added by the arch specific initOptions functions.
// Features that are mandatory for the specific GOARCH should have the Required field set
// (e.g. SSE2 on amd64).
var options []option
// Option names should be lower case. e.g. avx instead of AVX.
type option struct {
Name string
Feature *bool
Specified bool // whether feature value was specified in GODEBUG
Enable bool // whether feature should be enabled
Required bool // whether feature is mandatory and can not be disabled
}
func processOptions() {
env := os.Getenv("GODEBUG")
field:
for env != "" {
field := ""
i := strings.IndexByte(env, ',')
if i < 0 {
field, env = env, ""
} else {
field, env = env[:i], env[i+1:]
}
if len(field) < 4 || field[:4] != "cpu." {
continue
}
i = strings.IndexByte(field, '=')
if i < 0 {
print("GODEBUG sys/cpu: no value specified for \"", field, "\"\n")
continue
}
key, value := field[4:i], field[i+1:] // e.g. "SSE2", "on"
var enable bool
switch value {
case "on":
enable = true
case "off":
enable = false
default:
print("GODEBUG sys/cpu: value \"", value, "\" not supported for cpu option \"", key, "\"\n")
continue field
}
if key == "all" {
for i := range options {
options[i].Specified = true
options[i].Enable = enable || options[i].Required
}
continue field
}
for i := range options {
if options[i].Name == key {
options[i].Specified = true
options[i].Enable = enable
continue field
}
}
print("GODEBUG sys/cpu: unknown cpu feature \"", key, "\"\n")
}
for _, o := range options {
if !o.Specified {
continue
}
if o.Enable && !*o.Feature {
print("GODEBUG sys/cpu: can not enable \"", o.Name, "\", missing CPU support\n")
continue
}
if !o.Enable && o.Required {
print("GODEBUG sys/cpu: can not disable \"", o.Name, "\", required CPU feature\n")
continue
}
*o.Feature = o.Enable
}
}
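
processOptions above parses GODEBUG entries of the form cpu.<feature>=on|off (plus cpu.all), so a detected, non-required feature can be soft-disabled at startup. A hedged usage sketch, assuming the vendored import path shown above; the program name and the avx2 choice are just examples:

package main

import (
    "fmt"

    "golang.org/x/sys/cpu"
)

func main() {
    // Run as, e.g.:  GODEBUG=cpu.avx2=off ./prog
    // to force X86.HasAVX2 to false even on AVX2-capable amd64 hardware;
    // the option is applied by the package's init via processOptions.
    fmt.Println("AVX2:", cpu.X86.HasAVX2)
    fmt.Println("initialized:", cpu.Initialized)
}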

34
vendor/golang.org/x/sys/cpu/cpu_aix.go generated vendored Normal file
View File

@ -0,0 +1,34 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build aix
// +build aix
package cpu
const (
// getsystemcfg constants
_SC_IMPL = 2
_IMPL_POWER8 = 0x10000
_IMPL_POWER9 = 0x20000
)
func archInit() {
impl := getsystemcfg(_SC_IMPL)
if impl&_IMPL_POWER8 != 0 {
PPC64.IsPOWER8 = true
}
if impl&_IMPL_POWER9 != 0 {
PPC64.IsPOWER8 = true
PPC64.IsPOWER9 = true
}
Initialized = true
}
func getsystemcfg(label int) (n uint64) {
r0, _ := callgetsystemcfg(label)
n = uint64(r0)
return
}

73
vendor/golang.org/x/sys/cpu/cpu_arm.go generated vendored Normal file
View File

@ -0,0 +1,73 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cpu
const cacheLineSize = 32
// HWCAP/HWCAP2 bits.
// These are specific to Linux.
const (
hwcap_SWP = 1 << 0
hwcap_HALF = 1 << 1
hwcap_THUMB = 1 << 2
hwcap_26BIT = 1 << 3
hwcap_FAST_MULT = 1 << 4
hwcap_FPA = 1 << 5
hwcap_VFP = 1 << 6
hwcap_EDSP = 1 << 7
hwcap_JAVA = 1 << 8
hwcap_IWMMXT = 1 << 9
hwcap_CRUNCH = 1 << 10
hwcap_THUMBEE = 1 << 11
hwcap_NEON = 1 << 12
hwcap_VFPv3 = 1 << 13
hwcap_VFPv3D16 = 1 << 14
hwcap_TLS = 1 << 15
hwcap_VFPv4 = 1 << 16
hwcap_IDIVA = 1 << 17
hwcap_IDIVT = 1 << 18
hwcap_VFPD32 = 1 << 19
hwcap_LPAE = 1 << 20
hwcap_EVTSTRM = 1 << 21
hwcap2_AES = 1 << 0
hwcap2_PMULL = 1 << 1
hwcap2_SHA1 = 1 << 2
hwcap2_SHA2 = 1 << 3
hwcap2_CRC32 = 1 << 4
)
func initOptions() {
options = []option{
{Name: "pmull", Feature: &ARM.HasPMULL},
{Name: "sha1", Feature: &ARM.HasSHA1},
{Name: "sha2", Feature: &ARM.HasSHA2},
{Name: "swp", Feature: &ARM.HasSWP},
{Name: "thumb", Feature: &ARM.HasTHUMB},
{Name: "thumbee", Feature: &ARM.HasTHUMBEE},
{Name: "tls", Feature: &ARM.HasTLS},
{Name: "vfp", Feature: &ARM.HasVFP},
{Name: "vfpd32", Feature: &ARM.HasVFPD32},
{Name: "vfpv3", Feature: &ARM.HasVFPv3},
{Name: "vfpv3d16", Feature: &ARM.HasVFPv3D16},
{Name: "vfpv4", Feature: &ARM.HasVFPv4},
{Name: "half", Feature: &ARM.HasHALF},
{Name: "26bit", Feature: &ARM.Has26BIT},
{Name: "fastmul", Feature: &ARM.HasFASTMUL},
{Name: "fpa", Feature: &ARM.HasFPA},
{Name: "edsp", Feature: &ARM.HasEDSP},
{Name: "java", Feature: &ARM.HasJAVA},
{Name: "iwmmxt", Feature: &ARM.HasIWMMXT},
{Name: "crunch", Feature: &ARM.HasCRUNCH},
{Name: "neon", Feature: &ARM.HasNEON},
{Name: "idivt", Feature: &ARM.HasIDIVT},
{Name: "idiva", Feature: &ARM.HasIDIVA},
{Name: "lpae", Feature: &ARM.HasLPAE},
{Name: "evtstrm", Feature: &ARM.HasEVTSTRM},
{Name: "aes", Feature: &ARM.HasAES},
{Name: "crc32", Feature: &ARM.HasCRC32},
}
}

172
vendor/golang.org/x/sys/cpu/cpu_arm64.go generated vendored Normal file
View File

@ -0,0 +1,172 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cpu
import "runtime"
const cacheLineSize = 64
func initOptions() {
options = []option{
{Name: "fp", Feature: &ARM64.HasFP},
{Name: "asimd", Feature: &ARM64.HasASIMD},
{Name: "evstrm", Feature: &ARM64.HasEVTSTRM},
{Name: "aes", Feature: &ARM64.HasAES},
{Name: "fphp", Feature: &ARM64.HasFPHP},
{Name: "jscvt", Feature: &ARM64.HasJSCVT},
{Name: "lrcpc", Feature: &ARM64.HasLRCPC},
{Name: "pmull", Feature: &ARM64.HasPMULL},
{Name: "sha1", Feature: &ARM64.HasSHA1},
{Name: "sha2", Feature: &ARM64.HasSHA2},
{Name: "sha3", Feature: &ARM64.HasSHA3},
{Name: "sha512", Feature: &ARM64.HasSHA512},
{Name: "sm3", Feature: &ARM64.HasSM3},
{Name: "sm4", Feature: &ARM64.HasSM4},
{Name: "sve", Feature: &ARM64.HasSVE},
{Name: "crc32", Feature: &ARM64.HasCRC32},
{Name: "atomics", Feature: &ARM64.HasATOMICS},
{Name: "asimdhp", Feature: &ARM64.HasASIMDHP},
{Name: "cpuid", Feature: &ARM64.HasCPUID},
{Name: "asimrdm", Feature: &ARM64.HasASIMDRDM},
{Name: "fcma", Feature: &ARM64.HasFCMA},
{Name: "dcpop", Feature: &ARM64.HasDCPOP},
{Name: "asimddp", Feature: &ARM64.HasASIMDDP},
{Name: "asimdfhm", Feature: &ARM64.HasASIMDFHM},
}
}
func archInit() {
switch runtime.GOOS {
case "freebsd":
readARM64Registers()
case "linux", "netbsd":
doinit()
default:
// Most platforms don't seem to allow reading these registers.
//
// OpenBSD:
// See https://golang.org/issue/31746
setMinimalFeatures()
}
}
// setMinimalFeatures fakes the minimal ARM64 features expected by
// TestARM64minimalFeatures.
func setMinimalFeatures() {
ARM64.HasASIMD = true
ARM64.HasFP = true
}
func readARM64Registers() {
Initialized = true
parseARM64SystemRegisters(getisar0(), getisar1(), getpfr0())
}
func parseARM64SystemRegisters(isar0, isar1, pfr0 uint64) {
// ID_AA64ISAR0_EL1
switch extractBits(isar0, 4, 7) {
case 1:
ARM64.HasAES = true
case 2:
ARM64.HasAES = true
ARM64.HasPMULL = true
}
switch extractBits(isar0, 8, 11) {
case 1:
ARM64.HasSHA1 = true
}
switch extractBits(isar0, 12, 15) {
case 1:
ARM64.HasSHA2 = true
case 2:
ARM64.HasSHA2 = true
ARM64.HasSHA512 = true
}
switch extractBits(isar0, 16, 19) {
case 1:
ARM64.HasCRC32 = true
}
switch extractBits(isar0, 20, 23) {
case 2:
ARM64.HasATOMICS = true
}
switch extractBits(isar0, 28, 31) {
case 1:
ARM64.HasASIMDRDM = true
}
switch extractBits(isar0, 32, 35) {
case 1:
ARM64.HasSHA3 = true
}
switch extractBits(isar0, 36, 39) {
case 1:
ARM64.HasSM3 = true
}
switch extractBits(isar0, 40, 43) {
case 1:
ARM64.HasSM4 = true
}
switch extractBits(isar0, 44, 47) {
case 1:
ARM64.HasASIMDDP = true
}
// ID_AA64ISAR1_EL1
switch extractBits(isar1, 0, 3) {
case 1:
ARM64.HasDCPOP = true
}
switch extractBits(isar1, 12, 15) {
case 1:
ARM64.HasJSCVT = true
}
switch extractBits(isar1, 16, 19) {
case 1:
ARM64.HasFCMA = true
}
switch extractBits(isar1, 20, 23) {
case 1:
ARM64.HasLRCPC = true
}
// ID_AA64PFR0_EL1
switch extractBits(pfr0, 16, 19) {
case 0:
ARM64.HasFP = true
case 1:
ARM64.HasFP = true
ARM64.HasFPHP = true
}
switch extractBits(pfr0, 20, 23) {
case 0:
ARM64.HasASIMD = true
case 1:
ARM64.HasASIMD = true
ARM64.HasASIMDHP = true
}
switch extractBits(pfr0, 32, 35) {
case 1:
ARM64.HasSVE = true
}
}
func extractBits(data uint64, start, end uint) uint {
return (uint)(data>>start) & ((1 << (end - start + 1)) - 1)
}
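
extractBits pulls an inclusive bit field out of an ID register value. A worked example matching the AES field decode above; the register value is fabricated for illustration and extractBits is restated so the snippet is self-contained:

package main

import "fmt"

// extractBits is restated from cpu_arm64.go above: it returns the inclusive
// bit field [start, end] of data as an unsigned integer.
func extractBits(data uint64, start, end uint) uint {
    return (uint)(data>>start) & ((1 << (end - start + 1)) - 1)
}

func main() {
    // Hypothetical ID_AA64ISAR0_EL1 value whose AES field (bits 4-7) is 2,
    // which parseARM64SystemRegisters interprets as AES plus PMULL support.
    var isar0 uint64 = 0x20
    fmt.Println(extractBits(isar0, 4, 7)) // prints 2
}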

32
vendor/golang.org/x/sys/cpu/cpu_arm64.s generated vendored Normal file
View File

@ -0,0 +1,32 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build gc
// +build gc
#include "textflag.h"
// func getisar0() uint64
TEXT ·getisar0(SB),NOSPLIT,$0-8
// get Instruction Set Attributes 0 into x0
// mrs x0, ID_AA64ISAR0_EL1 = d5380600
WORD $0xd5380600
MOVD R0, ret+0(FP)
RET
// func getisar1() uint64
TEXT ·getisar1(SB),NOSPLIT,$0-8
// get Instruction Set Attributes 1 into x0
// mrs x0, ID_AA64ISAR1_EL1 = d5380620
WORD $0xd5380620
MOVD R0, ret+0(FP)
RET
// func getpfr0() uint64
TEXT ·getpfr0(SB),NOSPLIT,$0-8
// get Processor Feature Register 0 into x0
// mrs x0, ID_AA64PFR0_EL1 = d5380400
WORD $0xd5380400
MOVD R0, ret+0(FP)
RET

12
vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go generated vendored Normal file
View File

@ -0,0 +1,12 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build gc
// +build gc
package cpu
func getisar0() uint64
func getisar1() uint64
func getpfr0() uint64

22
vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go generated vendored Normal file
View File

@ -0,0 +1,22 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build gc
// +build gc
package cpu
// haveAsmFunctions reports whether the other functions in this file can
// be safely called.
func haveAsmFunctions() bool { return true }
// The following feature detection functions are defined in cpu_s390x.s.
// They are likely to be expensive to call so the results should be cached.
func stfle() facilityList
func kmQuery() queryResult
func kmcQuery() queryResult
func kmctrQuery() queryResult
func kmaQuery() queryResult
func kimdQuery() queryResult
func klmdQuery() queryResult

21
vendor/golang.org/x/sys/cpu/cpu_gc_x86.go generated vendored Normal file
View File

@ -0,0 +1,21 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build (386 || amd64 || amd64p32) && gc
// +build 386 amd64 amd64p32
// +build gc
package cpu
// cpuid is implemented in cpu_x86.s for gc compiler
// and in cpu_gccgo.c for gccgo.
func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32)
// xgetbv with ecx = 0 is implemented in cpu_x86.s for gc compiler
// and in cpu_gccgo.c for gccgo.
func xgetbv() (eax, edx uint32)
// darwinSupportsAVX512 is implemented in cpu_x86.s for gc compiler
// and in cpu_gccgo_x86.go for gccgo.
func darwinSupportsAVX512() bool

12
vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go generated vendored Normal file
View File

@ -0,0 +1,12 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build gccgo
// +build gccgo
package cpu
func getisar0() uint64 { return 0 }
func getisar1() uint64 { return 0 }
func getpfr0() uint64 { return 0 }

23
vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go generated vendored Normal file
View File

@ -0,0 +1,23 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build gccgo
// +build gccgo
package cpu
// haveAsmFunctions reports whether the other functions in this file can
// be safely called.
func haveAsmFunctions() bool { return false }
// TODO(mundaym): the following feature detection functions are currently
// stubs. See https://golang.org/cl/162887 for how to fix this.
// They are likely to be expensive to call so the results should be cached.
func stfle() facilityList { panic("not implemented for gccgo") }
func kmQuery() queryResult { panic("not implemented for gccgo") }
func kmcQuery() queryResult { panic("not implemented for gccgo") }
func kmctrQuery() queryResult { panic("not implemented for gccgo") }
func kmaQuery() queryResult { panic("not implemented for gccgo") }
func kimdQuery() queryResult { panic("not implemented for gccgo") }
func klmdQuery() queryResult { panic("not implemented for gccgo") }

43
vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c generated vendored Normal file
View File

@ -0,0 +1,43 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build 386 amd64 amd64p32
// +build gccgo
#include <cpuid.h>
#include <stdint.h>
// Need to wrap __get_cpuid_count because it's declared as static.
int
gccgoGetCpuidCount(uint32_t leaf, uint32_t subleaf,
uint32_t *eax, uint32_t *ebx,
uint32_t *ecx, uint32_t *edx)
{
return __get_cpuid_count(leaf, subleaf, eax, ebx, ecx, edx);
}
// xgetbv reads the contents of an XCR (Extended Control Register)
// specified in the ECX register into registers EDX:EAX.
// Currently, the only supported value for XCR is 0.
//
// TODO: Replace with a better alternative:
//
// #include <xsaveintrin.h>
//
// #pragma GCC target("xsave")
//
// void gccgoXgetbv(uint32_t *eax, uint32_t *edx) {
// unsigned long long x = _xgetbv(0);
// *eax = x & 0xffffffff;
// *edx = (x >> 32) & 0xffffffff;
// }
//
// Note that _xgetbv is defined starting with GCC 8.
void
gccgoXgetbv(uint32_t *eax, uint32_t *edx)
{
__asm(" xorl %%ecx, %%ecx\n"
" xgetbv"
: "=a"(*eax), "=d"(*edx));
}

33
vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go generated vendored Normal file
View File

@ -0,0 +1,33 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build (386 || amd64 || amd64p32) && gccgo
// +build 386 amd64 amd64p32
// +build gccgo
package cpu
//extern gccgoGetCpuidCount
func gccgoGetCpuidCount(eaxArg, ecxArg uint32, eax, ebx, ecx, edx *uint32)
func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) {
var a, b, c, d uint32
gccgoGetCpuidCount(eaxArg, ecxArg, &a, &b, &c, &d)
return a, b, c, d
}
//extern gccgoXgetbv
func gccgoXgetbv(eax, edx *uint32)
func xgetbv() (eax, edx uint32) {
var a, d uint32
gccgoXgetbv(&a, &d)
return a, d
}
// gccgo doesn't build on Darwin, per:
// https://github.com/Homebrew/homebrew-core/blob/HEAD/Formula/gcc.rb#L76
func darwinSupportsAVX512() bool {
return false
}

16
vendor/golang.org/x/sys/cpu/cpu_linux.go generated vendored Normal file
View File

@ -0,0 +1,16 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !386 && !amd64 && !amd64p32 && !arm64
// +build !386,!amd64,!amd64p32,!arm64
package cpu
func archInit() {
if err := readHWCAP(); err != nil {
return
}
doinit()
Initialized = true
}

39
vendor/golang.org/x/sys/cpu/cpu_linux_arm.go generated vendored Normal file
View File

@ -0,0 +1,39 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cpu
func doinit() {
ARM.HasSWP = isSet(hwCap, hwcap_SWP)
ARM.HasHALF = isSet(hwCap, hwcap_HALF)
ARM.HasTHUMB = isSet(hwCap, hwcap_THUMB)
ARM.Has26BIT = isSet(hwCap, hwcap_26BIT)
ARM.HasFASTMUL = isSet(hwCap, hwcap_FAST_MULT)
ARM.HasFPA = isSet(hwCap, hwcap_FPA)
ARM.HasVFP = isSet(hwCap, hwcap_VFP)
ARM.HasEDSP = isSet(hwCap, hwcap_EDSP)
ARM.HasJAVA = isSet(hwCap, hwcap_JAVA)
ARM.HasIWMMXT = isSet(hwCap, hwcap_IWMMXT)
ARM.HasCRUNCH = isSet(hwCap, hwcap_CRUNCH)
ARM.HasTHUMBEE = isSet(hwCap, hwcap_THUMBEE)
ARM.HasNEON = isSet(hwCap, hwcap_NEON)
ARM.HasVFPv3 = isSet(hwCap, hwcap_VFPv3)
ARM.HasVFPv3D16 = isSet(hwCap, hwcap_VFPv3D16)
ARM.HasTLS = isSet(hwCap, hwcap_TLS)
ARM.HasVFPv4 = isSet(hwCap, hwcap_VFPv4)
ARM.HasIDIVA = isSet(hwCap, hwcap_IDIVA)
ARM.HasIDIVT = isSet(hwCap, hwcap_IDIVT)
ARM.HasVFPD32 = isSet(hwCap, hwcap_VFPD32)
ARM.HasLPAE = isSet(hwCap, hwcap_LPAE)
ARM.HasEVTSTRM = isSet(hwCap, hwcap_EVTSTRM)
ARM.HasAES = isSet(hwCap2, hwcap2_AES)
ARM.HasPMULL = isSet(hwCap2, hwcap2_PMULL)
ARM.HasSHA1 = isSet(hwCap2, hwcap2_SHA1)
ARM.HasSHA2 = isSet(hwCap2, hwcap2_SHA2)
ARM.HasCRC32 = isSet(hwCap2, hwcap2_CRC32)
}
func isSet(hwc uint, value uint) bool {
return hwc&value != 0
}

71
vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go generated vendored Normal file
View File

@ -0,0 +1,71 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cpu
// HWCAP/HWCAP2 bits. These are exposed by Linux.
const (
hwcap_FP = 1 << 0
hwcap_ASIMD = 1 << 1
hwcap_EVTSTRM = 1 << 2
hwcap_AES = 1 << 3
hwcap_PMULL = 1 << 4
hwcap_SHA1 = 1 << 5
hwcap_SHA2 = 1 << 6
hwcap_CRC32 = 1 << 7
hwcap_ATOMICS = 1 << 8
hwcap_FPHP = 1 << 9
hwcap_ASIMDHP = 1 << 10
hwcap_CPUID = 1 << 11
hwcap_ASIMDRDM = 1 << 12
hwcap_JSCVT = 1 << 13
hwcap_FCMA = 1 << 14
hwcap_LRCPC = 1 << 15
hwcap_DCPOP = 1 << 16
hwcap_SHA3 = 1 << 17
hwcap_SM3 = 1 << 18
hwcap_SM4 = 1 << 19
hwcap_ASIMDDP = 1 << 20
hwcap_SHA512 = 1 << 21
hwcap_SVE = 1 << 22
hwcap_ASIMDFHM = 1 << 23
)
func doinit() {
if err := readHWCAP(); err != nil {
// failed to read /proc/self/auxv, try reading registers directly
readARM64Registers()
return
}
// HWCAP feature bits
ARM64.HasFP = isSet(hwCap, hwcap_FP)
ARM64.HasASIMD = isSet(hwCap, hwcap_ASIMD)
ARM64.HasEVTSTRM = isSet(hwCap, hwcap_EVTSTRM)
ARM64.HasAES = isSet(hwCap, hwcap_AES)
ARM64.HasPMULL = isSet(hwCap, hwcap_PMULL)
ARM64.HasSHA1 = isSet(hwCap, hwcap_SHA1)
ARM64.HasSHA2 = isSet(hwCap, hwcap_SHA2)
ARM64.HasCRC32 = isSet(hwCap, hwcap_CRC32)
ARM64.HasATOMICS = isSet(hwCap, hwcap_ATOMICS)
ARM64.HasFPHP = isSet(hwCap, hwcap_FPHP)
ARM64.HasASIMDHP = isSet(hwCap, hwcap_ASIMDHP)
ARM64.HasCPUID = isSet(hwCap, hwcap_CPUID)
ARM64.HasASIMDRDM = isSet(hwCap, hwcap_ASIMDRDM)
ARM64.HasJSCVT = isSet(hwCap, hwcap_JSCVT)
ARM64.HasFCMA = isSet(hwCap, hwcap_FCMA)
ARM64.HasLRCPC = isSet(hwCap, hwcap_LRCPC)
ARM64.HasDCPOP = isSet(hwCap, hwcap_DCPOP)
ARM64.HasSHA3 = isSet(hwCap, hwcap_SHA3)
ARM64.HasSM3 = isSet(hwCap, hwcap_SM3)
ARM64.HasSM4 = isSet(hwCap, hwcap_SM4)
ARM64.HasASIMDDP = isSet(hwCap, hwcap_ASIMDDP)
ARM64.HasSHA512 = isSet(hwCap, hwcap_SHA512)
ARM64.HasSVE = isSet(hwCap, hwcap_SVE)
ARM64.HasASIMDFHM = isSet(hwCap, hwcap_ASIMDFHM)
}
func isSet(hwc uint, value uint) bool {
return hwc&value != 0
}

24
vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go generated vendored Normal file
View File

@ -0,0 +1,24 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build linux && (mips64 || mips64le)
// +build linux
// +build mips64 mips64le
package cpu
// HWCAP bits. These are exposed by the Linux kernel 5.4.
const (
// CPU features
hwcap_MIPS_MSA = 1 << 1
)
func doinit() {
// HWCAP feature bits
MIPS64X.HasMSA = isSet(hwCap, hwcap_MIPS_MSA)
}
func isSet(hwc uint, value uint) bool {
return hwc&value != 0
}

10
vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go generated vendored Normal file
View File

@ -0,0 +1,10 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build linux && !arm && !arm64 && !mips64 && !mips64le && !ppc64 && !ppc64le && !s390x
// +build linux,!arm,!arm64,!mips64,!mips64le,!ppc64,!ppc64le,!s390x
package cpu
func doinit() {}

32
vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go generated vendored Normal file
View File

@ -0,0 +1,32 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build linux && (ppc64 || ppc64le)
// +build linux
// +build ppc64 ppc64le
package cpu
// HWCAP/HWCAP2 bits. These are exposed by the kernel.
const (
// ISA Level
_PPC_FEATURE2_ARCH_2_07 = 0x80000000
_PPC_FEATURE2_ARCH_3_00 = 0x00800000
// CPU features
_PPC_FEATURE2_DARN = 0x00200000
_PPC_FEATURE2_SCV = 0x00100000
)
func doinit() {
// HWCAP2 feature bits
PPC64.IsPOWER8 = isSet(hwCap2, _PPC_FEATURE2_ARCH_2_07)
PPC64.IsPOWER9 = isSet(hwCap2, _PPC_FEATURE2_ARCH_3_00)
PPC64.HasDARN = isSet(hwCap2, _PPC_FEATURE2_DARN)
PPC64.HasSCV = isSet(hwCap2, _PPC_FEATURE2_SCV)
}
func isSet(hwc uint, value uint) bool {
return hwc&value != 0
}

40
vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go generated vendored Normal file
View File

@ -0,0 +1,40 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cpu
const (
// bit mask values from /usr/include/bits/hwcap.h
hwcap_ZARCH = 2
hwcap_STFLE = 4
hwcap_MSA = 8
hwcap_LDISP = 16
hwcap_EIMM = 32
hwcap_DFP = 64
hwcap_ETF3EH = 256
hwcap_VX = 2048
hwcap_VXE = 8192
)
func initS390Xbase() {
// test HWCAP bit vector
has := func(featureMask uint) bool {
return hwCap&featureMask == featureMask
}
// mandatory
S390X.HasZARCH = has(hwcap_ZARCH)
// optional
S390X.HasSTFLE = has(hwcap_STFLE)
S390X.HasLDISP = has(hwcap_LDISP)
S390X.HasEIMM = has(hwcap_EIMM)
S390X.HasETF3EH = has(hwcap_ETF3EH)
S390X.HasDFP = has(hwcap_DFP)
S390X.HasMSA = has(hwcap_MSA)
S390X.HasVX = has(hwcap_VX)
if S390X.HasVX {
S390X.HasVXE = has(hwcap_VXE)
}
}

16
vendor/golang.org/x/sys/cpu/cpu_mips64x.go generated vendored Normal file
View File

@ -0,0 +1,16 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build mips64 || mips64le
// +build mips64 mips64le
package cpu
const cacheLineSize = 32
func initOptions() {
options = []option{
{Name: "msa", Feature: &MIPS64X.HasMSA},
}
}

12
vendor/golang.org/x/sys/cpu/cpu_mipsx.go generated vendored Normal file
View File

@ -0,0 +1,12 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build mips || mipsle
// +build mips mipsle
package cpu
const cacheLineSize = 32
func initOptions() {}

173
vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go generated vendored Normal file
View File

@ -0,0 +1,173 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cpu
import (
"syscall"
"unsafe"
)
// Minimal copy of functionality from x/sys/unix so the cpu package can call
// sysctl without depending on x/sys/unix.
const (
_CTL_QUERY = -2
_SYSCTL_VERS_1 = 0x1000000
)
var _zero uintptr
func sysctl(mib []int32, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
var _p0 unsafe.Pointer
if len(mib) > 0 {
_p0 = unsafe.Pointer(&mib[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
_, _, errno := syscall.Syscall6(
syscall.SYS___SYSCTL,
uintptr(_p0),
uintptr(len(mib)),
uintptr(unsafe.Pointer(old)),
uintptr(unsafe.Pointer(oldlen)),
uintptr(unsafe.Pointer(new)),
uintptr(newlen))
if errno != 0 {
return errno
}
return nil
}
type sysctlNode struct {
Flags uint32
Num int32
Name [32]int8
Ver uint32
__rsvd uint32
Un [16]byte
_sysctl_size [8]byte
_sysctl_func [8]byte
_sysctl_parent [8]byte
_sysctl_desc [8]byte
}
func sysctlNodes(mib []int32) ([]sysctlNode, error) {
var olen uintptr
// Get a list of all sysctl nodes below the given MIB by performing
// a sysctl for the given MIB with CTL_QUERY appended.
mib = append(mib, _CTL_QUERY)
qnode := sysctlNode{Flags: _SYSCTL_VERS_1}
qp := (*byte)(unsafe.Pointer(&qnode))
sz := unsafe.Sizeof(qnode)
if err := sysctl(mib, nil, &olen, qp, sz); err != nil {
return nil, err
}
// Now that we know the size, get the actual nodes.
nodes := make([]sysctlNode, olen/sz)
np := (*byte)(unsafe.Pointer(&nodes[0]))
if err := sysctl(mib, np, &olen, qp, sz); err != nil {
return nil, err
}
return nodes, nil
}
func nametomib(name string) ([]int32, error) {
// Split name into components.
var parts []string
last := 0
for i := 0; i < len(name); i++ {
if name[i] == '.' {
parts = append(parts, name[last:i])
last = i + 1
}
}
parts = append(parts, name[last:])
mib := []int32{}
// Discover the nodes and construct the MIB OID.
for partno, part := range parts {
nodes, err := sysctlNodes(mib)
if err != nil {
return nil, err
}
for _, node := range nodes {
n := make([]byte, 0)
for i := range node.Name {
if node.Name[i] != 0 {
n = append(n, byte(node.Name[i]))
}
}
if string(n) == part {
mib = append(mib, int32(node.Num))
break
}
}
if len(mib) != partno+1 {
return nil, err
}
}
return mib, nil
}
// aarch64SysctlCPUID is struct aarch64_sysctl_cpu_id from NetBSD's <aarch64/armreg.h>
type aarch64SysctlCPUID struct {
midr uint64 /* Main ID Register */
revidr uint64 /* Revision ID Register */
mpidr uint64 /* Multiprocessor Affinity Register */
aa64dfr0 uint64 /* A64 Debug Feature Register 0 */
aa64dfr1 uint64 /* A64 Debug Feature Register 1 */
aa64isar0 uint64 /* A64 Instruction Set Attribute Register 0 */
aa64isar1 uint64 /* A64 Instruction Set Attribute Register 1 */
aa64mmfr0 uint64 /* A64 Memory Model Feature Register 0 */
aa64mmfr1 uint64 /* A64 Memory Model Feature Register 1 */
aa64mmfr2 uint64 /* A64 Memory Model Feature Register 2 */
aa64pfr0 uint64 /* A64 Processor Feature Register 0 */
aa64pfr1 uint64 /* A64 Processor Feature Register 1 */
aa64zfr0 uint64 /* A64 SVE Feature ID Register 0 */
mvfr0 uint32 /* Media and VFP Feature Register 0 */
mvfr1 uint32 /* Media and VFP Feature Register 1 */
mvfr2 uint32 /* Media and VFP Feature Register 2 */
pad uint32
clidr uint64 /* Cache Level ID Register */
ctr uint64 /* Cache Type Register */
}
func sysctlCPUID(name string) (*aarch64SysctlCPUID, error) {
mib, err := nametomib(name)
if err != nil {
return nil, err
}
out := aarch64SysctlCPUID{}
n := unsafe.Sizeof(out)
_, _, errno := syscall.Syscall6(
syscall.SYS___SYSCTL,
uintptr(unsafe.Pointer(&mib[0])),
uintptr(len(mib)),
uintptr(unsafe.Pointer(&out)),
uintptr(unsafe.Pointer(&n)),
uintptr(0),
uintptr(0))
if errno != 0 {
return nil, errno
}
return &out, nil
}
func doinit() {
cpuid, err := sysctlCPUID("machdep.cpu0.cpu_id")
if err != nil {
setMinimalFeatures()
return
}
parseARM64SystemRegisters(cpuid.aa64isar0, cpuid.aa64isar1, cpuid.aa64pfr0)
Initialized = true
}

10
vendor/golang.org/x/sys/cpu/cpu_other_arm.go generated vendored Normal file
View File

@ -0,0 +1,10 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !linux && arm
// +build !linux,arm
package cpu
func archInit() {}

10
vendor/golang.org/x/sys/cpu/cpu_other_arm64.go generated vendored Normal file
View File

@ -0,0 +1,10 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !linux && !netbsd && arm64
// +build !linux,!netbsd,arm64
package cpu
func doinit() {}

13
vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go generated vendored Normal file
View File

@ -0,0 +1,13 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !linux && (mips64 || mips64le)
// +build !linux
// +build mips64 mips64le
package cpu
func archInit() {
Initialized = true
}

17
vendor/golang.org/x/sys/cpu/cpu_ppc64x.go generated vendored Normal file
View File

@ -0,0 +1,17 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build ppc64 || ppc64le
// +build ppc64 ppc64le
package cpu
const cacheLineSize = 128
func initOptions() {
options = []option{
{Name: "darn", Feature: &PPC64.HasDARN},
{Name: "scv", Feature: &PPC64.HasSCV},
}
}

12
vendor/golang.org/x/sys/cpu/cpu_riscv64.go generated vendored Normal file
View File

@ -0,0 +1,12 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build riscv64
// +build riscv64
package cpu
const cacheLineSize = 32
func initOptions() {}

172
vendor/golang.org/x/sys/cpu/cpu_s390x.go generated vendored Normal file
View File

@ -0,0 +1,172 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cpu
const cacheLineSize = 256
func initOptions() {
options = []option{
{Name: "zarch", Feature: &S390X.HasZARCH, Required: true},
{Name: "stfle", Feature: &S390X.HasSTFLE, Required: true},
{Name: "ldisp", Feature: &S390X.HasLDISP, Required: true},
{Name: "eimm", Feature: &S390X.HasEIMM, Required: true},
{Name: "dfp", Feature: &S390X.HasDFP},
{Name: "etf3eh", Feature: &S390X.HasETF3EH},
{Name: "msa", Feature: &S390X.HasMSA},
{Name: "aes", Feature: &S390X.HasAES},
{Name: "aescbc", Feature: &S390X.HasAESCBC},
{Name: "aesctr", Feature: &S390X.HasAESCTR},
{Name: "aesgcm", Feature: &S390X.HasAESGCM},
{Name: "ghash", Feature: &S390X.HasGHASH},
{Name: "sha1", Feature: &S390X.HasSHA1},
{Name: "sha256", Feature: &S390X.HasSHA256},
{Name: "sha3", Feature: &S390X.HasSHA3},
{Name: "sha512", Feature: &S390X.HasSHA512},
{Name: "vx", Feature: &S390X.HasVX},
{Name: "vxe", Feature: &S390X.HasVXE},
}
}
// bitIsSet reports whether the bit at index is set. The bit index
// is in big endian order, so bit index 0 is the leftmost bit.
func bitIsSet(bits []uint64, index uint) bool {
return bits[index/64]&((1<<63)>>(index%64)) != 0
}
// facility is a bit index for the named facility.
type facility uint8
const (
// mandatory facilities
zarch facility = 1 // z architecture mode is active
stflef facility = 7 // store-facility-list-extended
ldisp facility = 18 // long-displacement
eimm facility = 21 // extended-immediate
// miscellaneous facilities
dfp facility = 42 // decimal-floating-point
etf3eh facility = 30 // extended-translation 3 enhancement
// cryptography facilities
msa facility = 17 // message-security-assist
msa3 facility = 76 // message-security-assist extension 3
msa4 facility = 77 // message-security-assist extension 4
msa5 facility = 57 // message-security-assist extension 5
msa8 facility = 146 // message-security-assist extension 8
msa9 facility = 155 // message-security-assist extension 9
// vector facilities
vx facility = 129 // vector facility
vxe facility = 135 // vector-enhancements 1
vxe2 facility = 148 // vector-enhancements 2
)
// facilityList contains the result of an STFLE call.
// Bits are numbered in big endian order so the
// leftmost bit (the MSB) is at index 0.
type facilityList struct {
bits [4]uint64
}
// Has reports whether the given facilities are present.
func (s *facilityList) Has(fs ...facility) bool {
if len(fs) == 0 {
panic("no facility bits provided")
}
for _, f := range fs {
if !bitIsSet(s.bits[:], uint(f)) {
return false
}
}
return true
}
// function is the code for the named cryptographic function.
type function uint8
const (
// KM{,A,C,CTR} function codes
aes128 function = 18 // AES-128
aes192 function = 19 // AES-192
aes256 function = 20 // AES-256
// K{I,L}MD function codes
sha1 function = 1 // SHA-1
sha256 function = 2 // SHA-256
sha512 function = 3 // SHA-512
sha3_224 function = 32 // SHA3-224
sha3_256 function = 33 // SHA3-256
sha3_384 function = 34 // SHA3-384
sha3_512 function = 35 // SHA3-512
shake128 function = 36 // SHAKE-128
shake256 function = 37 // SHAKE-256
// KLMD function codes
ghash function = 65 // GHASH
)
// queryResult contains the result of a Query function
// call. Bits are numbered in big endian order so the
// leftmost bit (the MSB) is at index 0.
type queryResult struct {
bits [2]uint64
}
// Has reports whether the given functions are present.
func (q *queryResult) Has(fns ...function) bool {
if len(fns) == 0 {
panic("no function codes provided")
}
for _, f := range fns {
if !bitIsSet(q.bits[:], uint(f)) {
return false
}
}
return true
}
func doinit() {
initS390Xbase()
// We need implementations of stfle, km and so on
// to detect cryptographic features.
if !haveAsmFunctions() {
return
}
// optional cryptographic functions
if S390X.HasMSA {
aes := []function{aes128, aes192, aes256}
// cipher message
km, kmc := kmQuery(), kmcQuery()
S390X.HasAES = km.Has(aes...)
S390X.HasAESCBC = kmc.Has(aes...)
if S390X.HasSTFLE {
facilities := stfle()
if facilities.Has(msa4) {
kmctr := kmctrQuery()
S390X.HasAESCTR = kmctr.Has(aes...)
}
if facilities.Has(msa8) {
kma := kmaQuery()
S390X.HasAESGCM = kma.Has(aes...)
}
}
// compute message digest
kimd := kimdQuery() // intermediate (no padding)
klmd := klmdQuery() // last (padding)
S390X.HasSHA1 = kimd.Has(sha1) && klmd.Has(sha1)
S390X.HasSHA256 = kimd.Has(sha256) && klmd.Has(sha256)
S390X.HasSHA512 = kimd.Has(sha512) && klmd.Has(sha512)
S390X.HasGHASH = kimd.Has(ghash) // KLMD-GHASH does not exist
sha3 := []function{
sha3_224, sha3_256, sha3_384, sha3_512,
shake128, shake256,
}
S390X.HasSHA3 = kimd.Has(sha3...) && klmd.Has(sha3...)
}
}
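
The facility and function bit numbering used above is big-endian within each doubleword: index 0 is the most significant bit of bits[0]. A small standalone check of that convention, restating bitIsSet for illustration with fabricated facility bits:

package main

import "fmt"

// bitIsSet is restated from cpu_s390x.go above: bit index 0 is the leftmost
// (most significant) bit of the first doubleword.
func bitIsSet(bits []uint64, index uint) bool {
    return bits[index/64]&((1<<63)>>(index%64)) != 0
}

func main() {
    var fl [4]uint64
    fl[0] = 1 << 63 // set bit index 0 (the MSB of doubleword 0)
    fl[2] = 1 << 62 // set bit index 129, i.e. the vector facility (vx)
    fmt.Println(bitIsSet(fl[:], 0))   // true
    fmt.Println(bitIsSet(fl[:], 129)) // true
    fmt.Println(bitIsSet(fl[:], 1))   // false (zarch, index 1, not set here)
}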

58
vendor/golang.org/x/sys/cpu/cpu_s390x.s generated vendored Normal file
View File

@ -0,0 +1,58 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build gc
// +build gc
#include "textflag.h"
// func stfle() facilityList
TEXT ·stfle(SB), NOSPLIT|NOFRAME, $0-32
MOVD $ret+0(FP), R1
MOVD $3, R0 // last doubleword index to store
XC $32, (R1), (R1) // clear 4 doublewords (32 bytes)
WORD $0xb2b01000 // store facility list extended (STFLE)
RET
// func kmQuery() queryResult
TEXT ·kmQuery(SB), NOSPLIT|NOFRAME, $0-16
MOVD $0, R0 // set function code to 0 (KM-Query)
MOVD $ret+0(FP), R1 // address of 16-byte return value
WORD $0xB92E0024 // cipher message (KM)
RET
// func kmcQuery() queryResult
TEXT ·kmcQuery(SB), NOSPLIT|NOFRAME, $0-16
MOVD $0, R0 // set function code to 0 (KMC-Query)
MOVD $ret+0(FP), R1 // address of 16-byte return value
WORD $0xB92F0024 // cipher message with chaining (KMC)
RET
// func kmctrQuery() queryResult
TEXT ·kmctrQuery(SB), NOSPLIT|NOFRAME, $0-16
MOVD $0, R0 // set function code to 0 (KMCTR-Query)
MOVD $ret+0(FP), R1 // address of 16-byte return value
WORD $0xB92D4024 // cipher message with counter (KMCTR)
RET
// func kmaQuery() queryResult
TEXT ·kmaQuery(SB), NOSPLIT|NOFRAME, $0-16
MOVD $0, R0 // set function code to 0 (KMA-Query)
MOVD $ret+0(FP), R1 // address of 16-byte return value
WORD $0xb9296024 // cipher message with authentication (KMA)
RET
// func kimdQuery() queryResult
TEXT ·kimdQuery(SB), NOSPLIT|NOFRAME, $0-16
MOVD $0, R0 // set function code to 0 (KIMD-Query)
MOVD $ret+0(FP), R1 // address of 16-byte return value
WORD $0xB93E0024 // compute intermediate message digest (KIMD)
RET
// func klmdQuery() queryResult
TEXT ·klmdQuery(SB), NOSPLIT|NOFRAME, $0-16
MOVD $0, R0 // set function code to 0 (KLMD-Query)
MOVD $ret+0(FP), R1 // address of 16-byte return value
WORD $0xB93F0024 // compute last message digest (KLMD)
RET

18
vendor/golang.org/x/sys/cpu/cpu_wasm.go generated vendored Normal file
View File

@ -0,0 +1,18 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build wasm
// +build wasm
package cpu
// We're compiling the cpu package for an unknown (software-abstracted) CPU.
// Make CacheLinePad an empty struct and hope that the usual struct alignment
// rules are good enough.
const cacheLineSize = 0
func initOptions() {}
func archInit() {}

144
vendor/golang.org/x/sys/cpu/cpu_x86.go generated vendored Normal file
View File

@ -0,0 +1,144 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build 386 || amd64 || amd64p32
// +build 386 amd64 amd64p32
package cpu
import "runtime"
const cacheLineSize = 64
func initOptions() {
options = []option{
{Name: "adx", Feature: &X86.HasADX},
{Name: "aes", Feature: &X86.HasAES},
{Name: "avx", Feature: &X86.HasAVX},
{Name: "avx2", Feature: &X86.HasAVX2},
{Name: "avx512", Feature: &X86.HasAVX512},
{Name: "avx512f", Feature: &X86.HasAVX512F},
{Name: "avx512cd", Feature: &X86.HasAVX512CD},
{Name: "avx512er", Feature: &X86.HasAVX512ER},
{Name: "avx512pf", Feature: &X86.HasAVX512PF},
{Name: "avx512vl", Feature: &X86.HasAVX512VL},
{Name: "avx512bw", Feature: &X86.HasAVX512BW},
{Name: "avx512dq", Feature: &X86.HasAVX512DQ},
{Name: "avx512ifma", Feature: &X86.HasAVX512IFMA},
{Name: "avx512vbmi", Feature: &X86.HasAVX512VBMI},
{Name: "avx512vnniw", Feature: &X86.HasAVX5124VNNIW},
{Name: "avx5124fmaps", Feature: &X86.HasAVX5124FMAPS},
{Name: "avx512vpopcntdq", Feature: &X86.HasAVX512VPOPCNTDQ},
{Name: "avx512vpclmulqdq", Feature: &X86.HasAVX512VPCLMULQDQ},
{Name: "avx512vnni", Feature: &X86.HasAVX512VNNI},
{Name: "avx512gfni", Feature: &X86.HasAVX512GFNI},
{Name: "avx512vaes", Feature: &X86.HasAVX512VAES},
{Name: "avx512vbmi2", Feature: &X86.HasAVX512VBMI2},
{Name: "avx512bitalg", Feature: &X86.HasAVX512BITALG},
{Name: "avx512bf16", Feature: &X86.HasAVX512BF16},
{Name: "bmi1", Feature: &X86.HasBMI1},
{Name: "bmi2", Feature: &X86.HasBMI2},
{Name: "cx16", Feature: &X86.HasCX16},
{Name: "erms", Feature: &X86.HasERMS},
{Name: "fma", Feature: &X86.HasFMA},
{Name: "osxsave", Feature: &X86.HasOSXSAVE},
{Name: "pclmulqdq", Feature: &X86.HasPCLMULQDQ},
{Name: "popcnt", Feature: &X86.HasPOPCNT},
{Name: "rdrand", Feature: &X86.HasRDRAND},
{Name: "rdseed", Feature: &X86.HasRDSEED},
{Name: "sse3", Feature: &X86.HasSSE3},
{Name: "sse41", Feature: &X86.HasSSE41},
{Name: "sse42", Feature: &X86.HasSSE42},
{Name: "ssse3", Feature: &X86.HasSSSE3},
// These capabilities should always be enabled on amd64:
{Name: "sse2", Feature: &X86.HasSSE2, Required: runtime.GOARCH == "amd64"},
}
}
func archInit() {
Initialized = true
maxID, _, _, _ := cpuid(0, 0)
if maxID < 1 {
return
}
_, _, ecx1, edx1 := cpuid(1, 0)
X86.HasSSE2 = isSet(26, edx1)
X86.HasSSE3 = isSet(0, ecx1)
X86.HasPCLMULQDQ = isSet(1, ecx1)
X86.HasSSSE3 = isSet(9, ecx1)
X86.HasFMA = isSet(12, ecx1)
X86.HasCX16 = isSet(13, ecx1)
X86.HasSSE41 = isSet(19, ecx1)
X86.HasSSE42 = isSet(20, ecx1)
X86.HasPOPCNT = isSet(23, ecx1)
X86.HasAES = isSet(25, ecx1)
X86.HasOSXSAVE = isSet(27, ecx1)
X86.HasRDRAND = isSet(30, ecx1)
var osSupportsAVX, osSupportsAVX512 bool
// For XGETBV, OSXSAVE bit is required and sufficient.
if X86.HasOSXSAVE {
eax, _ := xgetbv()
// Check if XMM and YMM registers have OS support.
osSupportsAVX = isSet(1, eax) && isSet(2, eax)
if runtime.GOOS == "darwin" {
// Check darwin commpage for AVX512 support. Necessary because:
// https://github.com/apple/darwin-xnu/blob/0a798f6738bc1db01281fc08ae024145e84df927/osfmk/i386/fpu.c#L175-L201
osSupportsAVX512 = osSupportsAVX && darwinSupportsAVX512()
} else {
// Check if OPMASK and ZMM registers have OS support.
osSupportsAVX512 = osSupportsAVX && isSet(5, eax) && isSet(6, eax) && isSet(7, eax)
}
}
X86.HasAVX = isSet(28, ecx1) && osSupportsAVX
if maxID < 7 {
return
}
_, ebx7, ecx7, edx7 := cpuid(7, 0)
X86.HasBMI1 = isSet(3, ebx7)
X86.HasAVX2 = isSet(5, ebx7) && osSupportsAVX
X86.HasBMI2 = isSet(8, ebx7)
X86.HasERMS = isSet(9, ebx7)
X86.HasRDSEED = isSet(18, ebx7)
X86.HasADX = isSet(19, ebx7)
X86.HasAVX512 = isSet(16, ebx7) && osSupportsAVX512 // Because avx-512 foundation is the core required extension
if X86.HasAVX512 {
X86.HasAVX512F = true
X86.HasAVX512CD = isSet(28, ebx7)
X86.HasAVX512ER = isSet(27, ebx7)
X86.HasAVX512PF = isSet(26, ebx7)
X86.HasAVX512VL = isSet(31, ebx7)
X86.HasAVX512BW = isSet(30, ebx7)
X86.HasAVX512DQ = isSet(17, ebx7)
X86.HasAVX512IFMA = isSet(21, ebx7)
X86.HasAVX512VBMI = isSet(1, ecx7)
X86.HasAVX5124VNNIW = isSet(2, edx7)
X86.HasAVX5124FMAPS = isSet(3, edx7)
X86.HasAVX512VPOPCNTDQ = isSet(14, ecx7)
X86.HasAVX512VPCLMULQDQ = isSet(10, ecx7)
X86.HasAVX512VNNI = isSet(11, ecx7)
X86.HasAVX512GFNI = isSet(8, ecx7)
X86.HasAVX512VAES = isSet(9, ecx7)
X86.HasAVX512VBMI2 = isSet(6, ecx7)
X86.HasAVX512BITALG = isSet(12, ecx7)
eax71, _, _, _ := cpuid(7, 1)
X86.HasAVX512BF16 = isSet(5, eax71)
}
}
func isSet(bitpos uint, value uint32) bool {
return value&(1<<bitpos) != 0
}

52
vendor/golang.org/x/sys/cpu/cpu_x86.s generated vendored Normal file
View File

@ -0,0 +1,52 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build (386 || amd64 || amd64p32) && gc
// +build 386 amd64 amd64p32
// +build gc
#include "textflag.h"
// func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32)
TEXT ·cpuid(SB), NOSPLIT, $0-24
MOVL eaxArg+0(FP), AX
MOVL ecxArg+4(FP), CX
CPUID
MOVL AX, eax+8(FP)
MOVL BX, ebx+12(FP)
MOVL CX, ecx+16(FP)
MOVL DX, edx+20(FP)
RET
// func xgetbv() (eax, edx uint32)
TEXT ·xgetbv(SB),NOSPLIT,$0-8
MOVL $0, CX
XGETBV
MOVL AX, eax+0(FP)
MOVL DX, edx+4(FP)
RET
// func darwinSupportsAVX512() bool
TEXT ·darwinSupportsAVX512(SB), NOSPLIT, $0-1
MOVB $0, ret+0(FP) // default to false
#ifdef GOOS_darwin // return if not darwin
#ifdef GOARCH_amd64 // return if not amd64
// These values from:
// https://github.com/apple/darwin-xnu/blob/xnu-4570.1.46/osfmk/i386/cpu_capabilities.h
#define commpage64_base_address 0x00007fffffe00000
#define commpage64_cpu_capabilities64 (commpage64_base_address+0x010)
#define commpage64_version (commpage64_base_address+0x01E)
#define hasAVX512F 0x0000004000000000
MOVQ $commpage64_version, BX
CMPW (BX), $13 // cpu_capabilities64 undefined in versions < 13
JL no_avx512
MOVQ $commpage64_cpu_capabilities64, BX
MOVQ $hasAVX512F, CX
TESTQ (BX), CX
JZ no_avx512
MOVB $1, ret+0(FP)
no_avx512:
#endif
#endif
RET

10
vendor/golang.org/x/sys/cpu/cpu_zos.go generated vendored Normal file
View File

@ -0,0 +1,10 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cpu
func archInit() {
doinit()
Initialized = true
}

25
vendor/golang.org/x/sys/cpu/cpu_zos_s390x.go generated vendored Normal file
View File

@ -0,0 +1,25 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cpu
func initS390Xbase() {
// get the facilities list
facilities := stfle()
// mandatory
S390X.HasZARCH = facilities.Has(zarch)
S390X.HasSTFLE = facilities.Has(stflef)
S390X.HasLDISP = facilities.Has(ldisp)
S390X.HasEIMM = facilities.Has(eimm)
// optional
S390X.HasETF3EH = facilities.Has(etf3eh)
S390X.HasDFP = facilities.Has(dfp)
S390X.HasMSA = facilities.Has(msa)
S390X.HasVX = facilities.Has(vx)
if S390X.HasVX {
S390X.HasVXE = facilities.Has(vxe)
}
}

56
vendor/golang.org/x/sys/cpu/hwcap_linux.go generated vendored Normal file
View File

@ -0,0 +1,56 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cpu
import (
"io/ioutil"
)
const (
_AT_HWCAP = 16
_AT_HWCAP2 = 26
procAuxv = "/proc/self/auxv"
uintSize = int(32 << (^uint(0) >> 63))
)
// For platforms that don't have a 'cpuid' equivalent, we use HWCAP/HWCAP2
// as exposed by the Linux kernel. These are initialized in cpu_$GOARCH.go
// and should not be changed after they are initialized.
var hwCap uint
var hwCap2 uint
func readHWCAP() error {
buf, err := ioutil.ReadFile(procAuxv)
if err != nil {
// e.g. on android /proc/self/auxv is not accessible, so silently
// ignore the error and leave Initialized = false. On some
// architectures (e.g. arm64) doinit() implements a fallback
// readout and will set Initialized = true again.
return err
}
bo := hostByteOrder()
for len(buf) >= 2*(uintSize/8) {
var tag, val uint
switch uintSize {
case 32:
tag = uint(bo.Uint32(buf[0:]))
val = uint(bo.Uint32(buf[4:]))
buf = buf[8:]
case 64:
tag = uint(bo.Uint64(buf[0:]))
val = uint(bo.Uint64(buf[8:]))
buf = buf[16:]
}
switch tag {
case _AT_HWCAP:
hwCap = val
case _AT_HWCAP2:
hwCap2 = val
}
}
return nil
}
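To show how the two HWCAP words are typically consumed, a hedged sketch follows; the bit mask below is a made-up placeholder, not a constant defined by this package (real masks live in the per-architecture cpu_linux_$GOARCH.go files):

```go
// Placeholder mask for illustration only; not a real HWCAP constant.
const hwcapHypotheticalFeature = 1 << 3

// hwcapIsSet reports whether the given HWCAP bit mask is present.
func hwcapIsSet(hwc, mask uint) bool {
	return hwc&mask != 0
}

// doinitSketch mirrors what an arch-specific doinit might do with hwCap.
func doinitSketch() {
	if err := readHWCAP(); err != nil {
		return // leave Initialized = false; some architectures have a fallback readout
	}
	_ = hwcapIsSet(hwCap, hwcapHypotheticalFeature) // e.g. ARM64.HasFOO = hwcapIsSet(...)
}
```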

27
vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go generated vendored Normal file
View File

@ -0,0 +1,27 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Recreate a getsystemcfg syscall handler instead of
// using the one provided by x/sys/unix to avoid a
// dependency between the two packages. (See golang.org/issue/32102)
// Moreover, this file will be used during the building of
// gccgo's libgo and thus must not use a cgo method.
//go:build aix && gccgo
// +build aix,gccgo
package cpu
import (
"syscall"
)
//extern getsystemcfg
func gccgoGetsystemcfg(label uint32) (r uint64)
func callgetsystemcfg(label int) (r1 uintptr, e1 syscall.Errno) {
r1 = uintptr(gccgoGetsystemcfg(uint32(label)))
e1 = syscall.GetErrno()
return
}

36
vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go generated vendored Normal file
View File

@ -0,0 +1,36 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Minimal copy of x/sys/unix so the cpu package can make a
// system call on AIX without depending on x/sys/unix.
// (See golang.org/issue/32102)
//go:build aix && ppc64 && gc
// +build aix,ppc64,gc
package cpu
import (
"syscall"
"unsafe"
)
//go:cgo_import_dynamic libc_getsystemcfg getsystemcfg "libc.a/shr_64.o"
//go:linkname libc_getsystemcfg libc_getsystemcfg
type syscallFunc uintptr
var libc_getsystemcfg syscallFunc
type errno = syscall.Errno
// Implemented in runtime/syscall_aix.go.
func rawSyscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err errno)
func syscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err errno)
func callgetsystemcfg(label int) (r1 uintptr, e1 errno) {
r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_getsystemcfg)), 1, uintptr(label), 0, 0, 0, 0, 0)
return
}

23
vendor/gopkg.in/natefinch/lumberjack.v2/.gitignore generated vendored Normal file
View File

@ -0,0 +1,23 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test

6
vendor/gopkg.in/natefinch/lumberjack.v2/.travis.yml generated vendored Normal file
View File

@ -0,0 +1,6 @@
language: go
go:
- 1.8
- 1.7
- 1.6

21
vendor/gopkg.in/natefinch/lumberjack.v2/LICENSE generated vendored Normal file
View File

@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2014 Nate Finch
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

179
vendor/gopkg.in/natefinch/lumberjack.v2/README.md generated vendored Normal file
View File

@ -0,0 +1,179 @@
# lumberjack [![GoDoc](https://godoc.org/gopkg.in/natefinch/lumberjack.v2?status.png)](https://godoc.org/gopkg.in/natefinch/lumberjack.v2) [![Build Status](https://travis-ci.org/natefinch/lumberjack.svg?branch=v2.0)](https://travis-ci.org/natefinch/lumberjack) [![Build status](https://ci.appveyor.com/api/projects/status/00gchpxtg4gkrt5d)](https://ci.appveyor.com/project/natefinch/lumberjack) [![Coverage Status](https://coveralls.io/repos/natefinch/lumberjack/badge.svg?branch=v2.0)](https://coveralls.io/r/natefinch/lumberjack?branch=v2.0)
### Lumberjack is a Go package for writing logs to rolling files.
Package lumberjack provides a rolling logger.
Note that this is v2.0 of lumberjack, and should be imported using gopkg.in
thusly:
import "gopkg.in/natefinch/lumberjack.v2"
The package name remains simply lumberjack, and the code resides at
https://github.com/natefinch/lumberjack under the v2.0 branch.
Lumberjack is intended to be one part of a logging infrastructure.
It is not an all-in-one solution, but instead is a pluggable
component at the bottom of the logging stack that simply controls the files
to which logs are written.
Lumberjack plays well with any logging package that can write to an
io.Writer, including the standard library's log package.
Lumberjack assumes that only one process is writing to the output files.
Using the same lumberjack configuration from multiple processes on the same
machine will result in improper behavior.
**Example**
To use lumberjack with the standard library's log package, just pass it into the SetOutput function when your application starts.
Code:
```go
log.SetOutput(&lumberjack.Logger{
Filename: "/var/log/myapp/foo.log",
MaxSize: 500, // megabytes
MaxBackups: 3,
MaxAge: 28, //days
Compress: true, // disabled by default
})
```
## type Logger
``` go
type Logger struct {
// Filename is the file to write logs to. Backup log files will be retained
// in the same directory. It uses <processname>-lumberjack.log in
// os.TempDir() if empty.
Filename string `json:"filename" yaml:"filename"`
// MaxSize is the maximum size in megabytes of the log file before it gets
// rotated. It defaults to 100 megabytes.
MaxSize int `json:"maxsize" yaml:"maxsize"`
// MaxAge is the maximum number of days to retain old log files based on the
// timestamp encoded in their filename. Note that a day is defined as 24
// hours and may not exactly correspond to calendar days due to daylight
// savings, leap seconds, etc. The default is not to remove old log files
// based on age.
MaxAge int `json:"maxage" yaml:"maxage"`
// MaxBackups is the maximum number of old log files to retain. The default
// is to retain all old log files (though MaxAge may still cause them to get
// deleted.)
MaxBackups int `json:"maxbackups" yaml:"maxbackups"`
// LocalTime determines if the time used for formatting the timestamps in
// backup files is the computer's local time. The default is to use UTC
// time.
LocalTime bool `json:"localtime" yaml:"localtime"`
// Compress determines if the rotated log files should be compressed
// using gzip. The default is not to perform compression.
Compress bool `json:"compress" yaml:"compress"`
// contains filtered or unexported fields
}
```
Logger is an io.WriteCloser that writes to the specified filename.
Logger opens or creates the logfile on first Write. If the file exists and
is less than MaxSize megabytes, lumberjack will open and append to that file.
If the file exists and its size is >= MaxSize megabytes, the file is renamed
by putting the current time in a timestamp in the name immediately before the
file's extension (or the end of the filename if there's no extension). A new
log file is then created using the original filename.
Whenever a write would cause the current log file to exceed MaxSize megabytes,
the current file is closed, renamed, and a new log file created with the
original name. Thus, the filename you give Logger is always the "current" log
file.
Backups use the log file name given to Logger, in the form `name-timestamp.ext`
where name is the filename without the extension, timestamp is the time at which
the log was rotated formatted with the time.Time format of
`2006-01-02T15-04-05.000` and the extension is the original extension. For
example, if your Logger.Filename is `/var/log/foo/server.log`, a backup created
at 6:30pm on Nov 11 2016 would use the filename
`/var/log/foo/server-2016-11-11T18-30-00.000.log`
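As a quick illustration (not part of lumberjack's API), the backup name is just the original name with the rotation time formatted using that layout:

```go
t := time.Date(2016, time.November, 11, 18, 30, 0, 0, time.UTC)
name := fmt.Sprintf("server-%s.log", t.Format("2006-01-02T15-04-05.000"))
// name == "server-2016-11-11T18-30-00.000.log"
```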
### Cleaning Up Old Log Files
Whenever a new logfile gets created, old log files may be deleted. The most
recent files according to the encoded timestamp will be retained, up to a
number equal to MaxBackups (or all of them if MaxBackups is 0). Any files
with an encoded timestamp older than MaxAge days are deleted, regardless of
MaxBackups. Note that the time encoded in the timestamp is the rotation
time, which may differ from the last time that file was written to.
If MaxBackups and MaxAge are both 0, no old log files will be deleted.
### func (\*Logger) Close
``` go
func (l *Logger) Close() error
```
Close implements io.Closer, and closes the current logfile.
### func (\*Logger) Rotate
``` go
func (l *Logger) Rotate() error
```
Rotate causes Logger to close the existing log file and immediately create a
new one. This is a helper function for applications that want to initiate
rotations outside of the normal rotation rules, such as in response to
SIGHUP. After rotating, this initiates a cleanup of old log files according
to the normal rules.
**Example**
Example of how to rotate in response to SIGHUP.
Code:
```go
l := &lumberjack.Logger{}
log.SetOutput(l)
c := make(chan os.Signal, 1)
signal.Notify(c, syscall.SIGHUP)
go func() {
for {
<-c
l.Rotate()
}
}()
```
### func (\*Logger) Write
``` go
func (l *Logger) Write(p []byte) (n int, err error)
```
Write implements io.Writer. If a write would cause the log file to be larger
than MaxSize, the file is closed, renamed to include a timestamp of the
current time, and a new log file is created using the original log file name.
If the length of the write is greater than MaxSize, an error is returned.
- - -
Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md)

11
vendor/gopkg.in/natefinch/lumberjack.v2/chown.go generated vendored Normal file
View File

@ -0,0 +1,11 @@
// +build !linux
package lumberjack
import (
"os"
)
func chown(_ string, _ os.FileInfo) error {
return nil
}

19
vendor/gopkg.in/natefinch/lumberjack.v2/chown_linux.go generated vendored Normal file
View File

@ -0,0 +1,19 @@
package lumberjack
import (
"os"
"syscall"
)
// os_Chown is a var so we can mock it out during tests.
var os_Chown = os.Chown
func chown(name string, info os.FileInfo) error {
f, err := os.OpenFile(name, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, info.Mode())
if err != nil {
return err
}
f.Close()
stat := info.Sys().(*syscall.Stat_t)
return os_Chown(name, int(stat.Uid), int(stat.Gid))
}

541
vendor/gopkg.in/natefinch/lumberjack.v2/lumberjack.go generated vendored Normal file
View File

@ -0,0 +1,541 @@
// Package lumberjack provides a rolling logger.
//
// Note that this is v2.0 of lumberjack, and should be imported using gopkg.in
// thusly:
//
// import "gopkg.in/natefinch/lumberjack.v2"
//
// The package name remains simply lumberjack, and the code resides at
// https://github.com/natefinch/lumberjack under the v2.0 branch.
//
// Lumberjack is intended to be one part of a logging infrastructure.
// It is not an all-in-one solution, but instead is a pluggable
// component at the bottom of the logging stack that simply controls the files
// to which logs are written.
//
// Lumberjack plays well with any logging package that can write to an
// io.Writer, including the standard library's log package.
//
// Lumberjack assumes that only one process is writing to the output files.
// Using the same lumberjack configuration from multiple processes on the same
// machine will result in improper behavior.
package lumberjack
import (
"compress/gzip"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"sort"
"strings"
"sync"
"time"
)
const (
backupTimeFormat = "2006-01-02T15-04-05.000"
compressSuffix = ".gz"
defaultMaxSize = 100
)
// ensure we always implement io.WriteCloser
var _ io.WriteCloser = (*Logger)(nil)
// Logger is an io.WriteCloser that writes to the specified filename.
//
// Logger opens or creates the logfile on first Write. If the file exists and
// is less than MaxSize megabytes, lumberjack will open and append to that file.
// If the file exists and its size is >= MaxSize megabytes, the file is renamed
// by putting the current time in a timestamp in the name immediately before the
// file's extension (or the end of the filename if there's no extension). A new
// log file is then created using the original filename.
//
// Whenever a write would cause the current log file to exceed MaxSize megabytes,
// the current file is closed, renamed, and a new log file created with the
// original name. Thus, the filename you give Logger is always the "current" log
// file.
//
// Backups use the log file name given to Logger, in the form
// `name-timestamp.ext` where name is the filename without the extension,
// timestamp is the time at which the log was rotated formatted with the
// time.Time format of `2006-01-02T15-04-05.000` and the extension is the
// original extension. For example, if your Logger.Filename is
// `/var/log/foo/server.log`, a backup created at 6:30pm on Nov 11 2016 would
// use the filename `/var/log/foo/server-2016-11-11T18-30-00.000.log`
//
// Cleaning Up Old Log Files
//
// Whenever a new logfile gets created, old log files may be deleted. The most
// recent files according to the encoded timestamp will be retained, up to a
// number equal to MaxBackups (or all of them if MaxBackups is 0). Any files
// with an encoded timestamp older than MaxAge days are deleted, regardless of
// MaxBackups. Note that the time encoded in the timestamp is the rotation
// time, which may differ from the last time that file was written to.
//
// If MaxBackups and MaxAge are both 0, no old log files will be deleted.
type Logger struct {
// Filename is the file to write logs to. Backup log files will be retained
// in the same directory. It uses <processname>-lumberjack.log in
// os.TempDir() if empty.
Filename string `json:"filename" yaml:"filename"`
// MaxSize is the maximum size in megabytes of the log file before it gets
// rotated. It defaults to 100 megabytes.
MaxSize int `json:"maxsize" yaml:"maxsize"`
// MaxAge is the maximum number of days to retain old log files based on the
// timestamp encoded in their filename. Note that a day is defined as 24
// hours and may not exactly correspond to calendar days due to daylight
// savings, leap seconds, etc. The default is not to remove old log files
// based on age.
MaxAge int `json:"maxage" yaml:"maxage"`
// MaxBackups is the maximum number of old log files to retain. The default
// is to retain all old log files (though MaxAge may still cause them to get
// deleted.)
MaxBackups int `json:"maxbackups" yaml:"maxbackups"`
// LocalTime determines if the time used for formatting the timestamps in
// backup files is the computer's local time. The default is to use UTC
// time.
LocalTime bool `json:"localtime" yaml:"localtime"`
// Compress determines if the rotated log files should be compressed
// using gzip. The default is not to perform compression.
Compress bool `json:"compress" yaml:"compress"`
size int64
file *os.File
mu sync.Mutex
millCh chan bool
startMill sync.Once
}
var (
// currentTime exists so it can be mocked out by tests.
currentTime = time.Now
// os_Stat exists so it can be mocked out by tests.
os_Stat = os.Stat
// megabyte is the conversion factor between MaxSize and bytes. It is a
// variable so tests can mock it out and not need to write megabytes of data
// to disk.
megabyte = 1024 * 1024
)
// Write implements io.Writer. If a write would cause the log file to be larger
// than MaxSize, the file is closed, renamed to include a timestamp of the
// current time, and a new log file is created using the original log file name.
// If the length of the write is greater than MaxSize, an error is returned.
func (l *Logger) Write(p []byte) (n int, err error) {
l.mu.Lock()
defer l.mu.Unlock()
writeLen := int64(len(p))
if writeLen > l.max() {
return 0, fmt.Errorf(
"write length %d exceeds maximum file size %d", writeLen, l.max(),
)
}
if l.file == nil {
if err = l.openExistingOrNew(len(p)); err != nil {
return 0, err
}
}
if l.size+writeLen > l.max() {
if err := l.rotate(); err != nil {
return 0, err
}
}
n, err = l.file.Write(p)
l.size += int64(n)
return n, err
}
// Close implements io.Closer, and closes the current logfile.
func (l *Logger) Close() error {
l.mu.Lock()
defer l.mu.Unlock()
return l.close()
}
// close closes the file if it is open.
func (l *Logger) close() error {
if l.file == nil {
return nil
}
err := l.file.Close()
l.file = nil
return err
}
// Rotate causes Logger to close the existing log file and immediately create a
// new one. This is a helper function for applications that want to initiate
// rotations outside of the normal rotation rules, such as in response to
// SIGHUP. After rotating, this initiates compression and removal of old log
// files according to the configuration.
func (l *Logger) Rotate() error {
l.mu.Lock()
defer l.mu.Unlock()
return l.rotate()
}
// rotate closes the current file, moves it aside with a timestamp in the name,
// (if it exists), opens a new file with the original filename, and then runs
// post-rotation processing and removal.
func (l *Logger) rotate() error {
if err := l.close(); err != nil {
return err
}
if err := l.openNew(); err != nil {
return err
}
l.mill()
return nil
}
// openNew opens a new log file for writing, moving any old log file out of the
// way. This method assumes the file has already been closed.
func (l *Logger) openNew() error {
err := os.MkdirAll(l.dir(), 0744)
if err != nil {
return fmt.Errorf("can't make directories for new logfile: %s", err)
}
name := l.filename()
mode := os.FileMode(0644)
info, err := os_Stat(name)
if err == nil {
// Copy the mode off the old logfile.
mode = info.Mode()
// move the existing file
newname := backupName(name, l.LocalTime)
if err := os.Rename(name, newname); err != nil {
return fmt.Errorf("can't rename log file: %s", err)
}
// this is a no-op anywhere but linux
if err := chown(name, info); err != nil {
return err
}
}
// we use truncate here because this should only get called when we've moved
// the file ourselves. if someone else creates the file in the meantime,
// just wipe out the contents.
f, err := os.OpenFile(name, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, mode)
if err != nil {
return fmt.Errorf("can't open new logfile: %s", err)
}
l.file = f
l.size = 0
return nil
}
// backupName creates a new filename from the given name, inserting a timestamp
// between the filename and the extension, using the local time if requested
// (otherwise UTC).
func backupName(name string, local bool) string {
dir := filepath.Dir(name)
filename := filepath.Base(name)
ext := filepath.Ext(filename)
prefix := filename[:len(filename)-len(ext)]
t := currentTime()
if !local {
t = t.UTC()
}
timestamp := t.Format(backupTimeFormat)
return filepath.Join(dir, fmt.Sprintf("%s-%s%s", prefix, timestamp, ext))
}
// openExistingOrNew opens the logfile if it exists and if the current write
// would not put it over MaxSize. If there is no such file or the write would
// put it over the MaxSize, a new file is created.
func (l *Logger) openExistingOrNew(writeLen int) error {
l.mill()
filename := l.filename()
info, err := os_Stat(filename)
if os.IsNotExist(err) {
return l.openNew()
}
if err != nil {
return fmt.Errorf("error getting log file info: %s", err)
}
if info.Size()+int64(writeLen) >= l.max() {
return l.rotate()
}
file, err := os.OpenFile(filename, os.O_APPEND|os.O_WRONLY, 0644)
if err != nil {
// if we fail to open the old log file for some reason, just ignore
// it and open a new log file.
return l.openNew()
}
l.file = file
l.size = info.Size()
return nil
}
// filename generates the name of the logfile from the current time.
func (l *Logger) filename() string {
if l.Filename != "" {
return l.Filename
}
name := filepath.Base(os.Args[0]) + "-lumberjack.log"
return filepath.Join(os.TempDir(), name)
}
// millRunOnce performs compression and removal of stale log files.
// Log files are compressed if enabled via configuration and old log
// files are removed, keeping at most l.MaxBackups files, as long as
// none of them are older than MaxAge.
func (l *Logger) millRunOnce() error {
if l.MaxBackups == 0 && l.MaxAge == 0 && !l.Compress {
return nil
}
files, err := l.oldLogFiles()
if err != nil {
return err
}
var compress, remove []logInfo
if l.MaxBackups > 0 && l.MaxBackups < len(files) {
preserved := make(map[string]bool)
var remaining []logInfo
for _, f := range files {
// Only count the uncompressed log file or the
// compressed log file, not both.
fn := f.Name()
if strings.HasSuffix(fn, compressSuffix) {
fn = fn[:len(fn)-len(compressSuffix)]
}
preserved[fn] = true
if len(preserved) > l.MaxBackups {
remove = append(remove, f)
} else {
remaining = append(remaining, f)
}
}
files = remaining
}
if l.MaxAge > 0 {
diff := time.Duration(int64(24*time.Hour) * int64(l.MaxAge))
cutoff := currentTime().Add(-1 * diff)
var remaining []logInfo
for _, f := range files {
if f.timestamp.Before(cutoff) {
remove = append(remove, f)
} else {
remaining = append(remaining, f)
}
}
files = remaining
}
if l.Compress {
for _, f := range files {
if !strings.HasSuffix(f.Name(), compressSuffix) {
compress = append(compress, f)
}
}
}
for _, f := range remove {
errRemove := os.Remove(filepath.Join(l.dir(), f.Name()))
if err == nil && errRemove != nil {
err = errRemove
}
}
for _, f := range compress {
fn := filepath.Join(l.dir(), f.Name())
errCompress := compressLogFile(fn, fn+compressSuffix)
if err == nil && errCompress != nil {
err = errCompress
}
}
return err
}
// millRun runs in a goroutine to manage post-rotation compression and removal
// of old log files.
func (l *Logger) millRun() {
for range l.millCh {
// what am I going to do, log this?
_ = l.millRunOnce()
}
}
// mill performs post-rotation compression and removal of stale log files,
// starting the mill goroutine if necessary.
func (l *Logger) mill() {
l.startMill.Do(func() {
l.millCh = make(chan bool, 1)
go l.millRun()
})
select {
case l.millCh <- true:
default:
}
}
// oldLogFiles returns the list of backup log files stored in the same
// directory as the current log file, sorted by the timestamp encoded in
// their filenames (newest first).
func (l *Logger) oldLogFiles() ([]logInfo, error) {
files, err := ioutil.ReadDir(l.dir())
if err != nil {
return nil, fmt.Errorf("can't read log file directory: %s", err)
}
logFiles := []logInfo{}
prefix, ext := l.prefixAndExt()
for _, f := range files {
if f.IsDir() {
continue
}
if t, err := l.timeFromName(f.Name(), prefix, ext); err == nil {
logFiles = append(logFiles, logInfo{t, f})
continue
}
if t, err := l.timeFromName(f.Name(), prefix, ext+compressSuffix); err == nil {
logFiles = append(logFiles, logInfo{t, f})
continue
}
// error parsing means that the suffix at the end was not generated
// by lumberjack, and therefore it's not a backup file.
}
sort.Sort(byFormatTime(logFiles))
return logFiles, nil
}
// timeFromName extracts the formatted time from the filename by stripping off
// the filename's prefix and extension. This prevents someone's filename from
// confusing time.parse.
func (l *Logger) timeFromName(filename, prefix, ext string) (time.Time, error) {
if !strings.HasPrefix(filename, prefix) {
return time.Time{}, errors.New("mismatched prefix")
}
if !strings.HasSuffix(filename, ext) {
return time.Time{}, errors.New("mismatched extension")
}
ts := filename[len(prefix) : len(filename)-len(ext)]
return time.Parse(backupTimeFormat, ts)
}
// max returns the maximum size in bytes of log files before rolling.
func (l *Logger) max() int64 {
if l.MaxSize == 0 {
return int64(defaultMaxSize * megabyte)
}
return int64(l.MaxSize) * int64(megabyte)
}
// dir returns the directory for the current filename.
func (l *Logger) dir() string {
return filepath.Dir(l.filename())
}
// prefixAndExt returns the filename part and extension part from the Logger's
// filename.
func (l *Logger) prefixAndExt() (prefix, ext string) {
filename := filepath.Base(l.filename())
ext = filepath.Ext(filename)
prefix = filename[:len(filename)-len(ext)] + "-"
return prefix, ext
}
// compressLogFile compresses the given log file, removing the
// uncompressed log file if successful.
func compressLogFile(src, dst string) (err error) {
f, err := os.Open(src)
if err != nil {
return fmt.Errorf("failed to open log file: %v", err)
}
defer f.Close()
fi, err := os_Stat(src)
if err != nil {
return fmt.Errorf("failed to stat log file: %v", err)
}
if err := chown(dst, fi); err != nil {
return fmt.Errorf("failed to chown compressed log file: %v", err)
}
// If this file already exists, we presume it was created by
// a previous attempt to compress the log file.
gzf, err := os.OpenFile(dst, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, fi.Mode())
if err != nil {
return fmt.Errorf("failed to open compressed log file: %v", err)
}
defer gzf.Close()
gz := gzip.NewWriter(gzf)
defer func() {
if err != nil {
os.Remove(dst)
err = fmt.Errorf("failed to compress log file: %v", err)
}
}()
if _, err := io.Copy(gz, f); err != nil {
return err
}
if err := gz.Close(); err != nil {
return err
}
if err := gzf.Close(); err != nil {
return err
}
if err := f.Close(); err != nil {
return err
}
if err := os.Remove(src); err != nil {
return err
}
return nil
}
// logInfo is a convenience struct to return the filename and its embedded
// timestamp.
type logInfo struct {
timestamp time.Time
os.FileInfo
}
// byFormatTime sorts by newest time formatted in the name.
type byFormatTime []logInfo
func (b byFormatTime) Less(i, j int) bool {
return b[i].timestamp.After(b[j].timestamp)
}
func (b byFormatTime) Swap(i, j int) {
b[i], b[j] = b[j], b[i]
}
func (b byFormatTime) Len() int {
return len(b)
}

View File

@ -0,0 +1,37 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package install installs the experimental API group, making it available as
// an option to all of the API encoding/decoding machinery.
package install
import (
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apiserver/pkg/apis/audit"
"k8s.io/apiserver/pkg/apis/audit/v1"
"k8s.io/apiserver/pkg/apis/audit/v1alpha1"
"k8s.io/apiserver/pkg/apis/audit/v1beta1"
)
// Install registers the API group and adds types to a scheme
func Install(scheme *runtime.Scheme) {
utilruntime.Must(audit.AddToScheme(scheme))
utilruntime.Must(v1.AddToScheme(scheme))
utilruntime.Must(v1beta1.AddToScheme(scheme))
utilruntime.Must(v1alpha1.AddToScheme(scheme))
utilruntime.Must(scheme.SetVersionPriority(v1.SchemeGroupVersion, v1beta1.SchemeGroupVersion, v1alpha1.SchemeGroupVersion))
}
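A hedged usage sketch (not from this commit): Install is called against a caller-owned scheme. The import path shown assumes the conventional location of this package at k8s.io/apiserver/pkg/apis/audit/install:

```go
package main

import (
	"k8s.io/apimachinery/pkg/runtime"

	auditinstall "k8s.io/apiserver/pkg/apis/audit/install" // assumed path
)

func main() {
	scheme := runtime.NewScheme()
	// Registers the internal audit types plus v1, v1beta1 and v1alpha1,
	// with v1 as the highest-priority version.
	auditinstall.Install(scheme)
}
```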

19
vendor/k8s.io/apiserver/pkg/apis/config/doc.go generated vendored Normal file
View File

@ -0,0 +1,19 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// +k8s:deepcopy-gen=package
package config // import "k8s.io/apiserver/pkg/apis/config"

53
vendor/k8s.io/apiserver/pkg/apis/config/register.go generated vendored Normal file
View File

@ -0,0 +1,53 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
var (
// SchemeBuilder points to a list of functions added to Scheme.
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
// AddToScheme adds this group to a scheme.
AddToScheme = SchemeBuilder.AddToScheme
)
// GroupName is the group name used in this package.
const GroupName = "apiserver.config.k8s.io"
// SchemeGroupVersion is the group version used to register these objects.
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
// Kind takes an unqualified kind and returns a Group qualified GroupKind.
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}
// Resource takes an unqualified resource and returns a Group qualified GroupResource.
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
func addKnownTypes(scheme *runtime.Scheme) error {
// TODO this will get cleaned up when the scheme types are fixed
scheme.AddKnownTypes(SchemeGroupVersion,
&EncryptionConfiguration{},
)
return nil
}

100
vendor/k8s.io/apiserver/pkg/apis/config/types.go generated vendored Normal file
View File

@ -0,0 +1,100 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// EncryptionConfiguration stores the complete configuration for encryption providers.
type EncryptionConfiguration struct {
metav1.TypeMeta
// resources is a list containing resources, and their corresponding encryption providers.
Resources []ResourceConfiguration
}
// ResourceConfiguration stores per resource configuration.
type ResourceConfiguration struct {
// resources is a list of kubernetes resources which have to be encrypted.
Resources []string
// providers is a list of transformers to be used for reading and writing the resources to disk.
// eg: aesgcm, aescbc, secretbox, identity.
Providers []ProviderConfiguration
}
// ProviderConfiguration stores the provided configuration for an encryption provider.
type ProviderConfiguration struct {
// aesgcm is the configuration for the AES-GCM transformer.
AESGCM *AESConfiguration
// aescbc is the configuration for the AES-CBC transformer.
AESCBC *AESConfiguration
// secretbox is the configuration for the Secretbox based transformer.
Secretbox *SecretboxConfiguration
// identity is the (empty) configuration for the identity transformer.
Identity *IdentityConfiguration
// kms contains the name, cache size and path to configuration file for a KMS based envelope transformer.
KMS *KMSConfiguration
}
// AESConfiguration contains the API configuration for an AES transformer.
type AESConfiguration struct {
// keys is a list of keys to be used for creating the AES transformer.
// Each key has to be 32 bytes long for AES-CBC and 16, 24 or 32 bytes for AES-GCM.
Keys []Key
}
// SecretboxConfiguration contains the API configuration for a Secretbox transformer.
type SecretboxConfiguration struct {
// keys is a list of keys to be used for creating the Secretbox transformer.
// Each key has to be 32 bytes long.
Keys []Key
}
// Key contains name and secret of the provided key for a transformer.
type Key struct {
// name is the name of the key to be used while storing data to disk.
Name string
// secret is the actual key, encoded in base64.
Secret string
}
// String implements Stringer interface in a log safe way.
func (k Key) String() string {
return fmt.Sprintf("Name: %s, Secret: [REDACTED]", k.Name)
}
// IdentityConfiguration is an empty struct to allow identity transformer in provider configuration.
type IdentityConfiguration struct{}
// KMSConfiguration contains the name, cache size and path to configuration file for a KMS based envelope transformer.
type KMSConfiguration struct {
// name is the name of the KMS plugin to be used.
Name string
// cachesize is the maximum number of secrets which are cached in memory. The default value is 1000.
// Set to a negative value to disable caching.
// +optional
CacheSize *int32
// endpoint is the gRPC server listening address, for example "unix:///var/run/kms-provider.sock".
Endpoint string
// timeout for gRPC calls to kms-plugin (ex. 5s). The default is 3 seconds.
// +optional
Timeout *metav1.Duration
}
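For reference, a hedged sketch of a populated configuration value built from the internal types above; the key material is a placeholder, not a real secret:

```go
package main

import config "k8s.io/apiserver/pkg/apis/config"

func main() {
	// Encrypt Secrets with AES-CBC, falling back to the identity provider
	// for reads of still-unencrypted data. The secret below is a placeholder.
	_ = config.EncryptionConfiguration{
		Resources: []config.ResourceConfiguration{{
			Resources: []string{"secrets"},
			Providers: []config.ProviderConfiguration{
				{AESCBC: &config.AESConfiguration{
					Keys: []config.Key{{Name: "key1", Secret: "<base64-encoded 32-byte key>"}},
				}},
				{Identity: &config.IdentityConfiguration{}},
			},
		}},
	}
}
```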

44
vendor/k8s.io/apiserver/pkg/apis/config/v1/defaults.go generated vendored Normal file
View File

@ -0,0 +1,44 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
var (
defaultTimeout = &metav1.Duration{Duration: 3 * time.Second}
defaultCacheSize int32 = 1000
)
func addDefaultingFuncs(scheme *runtime.Scheme) error {
return RegisterDefaults(scheme)
}
// SetDefaults_KMSConfiguration applies defaults to KMSConfiguration.
func SetDefaults_KMSConfiguration(obj *KMSConfiguration) {
if obj.Timeout == nil {
obj.Timeout = defaultTimeout
}
if obj.CacheSize == nil {
obj.CacheSize = &defaultCacheSize
}
}
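A hedged illustration of the defaulting behavior above, using the constants defined in this file; the plugin name is arbitrary:

```go
package main

import (
	"fmt"

	apiserverv1 "k8s.io/apiserver/pkg/apis/config/v1"
)

func main() {
	kms := &apiserverv1.KMSConfiguration{Name: "example-kms"} // Timeout and CacheSize left unset
	apiserverv1.SetDefaults_KMSConfiguration(kms)
	fmt.Println(kms.Timeout.Duration, *kms.CacheSize) // 3s 1000
}
```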

23
vendor/k8s.io/apiserver/pkg/apis/config/v1/doc.go generated vendored Normal file
View File

@ -0,0 +1,23 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// +k8s:conversion-gen=k8s.io/apiserver/pkg/apis/config
// +k8s:deepcopy-gen=package
// +k8s:defaulter-gen=TypeMeta
// +groupName=apiserver.config.k8s.io
// Package v1 is the v1 version of the API.
package v1

53
vendor/k8s.io/apiserver/pkg/apis/config/v1/register.go generated vendored Normal file
View File

@ -0,0 +1,53 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package.
const GroupName = "apiserver.config.k8s.io"
// SchemeGroupVersion is the group version used to register these objects.
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
var (
// SchemeBuilder points to a list of functions added to Scheme.
SchemeBuilder runtime.SchemeBuilder
localSchemeBuilder = &SchemeBuilder
// AddToScheme adds this group to a scheme.
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(addKnownTypes)
localSchemeBuilder.Register(addDefaultingFuncs)
}
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&EncryptionConfiguration{},
)
// also register into the v1 group as EncryptionConfig (due to a docs bug)
scheme.AddKnownTypeWithName(schema.GroupVersionKind{Group: "", Version: "v1", Kind: "EncryptionConfig"}, &EncryptionConfiguration{})
return nil
}

100
vendor/k8s.io/apiserver/pkg/apis/config/v1/types.go generated vendored Normal file
View File

@ -0,0 +1,100 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// EncryptionConfiguration stores the complete configuration for encryption providers.
type EncryptionConfiguration struct {
metav1.TypeMeta
// resources is a list containing resources, and their corresponding encryption providers.
Resources []ResourceConfiguration `json:"resources"`
}
// ResourceConfiguration stores per resource configuration.
type ResourceConfiguration struct {
// resources is a list of kubernetes resources which have to be encrypted.
Resources []string `json:"resources"`
// providers is a list of transformers to be used for reading and writing the resources to disk.
// eg: aesgcm, aescbc, secretbox, identity.
Providers []ProviderConfiguration `json:"providers"`
}
// ProviderConfiguration stores the provided configuration for an encryption provider.
type ProviderConfiguration struct {
// aesgcm is the configuration for the AES-GCM transformer.
AESGCM *AESConfiguration `json:"aesgcm,omitempty"`
// aescbc is the configuration for the AES-CBC transformer.
AESCBC *AESConfiguration `json:"aescbc,omitempty"`
// secretbox is the configuration for the Secretbox based transformer.
Secretbox *SecretboxConfiguration `json:"secretbox,omitempty"`
// identity is the (empty) configuration for the identity transformer.
Identity *IdentityConfiguration `json:"identity,omitempty"`
// kms contains the name, cache size and path to configuration file for a KMS based envelope transformer.
KMS *KMSConfiguration `json:"kms,omitempty"`
}
// AESConfiguration contains the API configuration for an AES transformer.
type AESConfiguration struct {
// keys is a list of keys to be used for creating the AES transformer.
// Each key has to be 32 bytes long for AES-CBC and 16, 24 or 32 bytes for AES-GCM.
Keys []Key `json:"keys"`
}
// SecretboxConfiguration contains the API configuration for a Secretbox transformer.
type SecretboxConfiguration struct {
// keys is a list of keys to be used for creating the Secretbox transformer.
// Each key has to be 32 bytes long.
Keys []Key `json:"keys"`
}
// Key contains name and secret of the provided key for a transformer.
type Key struct {
// name is the name of the key to be used while storing data to disk.
Name string `json:"name"`
// secret is the actual key, encoded in base64.
Secret string `json:"secret"`
}
// String implements Stringer interface in a log safe way.
func (k Key) String() string {
return fmt.Sprintf("Name: %s, Secret: [REDACTED]", k.Name)
}
// IdentityConfiguration is an empty struct to allow identity transformer in provider configuration.
type IdentityConfiguration struct{}
// KMSConfiguration contains the name, cache size and path to configuration file for a KMS based envelope transformer.
type KMSConfiguration struct {
// name is the name of the KMS plugin to be used.
Name string `json:"name"`
// cachesize is the maximum number of secrets which are cached in memory. The default value is 1000.
// Set to a negative value to disable caching.
// +optional
CacheSize *int32 `json:"cachesize,omitempty"`
// endpoint is the gRPC server listening address, for example "unix:///var/run/kms-provider.sock".
Endpoint string `json:"endpoint"`
// timeout for gRPC calls to kms-plugin (ex. 5s). The default is 3 seconds.
// +optional
Timeout *metav1.Duration `json:"timeout,omitempty"`
}

View File

@ -0,0 +1,296 @@
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1
import (
unsafe "unsafe"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
config "k8s.io/apiserver/pkg/apis/config"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*AESConfiguration)(nil), (*config.AESConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_AESConfiguration_To_config_AESConfiguration(a.(*AESConfiguration), b.(*config.AESConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.AESConfiguration)(nil), (*AESConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_AESConfiguration_To_v1_AESConfiguration(a.(*config.AESConfiguration), b.(*AESConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*EncryptionConfiguration)(nil), (*config.EncryptionConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_EncryptionConfiguration_To_config_EncryptionConfiguration(a.(*EncryptionConfiguration), b.(*config.EncryptionConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.EncryptionConfiguration)(nil), (*EncryptionConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_EncryptionConfiguration_To_v1_EncryptionConfiguration(a.(*config.EncryptionConfiguration), b.(*EncryptionConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*IdentityConfiguration)(nil), (*config.IdentityConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_IdentityConfiguration_To_config_IdentityConfiguration(a.(*IdentityConfiguration), b.(*config.IdentityConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.IdentityConfiguration)(nil), (*IdentityConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_IdentityConfiguration_To_v1_IdentityConfiguration(a.(*config.IdentityConfiguration), b.(*IdentityConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*KMSConfiguration)(nil), (*config.KMSConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_KMSConfiguration_To_config_KMSConfiguration(a.(*KMSConfiguration), b.(*config.KMSConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.KMSConfiguration)(nil), (*KMSConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_KMSConfiguration_To_v1_KMSConfiguration(a.(*config.KMSConfiguration), b.(*KMSConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*Key)(nil), (*config.Key)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_Key_To_config_Key(a.(*Key), b.(*config.Key), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.Key)(nil), (*Key)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_Key_To_v1_Key(a.(*config.Key), b.(*Key), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*ProviderConfiguration)(nil), (*config.ProviderConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ProviderConfiguration_To_config_ProviderConfiguration(a.(*ProviderConfiguration), b.(*config.ProviderConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.ProviderConfiguration)(nil), (*ProviderConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_ProviderConfiguration_To_v1_ProviderConfiguration(a.(*config.ProviderConfiguration), b.(*ProviderConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*ResourceConfiguration)(nil), (*config.ResourceConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ResourceConfiguration_To_config_ResourceConfiguration(a.(*ResourceConfiguration), b.(*config.ResourceConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.ResourceConfiguration)(nil), (*ResourceConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_ResourceConfiguration_To_v1_ResourceConfiguration(a.(*config.ResourceConfiguration), b.(*ResourceConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*SecretboxConfiguration)(nil), (*config.SecretboxConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_SecretboxConfiguration_To_config_SecretboxConfiguration(a.(*SecretboxConfiguration), b.(*config.SecretboxConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.SecretboxConfiguration)(nil), (*SecretboxConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_SecretboxConfiguration_To_v1_SecretboxConfiguration(a.(*config.SecretboxConfiguration), b.(*SecretboxConfiguration), scope)
}); err != nil {
return err
}
return nil
}
func autoConvert_v1_AESConfiguration_To_config_AESConfiguration(in *AESConfiguration, out *config.AESConfiguration, s conversion.Scope) error {
out.Keys = *(*[]config.Key)(unsafe.Pointer(&in.Keys))
return nil
}
// Convert_v1_AESConfiguration_To_config_AESConfiguration is an autogenerated conversion function.
func Convert_v1_AESConfiguration_To_config_AESConfiguration(in *AESConfiguration, out *config.AESConfiguration, s conversion.Scope) error {
return autoConvert_v1_AESConfiguration_To_config_AESConfiguration(in, out, s)
}
func autoConvert_config_AESConfiguration_To_v1_AESConfiguration(in *config.AESConfiguration, out *AESConfiguration, s conversion.Scope) error {
out.Keys = *(*[]Key)(unsafe.Pointer(&in.Keys))
return nil
}
// Convert_config_AESConfiguration_To_v1_AESConfiguration is an autogenerated conversion function.
func Convert_config_AESConfiguration_To_v1_AESConfiguration(in *config.AESConfiguration, out *AESConfiguration, s conversion.Scope) error {
return autoConvert_config_AESConfiguration_To_v1_AESConfiguration(in, out, s)
}
func autoConvert_v1_EncryptionConfiguration_To_config_EncryptionConfiguration(in *EncryptionConfiguration, out *config.EncryptionConfiguration, s conversion.Scope) error {
out.Resources = *(*[]config.ResourceConfiguration)(unsafe.Pointer(&in.Resources))
return nil
}
// Convert_v1_EncryptionConfiguration_To_config_EncryptionConfiguration is an autogenerated conversion function.
func Convert_v1_EncryptionConfiguration_To_config_EncryptionConfiguration(in *EncryptionConfiguration, out *config.EncryptionConfiguration, s conversion.Scope) error {
return autoConvert_v1_EncryptionConfiguration_To_config_EncryptionConfiguration(in, out, s)
}
func autoConvert_config_EncryptionConfiguration_To_v1_EncryptionConfiguration(in *config.EncryptionConfiguration, out *EncryptionConfiguration, s conversion.Scope) error {
out.Resources = *(*[]ResourceConfiguration)(unsafe.Pointer(&in.Resources))
return nil
}
// Convert_config_EncryptionConfiguration_To_v1_EncryptionConfiguration is an autogenerated conversion function.
func Convert_config_EncryptionConfiguration_To_v1_EncryptionConfiguration(in *config.EncryptionConfiguration, out *EncryptionConfiguration, s conversion.Scope) error {
return autoConvert_config_EncryptionConfiguration_To_v1_EncryptionConfiguration(in, out, s)
}
func autoConvert_v1_IdentityConfiguration_To_config_IdentityConfiguration(in *IdentityConfiguration, out *config.IdentityConfiguration, s conversion.Scope) error {
return nil
}
// Convert_v1_IdentityConfiguration_To_config_IdentityConfiguration is an autogenerated conversion function.
func Convert_v1_IdentityConfiguration_To_config_IdentityConfiguration(in *IdentityConfiguration, out *config.IdentityConfiguration, s conversion.Scope) error {
return autoConvert_v1_IdentityConfiguration_To_config_IdentityConfiguration(in, out, s)
}
func autoConvert_config_IdentityConfiguration_To_v1_IdentityConfiguration(in *config.IdentityConfiguration, out *IdentityConfiguration, s conversion.Scope) error {
return nil
}
// Convert_config_IdentityConfiguration_To_v1_IdentityConfiguration is an autogenerated conversion function.
func Convert_config_IdentityConfiguration_To_v1_IdentityConfiguration(in *config.IdentityConfiguration, out *IdentityConfiguration, s conversion.Scope) error {
return autoConvert_config_IdentityConfiguration_To_v1_IdentityConfiguration(in, out, s)
}
func autoConvert_v1_KMSConfiguration_To_config_KMSConfiguration(in *KMSConfiguration, out *config.KMSConfiguration, s conversion.Scope) error {
out.Name = in.Name
out.CacheSize = (*int32)(unsafe.Pointer(in.CacheSize))
out.Endpoint = in.Endpoint
out.Timeout = (*metav1.Duration)(unsafe.Pointer(in.Timeout))
return nil
}
// Convert_v1_KMSConfiguration_To_config_KMSConfiguration is an autogenerated conversion function.
func Convert_v1_KMSConfiguration_To_config_KMSConfiguration(in *KMSConfiguration, out *config.KMSConfiguration, s conversion.Scope) error {
return autoConvert_v1_KMSConfiguration_To_config_KMSConfiguration(in, out, s)
}
func autoConvert_config_KMSConfiguration_To_v1_KMSConfiguration(in *config.KMSConfiguration, out *KMSConfiguration, s conversion.Scope) error {
out.Name = in.Name
out.CacheSize = (*int32)(unsafe.Pointer(in.CacheSize))
out.Endpoint = in.Endpoint
out.Timeout = (*metav1.Duration)(unsafe.Pointer(in.Timeout))
return nil
}
// Convert_config_KMSConfiguration_To_v1_KMSConfiguration is an autogenerated conversion function.
func Convert_config_KMSConfiguration_To_v1_KMSConfiguration(in *config.KMSConfiguration, out *KMSConfiguration, s conversion.Scope) error {
return autoConvert_config_KMSConfiguration_To_v1_KMSConfiguration(in, out, s)
}
func autoConvert_v1_Key_To_config_Key(in *Key, out *config.Key, s conversion.Scope) error {
out.Name = in.Name
out.Secret = in.Secret
return nil
}
// Convert_v1_Key_To_config_Key is an autogenerated conversion function.
func Convert_v1_Key_To_config_Key(in *Key, out *config.Key, s conversion.Scope) error {
return autoConvert_v1_Key_To_config_Key(in, out, s)
}
func autoConvert_config_Key_To_v1_Key(in *config.Key, out *Key, s conversion.Scope) error {
out.Name = in.Name
out.Secret = in.Secret
return nil
}
// Convert_config_Key_To_v1_Key is an autogenerated conversion function.
func Convert_config_Key_To_v1_Key(in *config.Key, out *Key, s conversion.Scope) error {
return autoConvert_config_Key_To_v1_Key(in, out, s)
}
func autoConvert_v1_ProviderConfiguration_To_config_ProviderConfiguration(in *ProviderConfiguration, out *config.ProviderConfiguration, s conversion.Scope) error {
out.AESGCM = (*config.AESConfiguration)(unsafe.Pointer(in.AESGCM))
out.AESCBC = (*config.AESConfiguration)(unsafe.Pointer(in.AESCBC))
out.Secretbox = (*config.SecretboxConfiguration)(unsafe.Pointer(in.Secretbox))
out.Identity = (*config.IdentityConfiguration)(unsafe.Pointer(in.Identity))
out.KMS = (*config.KMSConfiguration)(unsafe.Pointer(in.KMS))
return nil
}
// Convert_v1_ProviderConfiguration_To_config_ProviderConfiguration is an autogenerated conversion function.
func Convert_v1_ProviderConfiguration_To_config_ProviderConfiguration(in *ProviderConfiguration, out *config.ProviderConfiguration, s conversion.Scope) error {
return autoConvert_v1_ProviderConfiguration_To_config_ProviderConfiguration(in, out, s)
}
func autoConvert_config_ProviderConfiguration_To_v1_ProviderConfiguration(in *config.ProviderConfiguration, out *ProviderConfiguration, s conversion.Scope) error {
out.AESGCM = (*AESConfiguration)(unsafe.Pointer(in.AESGCM))
out.AESCBC = (*AESConfiguration)(unsafe.Pointer(in.AESCBC))
out.Secretbox = (*SecretboxConfiguration)(unsafe.Pointer(in.Secretbox))
out.Identity = (*IdentityConfiguration)(unsafe.Pointer(in.Identity))
out.KMS = (*KMSConfiguration)(unsafe.Pointer(in.KMS))
return nil
}
// Convert_config_ProviderConfiguration_To_v1_ProviderConfiguration is an autogenerated conversion function.
func Convert_config_ProviderConfiguration_To_v1_ProviderConfiguration(in *config.ProviderConfiguration, out *ProviderConfiguration, s conversion.Scope) error {
return autoConvert_config_ProviderConfiguration_To_v1_ProviderConfiguration(in, out, s)
}
func autoConvert_v1_ResourceConfiguration_To_config_ResourceConfiguration(in *ResourceConfiguration, out *config.ResourceConfiguration, s conversion.Scope) error {
out.Resources = *(*[]string)(unsafe.Pointer(&in.Resources))
out.Providers = *(*[]config.ProviderConfiguration)(unsafe.Pointer(&in.Providers))
return nil
}
// Convert_v1_ResourceConfiguration_To_config_ResourceConfiguration is an autogenerated conversion function.
func Convert_v1_ResourceConfiguration_To_config_ResourceConfiguration(in *ResourceConfiguration, out *config.ResourceConfiguration, s conversion.Scope) error {
return autoConvert_v1_ResourceConfiguration_To_config_ResourceConfiguration(in, out, s)
}
func autoConvert_config_ResourceConfiguration_To_v1_ResourceConfiguration(in *config.ResourceConfiguration, out *ResourceConfiguration, s conversion.Scope) error {
out.Resources = *(*[]string)(unsafe.Pointer(&in.Resources))
out.Providers = *(*[]ProviderConfiguration)(unsafe.Pointer(&in.Providers))
return nil
}
// Convert_config_ResourceConfiguration_To_v1_ResourceConfiguration is an autogenerated conversion function.
func Convert_config_ResourceConfiguration_To_v1_ResourceConfiguration(in *config.ResourceConfiguration, out *ResourceConfiguration, s conversion.Scope) error {
return autoConvert_config_ResourceConfiguration_To_v1_ResourceConfiguration(in, out, s)
}
func autoConvert_v1_SecretboxConfiguration_To_config_SecretboxConfiguration(in *SecretboxConfiguration, out *config.SecretboxConfiguration, s conversion.Scope) error {
out.Keys = *(*[]config.Key)(unsafe.Pointer(&in.Keys))
return nil
}
// Convert_v1_SecretboxConfiguration_To_config_SecretboxConfiguration is an autogenerated conversion function.
func Convert_v1_SecretboxConfiguration_To_config_SecretboxConfiguration(in *SecretboxConfiguration, out *config.SecretboxConfiguration, s conversion.Scope) error {
return autoConvert_v1_SecretboxConfiguration_To_config_SecretboxConfiguration(in, out, s)
}
func autoConvert_config_SecretboxConfiguration_To_v1_SecretboxConfiguration(in *config.SecretboxConfiguration, out *SecretboxConfiguration, s conversion.Scope) error {
out.Keys = *(*[]Key)(unsafe.Pointer(&in.Keys))
return nil
}
// Convert_config_SecretboxConfiguration_To_v1_SecretboxConfiguration is an autogenerated conversion function.
func Convert_config_SecretboxConfiguration_To_v1_SecretboxConfiguration(in *config.SecretboxConfiguration, out *SecretboxConfiguration, s conversion.Scope) error {
return autoConvert_config_SecretboxConfiguration_To_v1_SecretboxConfiguration(in, out, s)
}
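These generated helpers copy the versioned v1 types to and from the internal config types field by field (slices and simple pointers go through unsafe.Pointer because the memory layouts are identical). Below is a minimal sketch of invoking one directly; the configv1 import path, the provider values, and the nil conversion scope are assumptions for illustration — these particular conversions never consult the scope, and wiring them through a runtime.Scheme is the normal route.

package main

import (
    "fmt"
    "time"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apiserver/pkg/apis/config"
    configv1 "k8s.io/apiserver/pkg/apis/config/v1"
)

func main() {
    cacheSize := int32(100)
    in := &configv1.KMSConfiguration{
        Name:      "my-kms",               // hypothetical provider name
        Endpoint:  "unix:///tmp/kms.sock", // hypothetical socket path
        CacheSize: &cacheSize,
        Timeout:   &metav1.Duration{Duration: 3 * time.Second},
    }
    out := &config.KMSConfiguration{}
    // The generated body only copies fields, so a nil conversion.Scope is enough for this sketch.
    if err := configv1.Convert_v1_KMSConfiguration_To_config_KMSConfiguration(in, out, nil); err != nil {
        panic(err)
    }
    fmt.Println(out.Name, out.Endpoint, *out.CacheSize, out.Timeout.Duration)
}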

View File

@ -0,0 +1,227 @@
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AESConfiguration) DeepCopyInto(out *AESConfiguration) {
*out = *in
if in.Keys != nil {
in, out := &in.Keys, &out.Keys
*out = make([]Key, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AESConfiguration.
func (in *AESConfiguration) DeepCopy() *AESConfiguration {
if in == nil {
return nil
}
out := new(AESConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EncryptionConfiguration) DeepCopyInto(out *EncryptionConfiguration) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.Resources != nil {
in, out := &in.Resources, &out.Resources
*out = make([]ResourceConfiguration, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionConfiguration.
func (in *EncryptionConfiguration) DeepCopy() *EncryptionConfiguration {
if in == nil {
return nil
}
out := new(EncryptionConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *EncryptionConfiguration) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IdentityConfiguration) DeepCopyInto(out *IdentityConfiguration) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityConfiguration.
func (in *IdentityConfiguration) DeepCopy() *IdentityConfiguration {
if in == nil {
return nil
}
out := new(IdentityConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KMSConfiguration) DeepCopyInto(out *KMSConfiguration) {
*out = *in
if in.CacheSize != nil {
in, out := &in.CacheSize, &out.CacheSize
*out = new(int32)
**out = **in
}
if in.Timeout != nil {
in, out := &in.Timeout, &out.Timeout
*out = new(metav1.Duration)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KMSConfiguration.
func (in *KMSConfiguration) DeepCopy() *KMSConfiguration {
if in == nil {
return nil
}
out := new(KMSConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Key) DeepCopyInto(out *Key) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Key.
func (in *Key) DeepCopy() *Key {
if in == nil {
return nil
}
out := new(Key)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ProviderConfiguration) DeepCopyInto(out *ProviderConfiguration) {
*out = *in
if in.AESGCM != nil {
in, out := &in.AESGCM, &out.AESGCM
*out = new(AESConfiguration)
(*in).DeepCopyInto(*out)
}
if in.AESCBC != nil {
in, out := &in.AESCBC, &out.AESCBC
*out = new(AESConfiguration)
(*in).DeepCopyInto(*out)
}
if in.Secretbox != nil {
in, out := &in.Secretbox, &out.Secretbox
*out = new(SecretboxConfiguration)
(*in).DeepCopyInto(*out)
}
if in.Identity != nil {
in, out := &in.Identity, &out.Identity
*out = new(IdentityConfiguration)
**out = **in
}
if in.KMS != nil {
in, out := &in.KMS, &out.KMS
*out = new(KMSConfiguration)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderConfiguration.
func (in *ProviderConfiguration) DeepCopy() *ProviderConfiguration {
if in == nil {
return nil
}
out := new(ProviderConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceConfiguration) DeepCopyInto(out *ResourceConfiguration) {
*out = *in
if in.Resources != nil {
in, out := &in.Resources, &out.Resources
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Providers != nil {
in, out := &in.Providers, &out.Providers
*out = make([]ProviderConfiguration, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceConfiguration.
func (in *ResourceConfiguration) DeepCopy() *ResourceConfiguration {
if in == nil {
return nil
}
out := new(ResourceConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SecretboxConfiguration) DeepCopyInto(out *SecretboxConfiguration) {
*out = *in
if in.Keys != nil {
in, out := &in.Keys, &out.Keys
*out = make([]Key, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretboxConfiguration.
func (in *SecretboxConfiguration) DeepCopy() *SecretboxConfiguration {
if in == nil {
return nil
}
out := new(SecretboxConfiguration)
in.DeepCopyInto(out)
return out
}
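Each generated DeepCopy returns a fully independent object, duplicating nested slices and pointer fields, so mutating the copy never leaks back into the original. A small usage sketch with hypothetical values (same assumed configv1 import path as above):

package main

import (
    "fmt"

    configv1 "k8s.io/apiserver/pkg/apis/config/v1"
)

func main() {
    orig := &configv1.ProviderConfiguration{
        AESCBC: &configv1.AESConfiguration{
            Keys: []configv1.Key{{Name: "key1", Secret: "cGxhY2Vob2xkZXI="}}, // placeholder secret for illustration
        },
    }

    clone := orig.DeepCopy()
    clone.AESCBC.Keys[0].Name = "key2"

    fmt.Println(orig.AESCBC.Keys[0].Name)  // still "key1": the Keys slice was copied, not shared
    fmt.Println(clone.AESCBC.Keys[0].Name) // "key2"
}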

View File

@ -0,0 +1,45 @@
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// RegisterDefaults adds defaulters functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering - they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
scheme.AddTypeDefaultingFunc(&EncryptionConfiguration{}, func(obj interface{}) { SetObjectDefaults_EncryptionConfiguration(obj.(*EncryptionConfiguration)) })
return nil
}
func SetObjectDefaults_EncryptionConfiguration(in *EncryptionConfiguration) {
for i := range in.Resources {
a := &in.Resources[i]
for j := range a.Providers {
b := &a.Providers[j]
if b.KMS != nil {
SetDefaults_KMSConfiguration(b.KMS)
}
}
}
}
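RegisterDefaults hooks SetObjectDefaults_EncryptionConfiguration into a runtime.Scheme, so that Scheme.Default fills in unset KMS fields on every provider. A sketch under the same import-path assumption; the concrete default values come from SetDefaults_KMSConfiguration (defined elsewhere in this package) and are deliberately not asserted here:

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/runtime"
    configv1 "k8s.io/apiserver/pkg/apis/config/v1"
)

func main() {
    scheme := runtime.NewScheme()
    if err := configv1.RegisterDefaults(scheme); err != nil {
        panic(err)
    }

    ec := &configv1.EncryptionConfiguration{
        Resources: []configv1.ResourceConfiguration{{
            Resources: []string{"secrets"},
            Providers: []configv1.ProviderConfiguration{{
                // hypothetical KMS provider; CacheSize and Timeout are left unset on purpose
                KMS: &configv1.KMSConfiguration{Name: "my-kms", Endpoint: "unix:///tmp/kms.sock"},
            }},
        }},
    }

    // Default dispatches on the object's Go type and runs the registered
    // SetObjectDefaults_EncryptionConfiguration, which defaults each KMS provider.
    scheme.Default(ec)

    kms := ec.Resources[0].Providers[0].KMS
    if kms.CacheSize != nil {
        fmt.Println("defaulted cacheSize:", *kms.CacheSize)
    }
    if kms.Timeout != nil {
        fmt.Println("defaulted timeout:", kms.Timeout.Duration)
    }
}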

View File

@ -0,0 +1,220 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package validation validates EncryptionConfiguration.
package validation
import (
"encoding/base64"
"fmt"
"net/url"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/apiserver/pkg/apis/config"
)
const (
moreThanOneElementErr = "more than one provider specified in a single element, should split into different list elements"
keyLenErrFmt = "secret is not of the expected length, got %d, expected one of %v"
unsupportedSchemeErrFmt = "unsupported scheme %q for KMS provider, only unix is supported"
atLeastOneRequiredErrFmt = "at least one %s is required"
invalidURLErrFmt = "invalid endpoint for kms provider, error: parse %s: net/url: invalid control character in URL"
mandatoryFieldErrFmt = "%s is a mandatory field for a %s"
base64EncodingErr = "secrets must be base64 encoded"
zeroOrNegativeErrFmt = "%s should be a positive value"
nonZeroErrFmt = "%s should be a positive value, or negative to disable"
encryptionConfigNilErr = "EncryptionConfiguration can't be nil"
)
var (
// See https://golang.org/pkg/crypto/aes/#NewCipher for details on supported key sizes for AES.
aesKeySizes = []int{16, 24, 32}
// See https://godoc.org/golang.org/x/crypto/nacl/secretbox#Open for details on the supported key sizes for Secretbox.
secretBoxKeySizes = []int{32}
root = field.NewPath("resources")
)
// ValidateEncryptionConfiguration validates a config.EncryptionConfiguration.
func ValidateEncryptionConfiguration(c *config.EncryptionConfiguration) field.ErrorList {
allErrs := field.ErrorList{}
if c == nil {
allErrs = append(allErrs, field.Required(root, "EncryptionConfiguration can't be nil"))
return allErrs
}
if len(c.Resources) == 0 {
allErrs = append(allErrs, field.Required(root, fmt.Sprintf(atLeastOneRequiredErrFmt, root)))
return allErrs
}
for i, conf := range c.Resources {
r := root.Index(i).Child("resources")
p := root.Index(i).Child("providers")
if len(conf.Resources) == 0 {
allErrs = append(allErrs, field.Required(r, fmt.Sprintf(atLeastOneRequiredErrFmt, r)))
}
if len(conf.Providers) == 0 {
allErrs = append(allErrs, field.Required(p, fmt.Sprintf(atLeastOneRequiredErrFmt, p)))
}
for j, provider := range conf.Providers {
path := p.Index(j)
allErrs = append(allErrs, validateSingleProvider(provider, path)...)
switch {
case provider.KMS != nil:
allErrs = append(allErrs, validateKMSConfiguration(provider.KMS, path.Child("kms"))...)
case provider.AESGCM != nil:
allErrs = append(allErrs, validateKeys(provider.AESGCM.Keys, path.Child("aesgcm").Child("keys"), aesKeySizes)...)
case provider.AESCBC != nil:
allErrs = append(allErrs, validateKeys(provider.AESCBC.Keys, path.Child("aescbc").Child("keys"), aesKeySizes)...)
case provider.Secretbox != nil:
allErrs = append(allErrs, validateKeys(provider.Secretbox.Keys, path.Child("secretbox").Child("keys"), secretBoxKeySizes)...)
}
}
}
return allErrs
}
func validateSingleProvider(provider config.ProviderConfiguration, fieldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
found := 0
if provider.KMS != nil {
found++
}
if provider.AESGCM != nil {
found++
}
if provider.AESCBC != nil {
found++
}
if provider.Secretbox != nil {
found++
}
if provider.Identity != nil {
found++
}
if found == 0 {
return append(allErrs, field.Invalid(fieldPath, provider, "provider does not contain any of the expected providers: KMS, AESGCM, AESCBC, Secretbox, Identity"))
}
if found > 1 {
return append(allErrs, field.Invalid(fieldPath, provider, moreThanOneElementErr))
}
return allErrs
}
func validateKeys(keys []config.Key, fieldPath *field.Path, expectedLen []int) field.ErrorList {
allErrs := field.ErrorList{}
if len(keys) == 0 {
allErrs = append(allErrs, field.Required(fieldPath, fmt.Sprintf(atLeastOneRequiredErrFmt, "keys")))
return allErrs
}
for i, key := range keys {
allErrs = append(allErrs, validateKey(key, fieldPath.Index(i), expectedLen)...)
}
return allErrs
}
func validateKey(key config.Key, fieldPath *field.Path, expectedLen []int) field.ErrorList {
allErrs := field.ErrorList{}
if key.Name == "" {
allErrs = append(allErrs, field.Required(fieldPath.Child("name"), fmt.Sprintf(mandatoryFieldErrFmt, "name", "key")))
}
if key.Secret == "" {
allErrs = append(allErrs, field.Required(fieldPath.Child("secret"), fmt.Sprintf(mandatoryFieldErrFmt, "secret", "key")))
return allErrs
}
secret, err := base64.StdEncoding.DecodeString(key.Secret)
if err != nil {
allErrs = append(allErrs, field.Invalid(fieldPath.Child("secret"), "REDACTED", base64EncodingErr))
return allErrs
}
lenMatched := false
for _, l := range expectedLen {
if len(secret) == l {
lenMatched = true
break
}
}
if !lenMatched {
allErrs = append(allErrs, field.Invalid(fieldPath.Child("secret"), "REDACTED", fmt.Sprintf(keyLenErrFmt, len(secret), expectedLen)))
}
return allErrs
}
func validateKMSConfiguration(c *config.KMSConfiguration, fieldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if c.Name == "" {
allErrs = append(allErrs, field.Required(fieldPath.Child("name"), fmt.Sprintf(mandatoryFieldErrFmt, "name", "provider")))
}
allErrs = append(allErrs, validateKMSTimeout(c, fieldPath.Child("timeout"))...)
allErrs = append(allErrs, validateKMSEndpoint(c, fieldPath.Child("endpoint"))...)
allErrs = append(allErrs, validateKMSCacheSize(c, fieldPath.Child("cachesize"))...)
return allErrs
}
func validateKMSCacheSize(c *config.KMSConfiguration, fieldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if *c.CacheSize == 0 {
allErrs = append(allErrs, field.Invalid(fieldPath, *c.CacheSize, fmt.Sprintf(nonZeroErrFmt, "cachesize")))
}
return allErrs
}
func validateKMSTimeout(c *config.KMSConfiguration, fieldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if c.Timeout.Duration <= 0 {
allErrs = append(allErrs, field.Invalid(fieldPath, c.Timeout, fmt.Sprintf(zeroOrNegativeErrFmt, "timeout")))
}
return allErrs
}
func validateKMSEndpoint(c *config.KMSConfiguration, fieldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if len(c.Endpoint) == 0 {
return append(allErrs, field.Invalid(fieldPath, "", fmt.Sprintf(mandatoryFieldErrFmt, "endpoint", "kms")))
}
u, err := url.Parse(c.Endpoint)
if err != nil {
return append(allErrs, field.Invalid(fieldPath, c.Endpoint, fmt.Sprintf("invalid endpoint for kms provider, error: %v", err)))
}
if u.Scheme != "unix" {
return append(allErrs, field.Invalid(fieldPath, c.Endpoint, fmt.Sprintf(unsupportedSchemeErrFmt, u.Scheme)))
}
return allErrs
}
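Pulling the rules together: each providers element must name exactly one provider, AES and Secretbox secrets must be base64-encoded with an allowed decoded length, and KMS endpoints must be unix:// URLs. A hedged end-to-end sketch (the validation import path is assumed from the package comment above) that generates a valid 32-byte key and validates a configuration built from the internal types:

package main

import (
    "crypto/rand"
    "encoding/base64"
    "fmt"

    "k8s.io/apiserver/pkg/apis/config"
    "k8s.io/apiserver/pkg/apis/config/validation"
)

func main() {
    // 32 random bytes, base64-encoded: valid for aescbc/aesgcm (16/24/32) and secretbox (32).
    raw := make([]byte, 32)
    if _, err := rand.Read(raw); err != nil {
        panic(err)
    }
    secret := base64.StdEncoding.EncodeToString(raw)

    c := &config.EncryptionConfiguration{
        Resources: []config.ResourceConfiguration{{
            Resources: []string{"secrets"},
            Providers: []config.ProviderConfiguration{
                // "key1" is an illustrative key name
                {AESCBC: &config.AESConfiguration{Keys: []config.Key{{Name: "key1", Secret: secret}}}},
                {Identity: &config.IdentityConfiguration{}},
            },
        }},
    }

    if errs := validation.ValidateEncryptionConfiguration(c); len(errs) > 0 {
        fmt.Println("invalid:", errs)
        return
    }
    fmt.Println("encryption configuration is valid")
}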

View File

@ -0,0 +1,227 @@
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package config
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AESConfiguration) DeepCopyInto(out *AESConfiguration) {
*out = *in
if in.Keys != nil {
in, out := &in.Keys, &out.Keys
*out = make([]Key, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AESConfiguration.
func (in *AESConfiguration) DeepCopy() *AESConfiguration {
if in == nil {
return nil
}
out := new(AESConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EncryptionConfiguration) DeepCopyInto(out *EncryptionConfiguration) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.Resources != nil {
in, out := &in.Resources, &out.Resources
*out = make([]ResourceConfiguration, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionConfiguration.
func (in *EncryptionConfiguration) DeepCopy() *EncryptionConfiguration {
if in == nil {
return nil
}
out := new(EncryptionConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *EncryptionConfiguration) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IdentityConfiguration) DeepCopyInto(out *IdentityConfiguration) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityConfiguration.
func (in *IdentityConfiguration) DeepCopy() *IdentityConfiguration {
if in == nil {
return nil
}
out := new(IdentityConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KMSConfiguration) DeepCopyInto(out *KMSConfiguration) {
*out = *in
if in.CacheSize != nil {
in, out := &in.CacheSize, &out.CacheSize
*out = new(int32)
**out = **in
}
if in.Timeout != nil {
in, out := &in.Timeout, &out.Timeout
*out = new(v1.Duration)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KMSConfiguration.
func (in *KMSConfiguration) DeepCopy() *KMSConfiguration {
if in == nil {
return nil
}
out := new(KMSConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Key) DeepCopyInto(out *Key) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Key.
func (in *Key) DeepCopy() *Key {
if in == nil {
return nil
}
out := new(Key)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ProviderConfiguration) DeepCopyInto(out *ProviderConfiguration) {
*out = *in
if in.AESGCM != nil {
in, out := &in.AESGCM, &out.AESGCM
*out = new(AESConfiguration)
(*in).DeepCopyInto(*out)
}
if in.AESCBC != nil {
in, out := &in.AESCBC, &out.AESCBC
*out = new(AESConfiguration)
(*in).DeepCopyInto(*out)
}
if in.Secretbox != nil {
in, out := &in.Secretbox, &out.Secretbox
*out = new(SecretboxConfiguration)
(*in).DeepCopyInto(*out)
}
if in.Identity != nil {
in, out := &in.Identity, &out.Identity
*out = new(IdentityConfiguration)
**out = **in
}
if in.KMS != nil {
in, out := &in.KMS, &out.KMS
*out = new(KMSConfiguration)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderConfiguration.
func (in *ProviderConfiguration) DeepCopy() *ProviderConfiguration {
if in == nil {
return nil
}
out := new(ProviderConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceConfiguration) DeepCopyInto(out *ResourceConfiguration) {
*out = *in
if in.Resources != nil {
in, out := &in.Resources, &out.Resources
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Providers != nil {
in, out := &in.Providers, &out.Providers
*out = make([]ProviderConfiguration, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceConfiguration.
func (in *ResourceConfiguration) DeepCopy() *ResourceConfiguration {
if in == nil {
return nil
}
out := new(ResourceConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SecretboxConfiguration) DeepCopyInto(out *SecretboxConfiguration) {
*out = *in
if in.Keys != nil {
in, out := &in.Keys, &out.Keys
*out = make([]Key, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretboxConfiguration.
func (in *SecretboxConfiguration) DeepCopy() *SecretboxConfiguration {
if in == nil {
return nil
}
out := new(SecretboxConfiguration)
in.DeepCopyInto(out)
return out
}

18
vendor/k8s.io/apiserver/pkg/authorization/path/doc.go generated vendored Normal file
View File

@ -0,0 +1,18 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package path contains an authorizer that allows certain paths and path prefixes.
package path // import "k8s.io/apiserver/pkg/authorization/path"

68
vendor/k8s.io/apiserver/pkg/authorization/path/path.go generated vendored Normal file
View File

@ -0,0 +1,68 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package path
import (
"context"
"fmt"
"strings"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apiserver/pkg/authorization/authorizer"
)
// NewAuthorizer returns an authorizer which accepts a given set of paths.
// Each path is either a fully matching path or it ends in * in case a prefix match is done. A leading / is optional.
func NewAuthorizer(alwaysAllowPaths []string) (authorizer.Authorizer, error) {
var prefixes []string
paths := sets.NewString()
for _, p := range alwaysAllowPaths {
p = strings.TrimPrefix(p, "/")
if len(p) == 0 {
// matches "/"
paths.Insert(p)
continue
}
if strings.ContainsRune(p[:len(p)-1], '*') {
return nil, fmt.Errorf("only trailing * allowed in %q", p)
}
if strings.HasSuffix(p, "*") {
prefixes = append(prefixes, p[:len(p)-1])
} else {
paths.Insert(p)
}
}
return authorizer.AuthorizerFunc(func(ctx context.Context, a authorizer.Attributes) (authorizer.Decision, string, error) {
if a.IsResourceRequest() {
return authorizer.DecisionNoOpinion, "", nil
}
pth := strings.TrimPrefix(a.GetPath(), "/")
if paths.Has(pth) {
return authorizer.DecisionAllow, "", nil
}
for _, prefix := range prefixes {
if strings.HasPrefix(pth, prefix) {
return authorizer.DecisionAllow, "", nil
}
}
return authorizer.DecisionNoOpinion, "", nil
}), nil
}
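The path authorizer only ever answers Allow or NoOpinion, so it is meant to be unioned in front of another authorizer. A usage sketch with illustrative paths; authorizer.AttributesRecord is the stock Attributes implementation, and leaving ResourceRequest at its zero value marks the request as a non-resource path:

package main

import (
    "context"
    "fmt"

    "k8s.io/apiserver/pkg/authorization/authorizer"
    "k8s.io/apiserver/pkg/authorization/path"
)

func main() {
    // "/healthz" must match exactly; "/metrics*" allows any path under that prefix.
    a, err := path.NewAuthorizer([]string{"/healthz", "/metrics*"})
    if err != nil {
        panic(err)
    }

    for _, p := range []string{"/healthz", "/metrics/resource", "/api/v1/pods"} {
        decision, _, _ := a.Authorize(context.TODO(), authorizer.AttributesRecord{Path: p})
        fmt.Printf("%-20s allowed=%v\n", p, decision == authorizer.DecisionAllow)
    }
}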

View File

@ -0,0 +1,91 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package registry
import (
"context"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
)
type decoratedWatcher struct {
w watch.Interface
decorator func(runtime.Object)
cancel context.CancelFunc
resultCh chan watch.Event
}
func newDecoratedWatcher(ctx context.Context, w watch.Interface, decorator func(runtime.Object)) *decoratedWatcher {
ctx, cancel := context.WithCancel(ctx)
d := &decoratedWatcher{
w: w,
decorator: decorator,
cancel: cancel,
resultCh: make(chan watch.Event),
}
go d.run(ctx)
return d
}
// run decorates watch events from the underlying watcher until its result channel
// is closed or the passed in context is done.
// When run() returns, decoratedWatcher#resultCh is closed.
func (d *decoratedWatcher) run(ctx context.Context) {
var recv, send watch.Event
var ok bool
defer close(d.resultCh)
for {
select {
case recv, ok = <-d.w.ResultChan():
if !ok {
// The underlying channel was closed, cancel our context
d.cancel()
return
}
switch recv.Type {
case watch.Added, watch.Modified, watch.Deleted, watch.Bookmark:
d.decorator(recv.Object)
send = recv
case watch.Error:
send = recv
}
select {
case d.resultCh <- send:
// propagated event successfully
case <-ctx.Done():
// context timed out or was cancelled, stop the underlying watcher
d.w.Stop()
return
}
case <-ctx.Done():
// context timed out or was cancelled, stop the underlying watcher
d.w.Stop()
return
}
}
}
func (d *decoratedWatcher) Stop() {
// stop the underlying watcher
d.w.Stop()
// cancel our context
d.cancel()
}
func (d *decoratedWatcher) ResultChan() <-chan watch.Event {
return d.resultCh
}
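The decorator passed to newDecoratedWatcher mutates each event object in place before the event is forwarded. newDecoratedWatcher itself is unexported, so the sketch below only illustrates the same decoration step against a watch.FakeWatcher from apimachinery; the annotation key is hypothetical:

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/api/meta"
    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/watch"
)

func main() {
    // The same kind of in-place mutation decoratedWatcher.run applies to
    // Added/Modified/Deleted/Bookmark events before forwarding them.
    decorator := func(obj runtime.Object) {
        if accessor, err := meta.Accessor(obj); err == nil {
            anns := accessor.GetAnnotations()
            if anns == nil {
                anns = map[string]string{}
            }
            anns["example.io/decorated"] = "true" // hypothetical annotation key
            accessor.SetAnnotations(anns)
        }
    }

    fw := watch.NewFake()
    go func() {
        fw.Add(&unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": "v1", "kind": "Demo"}})
        fw.Stop()
    }()

    for ev := range fw.ResultChan() {
        decorator(ev.Object)
        fmt.Println(ev.Type, ev.Object.(*unstructured.Unstructured).GetAnnotations())
    }
}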

View File

@ -0,0 +1,19 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package etcd has a generic implementation of a registry that
// stores things in etcd.
package registry // import "k8s.io/apiserver/pkg/registry/generic/registry"

View File

@ -0,0 +1,121 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package registry
import (
"context"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/apiserver/pkg/storage"
)
type DryRunnableStorage struct {
Storage storage.Interface
Codec runtime.Codec
}
func (s *DryRunnableStorage) Versioner() storage.Versioner {
return s.Storage.Versioner()
}
func (s *DryRunnableStorage) Create(ctx context.Context, key string, obj, out runtime.Object, ttl uint64, dryRun bool) error {
if dryRun {
if err := s.Storage.Get(ctx, key, storage.GetOptions{}, out); err == nil {
return storage.NewKeyExistsError(key, 0)
}
return s.copyInto(obj, out)
}
return s.Storage.Create(ctx, key, obj, out, ttl)
}
func (s *DryRunnableStorage) Delete(ctx context.Context, key string, out runtime.Object, preconditions *storage.Preconditions, deleteValidation storage.ValidateObjectFunc, dryRun bool, cachedExistingObject runtime.Object) error {
if dryRun {
if err := s.Storage.Get(ctx, key, storage.GetOptions{}, out); err != nil {
return err
}
if err := preconditions.Check(key, out); err != nil {
return err
}
return deleteValidation(ctx, out)
}
return s.Storage.Delete(ctx, key, out, preconditions, deleteValidation, cachedExistingObject)
}
func (s *DryRunnableStorage) Watch(ctx context.Context, key string, opts storage.ListOptions) (watch.Interface, error) {
return s.Storage.Watch(ctx, key, opts)
}
func (s *DryRunnableStorage) WatchList(ctx context.Context, key string, opts storage.ListOptions) (watch.Interface, error) {
return s.Storage.WatchList(ctx, key, opts)
}
func (s *DryRunnableStorage) Get(ctx context.Context, key string, opts storage.GetOptions, objPtr runtime.Object) error {
return s.Storage.Get(ctx, key, opts, objPtr)
}
func (s *DryRunnableStorage) GetToList(ctx context.Context, key string, opts storage.ListOptions, listObj runtime.Object) error {
return s.Storage.GetToList(ctx, key, opts, listObj)
}
func (s *DryRunnableStorage) List(ctx context.Context, key string, opts storage.ListOptions, listObj runtime.Object) error {
return s.Storage.List(ctx, key, opts, listObj)
}
func (s *DryRunnableStorage) GuaranteedUpdate(
ctx context.Context, key string, ptrToType runtime.Object, ignoreNotFound bool,
preconditions *storage.Preconditions, tryUpdate storage.UpdateFunc, dryRun bool, cachedExistingObject runtime.Object) error {
if dryRun {
err := s.Storage.Get(ctx, key, storage.GetOptions{IgnoreNotFound: ignoreNotFound}, ptrToType)
if err != nil {
return err
}
err = preconditions.Check(key, ptrToType)
if err != nil {
return err
}
rev, err := s.Versioner().ObjectResourceVersion(ptrToType)
if err != nil {
return err
}
out, _, err := tryUpdate(ptrToType, storage.ResponseMeta{ResourceVersion: rev})
if err != nil {
return err
}
return s.copyInto(out, ptrToType)
}
return s.Storage.GuaranteedUpdate(ctx, key, ptrToType, ignoreNotFound, preconditions, tryUpdate, cachedExistingObject)
}
func (s *DryRunnableStorage) Count(key string) (int64, error) {
return s.Storage.Count(key)
}
func (s *DryRunnableStorage) copyInto(in, out runtime.Object) error {
var data []byte
data, err := runtime.Encode(s.Codec, in)
if err != nil {
return err
}
_, _, err = s.Codec.Decode(data, nil, out)
if err != nil {
return err
}
return nil
}

View File

@ -0,0 +1,138 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package registry
import (
"fmt"
"sync"
"k8s.io/klog/v2"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apiserver/pkg/registry/generic"
"k8s.io/apiserver/pkg/storage"
cacherstorage "k8s.io/apiserver/pkg/storage/cacher"
"k8s.io/apiserver/pkg/storage/etcd3"
"k8s.io/apiserver/pkg/storage/storagebackend"
"k8s.io/apiserver/pkg/storage/storagebackend/factory"
"k8s.io/client-go/tools/cache"
)
// StorageWithCacher returns a generic.StorageDecorator that creates a cacher based on the given storageConfig.
func StorageWithCacher() generic.StorageDecorator {
return func(
storageConfig *storagebackend.Config,
resourcePrefix string,
keyFunc func(obj runtime.Object) (string, error),
newFunc func() runtime.Object,
newListFunc func() runtime.Object,
getAttrsFunc storage.AttrFunc,
triggerFuncs storage.IndexerFuncs,
indexers *cache.Indexers) (storage.Interface, factory.DestroyFunc, error) {
s, d, err := generic.NewRawStorage(storageConfig, newFunc)
if err != nil {
return s, d, err
}
if klog.V(5).Enabled() {
klog.InfoS("Storage caching is enabled", objectTypeToArgs(newFunc())...)
}
cacherConfig := cacherstorage.Config{
Storage: s,
Versioner: etcd3.APIObjectVersioner{},
ResourcePrefix: resourcePrefix,
KeyFunc: keyFunc,
NewFunc: newFunc,
NewListFunc: newListFunc,
GetAttrsFunc: getAttrsFunc,
IndexerFuncs: triggerFuncs,
Indexers: indexers,
Codec: storageConfig.Codec,
}
cacher, err := cacherstorage.NewCacherFromConfig(cacherConfig)
if err != nil {
return nil, func() {}, err
}
destroyFunc := func() {
cacher.Stop()
d()
}
// TODO : Remove RegisterStorageCleanup below when PR
// https://github.com/kubernetes/kubernetes/pull/50690
// merges as that shuts down storage properly
RegisterStorageCleanup(destroyFunc)
return cacher, destroyFunc, nil
}
}
func objectTypeToArgs(obj runtime.Object) []interface{} {
// special-case unstructured objects that tell us their apiVersion/kind
if u, isUnstructured := obj.(*unstructured.Unstructured); isUnstructured {
if apiVersion, kind := u.GetAPIVersion(), u.GetKind(); len(apiVersion) > 0 && len(kind) > 0 {
return []interface{}{"apiVersion", apiVersion, "kind", kind}
}
}
// otherwise just return the type
return []interface{}{"type", fmt.Sprintf("%T", obj)}
}
// TODO : Remove all the code below when PR
// https://github.com/kubernetes/kubernetes/pull/50690
// merges as that shuts down storage properly
// HACK ALERT : Track the destroy methods to call them
// from the test harness. TrackStorageCleanup will be called
// only from the test harness, so Register/Cleanup will be
// no-op at runtime.
var cleanupLock sync.Mutex
var cleanup []func() = nil
func TrackStorageCleanup() {
cleanupLock.Lock()
defer cleanupLock.Unlock()
if cleanup != nil {
panic("Conflicting storage tracking")
}
cleanup = make([]func(), 0)
}
func RegisterStorageCleanup(fn func()) {
cleanupLock.Lock()
defer cleanupLock.Unlock()
if cleanup == nil {
return
}
cleanup = append(cleanup, fn)
}
func CleanupStorage() {
cleanupLock.Lock()
old := cleanup
cleanup = nil
cleanupLock.Unlock()
for _, d := range old {
d()
}
}
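TrackStorageCleanup, RegisterStorageCleanup, and CleanupStorage are the test-harness hooks described in the comments above: registration is a no-op unless tracking was switched on first. A sketch of the intended flow; in real code the destroy function is registered by StorageWithCacher itself rather than by hand:

package main

import (
    "fmt"

    genericregistry "k8s.io/apiserver/pkg/registry/generic/registry"
)

func main() {
    // Only a test harness calls this; without it, RegisterStorageCleanup does nothing.
    genericregistry.TrackStorageCleanup()

    // Normally done inside the StorageWithCacher decorator for each store it builds;
    // the printed message here is purely illustrative.
    genericregistry.RegisterStorageCleanup(func() {
        fmt.Println("stopping cacher and releasing the underlying storage")
    })

    // At the end of the run: invoke every tracked destroy function and reset the list.
    genericregistry.CleanupStorage()
}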

File diff suppressed because it is too large

15
vendor/k8s.io/apiserver/pkg/server/options/OWNERS generated vendored Normal file
View File

@ -0,0 +1,15 @@
# See the OWNERS docs at https://go.k8s.io/owners
reviewers:
- smarterclayton
- wojtek-t
- deads2k
- liggitt
- sttts
- jlowdermilk
- soltysh
- dims
- cjcullen
- ping035627
- xiangpengzhao
- enj

234
vendor/k8s.io/apiserver/pkg/server/options/admission.go generated vendored Normal file
View File

@ -0,0 +1,234 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package options
import (
"fmt"
"strings"
"github.com/spf13/pflag"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apiserver/pkg/admission"
"k8s.io/apiserver/pkg/admission/initializer"
admissionmetrics "k8s.io/apiserver/pkg/admission/metrics"
"k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle"
mutatingwebhook "k8s.io/apiserver/pkg/admission/plugin/webhook/mutating"
validatingwebhook "k8s.io/apiserver/pkg/admission/plugin/webhook/validating"
apiserverapi "k8s.io/apiserver/pkg/apis/apiserver"
apiserverapiv1 "k8s.io/apiserver/pkg/apis/apiserver/v1"
apiserverapiv1alpha1 "k8s.io/apiserver/pkg/apis/apiserver/v1alpha1"
"k8s.io/apiserver/pkg/server"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/component-base/featuregate"
)
var configScheme = runtime.NewScheme()
func init() {
utilruntime.Must(apiserverapi.AddToScheme(configScheme))
utilruntime.Must(apiserverapiv1alpha1.AddToScheme(configScheme))
utilruntime.Must(apiserverapiv1.AddToScheme(configScheme))
}
// AdmissionOptions holds the admission options
type AdmissionOptions struct {
// RecommendedPluginOrder holds an ordered list of plugin names we recommend to use by default
RecommendedPluginOrder []string
// DefaultOffPlugins is a set of plugin names that is disabled by default
DefaultOffPlugins sets.String
// EnablePlugins indicates plugins to be enabled passed through `--enable-admission-plugins`.
EnablePlugins []string
// DisablePlugins indicates plugins to be disabled passed through `--disable-admission-plugins`.
DisablePlugins []string
// ConfigFile is the file path with admission control configuration.
ConfigFile string
// Plugins contains all registered plugins.
Plugins *admission.Plugins
// Decorators is a list of admission decorator to wrap around the admission plugins
Decorators admission.Decorators
}
// NewAdmissionOptions creates a new instance of AdmissionOptions.
// Note: it also calls RegisterAllAdmissionPlugins to register
// all generic admission plugins.
//
// It provides a RecommendedPluginOrder that holds sane values
// that can be used by servers that don't care about the admission chain.
// Servers that do care can overwrite/append that field after creation.
func NewAdmissionOptions() *AdmissionOptions {
options := &AdmissionOptions{
Plugins: admission.NewPlugins(),
Decorators: admission.Decorators{admission.DecoratorFunc(admissionmetrics.WithControllerMetrics)},
// This list is mix of mutating admission plugins and validating
// admission plugins. The apiserver always runs the validating ones
// after all the mutating ones, so their relative order in this list
// doesn't matter.
RecommendedPluginOrder: []string{lifecycle.PluginName, mutatingwebhook.PluginName, validatingwebhook.PluginName},
DefaultOffPlugins: sets.NewString(),
}
server.RegisterAllAdmissionPlugins(options.Plugins)
return options
}
// AddFlags adds flags related to admission for a specific APIServer to the specified FlagSet
func (a *AdmissionOptions) AddFlags(fs *pflag.FlagSet) {
if a == nil {
return
}
fs.StringSliceVar(&a.EnablePlugins, "enable-admission-plugins", a.EnablePlugins, ""+
"admission plugins that should be enabled in addition to default enabled ones ("+
strings.Join(a.defaultEnabledPluginNames(), ", ")+"). "+
"Comma-delimited list of admission plugins: "+strings.Join(a.Plugins.Registered(), ", ")+". "+
"The order of plugins in this flag does not matter.")
fs.StringSliceVar(&a.DisablePlugins, "disable-admission-plugins", a.DisablePlugins, ""+
"admission plugins that should be disabled although they are in the default enabled plugins list ("+
strings.Join(a.defaultEnabledPluginNames(), ", ")+"). "+
"Comma-delimited list of admission plugins: "+strings.Join(a.Plugins.Registered(), ", ")+". "+
"The order of plugins in this flag does not matter.")
fs.StringVar(&a.ConfigFile, "admission-control-config-file", a.ConfigFile,
"File with admission control configuration.")
}
// ApplyTo adds the admission chain to the server configuration.
// If admission plugin names were not provided by a cluster-admin, they are prepared from the recommended/default values.
// In addition, the method lazily initializes a generic plugin initializer that is appended to the list of pluginInitializers.
// Note: this method uses genericconfig.Authorizer.
func (a *AdmissionOptions) ApplyTo(
c *server.Config,
informers informers.SharedInformerFactory,
kubeAPIServerClientConfig *rest.Config,
features featuregate.FeatureGate,
pluginInitializers ...admission.PluginInitializer,
) error {
if a == nil {
return nil
}
// Admission depends on CoreAPI to set SharedInformerFactory and ClientConfig.
if informers == nil {
return fmt.Errorf("admission depends on a Kubernetes core API shared informer, it cannot be nil")
}
pluginNames := a.enabledPluginNames()
pluginsConfigProvider, err := admission.ReadAdmissionConfiguration(pluginNames, a.ConfigFile, configScheme)
if err != nil {
return fmt.Errorf("failed to read plugin config: %v", err)
}
clientset, err := kubernetes.NewForConfig(kubeAPIServerClientConfig)
if err != nil {
return err
}
genericInitializer := initializer.New(clientset, informers, c.Authorization.Authorizer, features)
initializersChain := admission.PluginInitializers{}
pluginInitializers = append(pluginInitializers, genericInitializer)
initializersChain = append(initializersChain, pluginInitializers...)
admissionChain, err := a.Plugins.NewFromPlugins(pluginNames, pluginsConfigProvider, initializersChain, a.Decorators)
if err != nil {
return err
}
c.AdmissionControl = admissionmetrics.WithStepMetrics(admissionChain)
return nil
}
// Validate verifies flags passed to AdmissionOptions.
func (a *AdmissionOptions) Validate() []error {
if a == nil {
return nil
}
errs := []error{}
registeredPlugins := sets.NewString(a.Plugins.Registered()...)
for _, name := range a.EnablePlugins {
if !registeredPlugins.Has(name) {
errs = append(errs, fmt.Errorf("enable-admission-plugins plugin %q is unknown", name))
}
}
for _, name := range a.DisablePlugins {
if !registeredPlugins.Has(name) {
errs = append(errs, fmt.Errorf("disable-admission-plugins plugin %q is unknown", name))
}
}
enablePlugins := sets.NewString(a.EnablePlugins...)
disablePlugins := sets.NewString(a.DisablePlugins...)
if len(enablePlugins.Intersection(disablePlugins).List()) > 0 {
errs = append(errs, fmt.Errorf("%v in enable-admission-plugins and disable-admission-plugins "+
"overlapped", enablePlugins.Intersection(disablePlugins).List()))
}
// Verify RecommendedPluginOrder.
recommendPlugins := sets.NewString(a.RecommendedPluginOrder...)
intersections := registeredPlugins.Intersection(recommendPlugins)
if !intersections.Equal(recommendPlugins) {
// Developer error, this should never happen.
errs = append(errs, fmt.Errorf("plugins %v in RecommendedPluginOrder are not registered",
recommendPlugins.Difference(intersections).List()))
}
if !intersections.Equal(registeredPlugins) {
// Developer error, this should never happen.
errs = append(errs, fmt.Errorf("plugins %v registered are not in RecommendedPluginOrder",
registeredPlugins.Difference(intersections).List()))
}
return errs
}
// enabledPluginNames makes use of RecommendedPluginOrder, DefaultOffPlugins,
// EnablePlugins, DisablePlugins fields
// to prepare a list of ordered plugin names that are enabled.
func (a *AdmissionOptions) enabledPluginNames() []string {
allOffPlugins := append(a.DefaultOffPlugins.List(), a.DisablePlugins...)
disabledPlugins := sets.NewString(allOffPlugins...)
enabledPlugins := sets.NewString(a.EnablePlugins...)
disabledPlugins = disabledPlugins.Difference(enabledPlugins)
orderedPlugins := []string{}
for _, plugin := range a.RecommendedPluginOrder {
if !disabledPlugins.Has(plugin) {
orderedPlugins = append(orderedPlugins, plugin)
}
}
return orderedPlugins
}
// defaultEnabledPluginNames returns the names of the plugins that are enabled by default.
func (a *AdmissionOptions) defaultEnabledPluginNames() []string {
defaultOnPluginNames := []string{}
for _, pluginName := range a.RecommendedPluginOrder {
if !a.DefaultOffPlugins.Has(pluginName) {
defaultOnPluginNames = append(defaultOnPluginNames, pluginName)
}
}
return defaultOnPluginNames
}
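AdmissionOptions is meant to be embedded in a server's options struct: construct it, register its flags, then Validate and eventually ApplyTo. A minimal flag-parsing sketch that stops before ApplyTo (which needs a live rest.Config and shared informers); the flag value names NamespaceLifecycle, one of the recommended plugins registered above:

package main

import (
    "fmt"

    "github.com/spf13/pflag"
    "k8s.io/apiserver/pkg/server/options"
)

func main() {
    o := options.NewAdmissionOptions()

    fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
    o.AddFlags(fs)

    // Turn one recommended plugin off from the command line.
    if err := fs.Parse([]string{"--disable-admission-plugins=NamespaceLifecycle"}); err != nil {
        panic(err)
    }
    if errs := o.Validate(); len(errs) > 0 {
        panic(fmt.Sprintf("invalid admission options: %v", errs))
    }

    fmt.Println("recommended order:", o.RecommendedPluginOrder)
    fmt.Println("explicitly disabled:", o.DisablePlugins)
}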

View File

@ -0,0 +1,115 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package options
import (
"fmt"
"strings"
"github.com/spf13/pflag"
"k8s.io/apiserver/pkg/server"
"k8s.io/apiserver/pkg/server/resourceconfig"
serverstore "k8s.io/apiserver/pkg/server/storage"
cliflag "k8s.io/component-base/cli/flag"
)
// APIEnablementOptions contains the options for which resources to turn on and off.
// Given small aggregated API servers, this option isn't required for "normal" API servers
type APIEnablementOptions struct {
RuntimeConfig cliflag.ConfigurationMap
}
func NewAPIEnablementOptions() *APIEnablementOptions {
return &APIEnablementOptions{
RuntimeConfig: make(cliflag.ConfigurationMap),
}
}
// AddFlags adds flags for a specific APIServer to the specified FlagSet
func (s *APIEnablementOptions) AddFlags(fs *pflag.FlagSet) {
fs.Var(&s.RuntimeConfig, "runtime-config", ""+
"A set of key=value pairs that enable or disable built-in APIs. Supported options are:\n"+
"v1=true|false for the core API group\n"+
"<group>/<version>=true|false for a specific API group and version (e.g. apps/v1=true)\n"+
"api/all=true|false controls all API versions\n"+
"api/ga=true|false controls all API versions of the form v[0-9]+\n"+
"api/beta=true|false controls all API versions of the form v[0-9]+beta[0-9]+\n"+
"api/alpha=true|false controls all API versions of the form v[0-9]+alpha[0-9]+\n"+
"api/legacy is deprecated, and will be removed in a future version")
}
// Validate validates RuntimeConfig with a list of registries.
// Usually this list only has one element, the apiserver registry of the process.
// But in the advanced (and usually not recommended) case of delegated apiservers there can be more.
// Validate will filter out the known groups of each registry.
// If anything is left over after that, an error is returned.
func (s *APIEnablementOptions) Validate(registries ...GroupRegisty) []error {
if s == nil {
return nil
}
errors := []error{}
if s.RuntimeConfig[resourceconfig.APIAll] == "false" && len(s.RuntimeConfig) == 1 {
// Do not allow only set api/all=false, in such case apiserver startup has no meaning.
return append(errors, fmt.Errorf("invalid key with only %v=false", resourceconfig.APIAll))
}
groups, err := resourceconfig.ParseGroups(s.RuntimeConfig)
if err != nil {
return append(errors, err)
}
for _, registry := range registries {
// filter out known groups
groups = unknownGroups(groups, registry)
}
if len(groups) != 0 {
errors = append(errors, fmt.Errorf("unknown api groups %s", strings.Join(groups, ",")))
}
return errors
}
// ApplyTo overrides MergedResourceConfig with defaults and registry.
func (s *APIEnablementOptions) ApplyTo(c *server.Config, defaultResourceConfig *serverstore.ResourceConfig, registry resourceconfig.GroupVersionRegistry) error {
if s == nil {
return nil
}
mergedResourceConfig, err := resourceconfig.MergeAPIResourceConfigs(defaultResourceConfig, s.RuntimeConfig, registry)
c.MergedResourceConfig = mergedResourceConfig
return err
}
func unknownGroups(groups []string, registry GroupRegisty) []string {
unknownGroups := []string{}
for _, group := range groups {
if !registry.IsGroupRegistered(group) {
unknownGroups = append(unknownGroups, group)
}
}
return unknownGroups
}
// GroupRegisty provides a method to check whether a given group is registered.
type GroupRegisty interface {
// IsGroupRegistered returns true if the given group is registered.
IsGroupRegistered(group string) bool
}
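Validate accepts anything with an IsGroupRegistered method, so a tiny stub registry is enough to exercise the runtime-config checks. A hedged sketch; the group names and the stub type are made up for illustration:

package main

import (
    "fmt"

    "k8s.io/apiserver/pkg/server/options"
)

// stubRegistry is a made-up type that satisfies the group-registry interface expected by Validate.
type stubRegistry struct{ known map[string]bool }

func (r stubRegistry) IsGroupRegistered(group string) bool { return r.known[group] }

func main() {
    o := options.NewAPIEnablementOptions()

    // Equivalent to --runtime-config=apps/v1=true,batch/v1=false on the command line.
    o.RuntimeConfig["apps/v1"] = "true"
    o.RuntimeConfig["batch/v1"] = "false"

    reg := stubRegistry{known: map[string]bool{"apps": true}} // "batch" is unknown to this registry
    if errs := o.Validate(reg); len(errs) > 0 {
        fmt.Println("validation errors:", errs) // expected: the unknown "batch" group is reported
    }
}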

622
vendor/k8s.io/apiserver/pkg/server/options/audit.go generated vendored Normal file
View File

@ -0,0 +1,622 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package options
import (
"fmt"
"io"
"os"
"strings"
"time"
"github.com/spf13/pflag"
"gopkg.in/natefinch/lumberjack.v2"
"k8s.io/klog/v2"
"k8s.io/apimachinery/pkg/runtime/schema"
utilnet "k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/util/sets"
auditinternal "k8s.io/apiserver/pkg/apis/audit"
auditv1 "k8s.io/apiserver/pkg/apis/audit/v1"
auditv1alpha1 "k8s.io/apiserver/pkg/apis/audit/v1alpha1"
auditv1beta1 "k8s.io/apiserver/pkg/apis/audit/v1beta1"
"k8s.io/apiserver/pkg/audit"
"k8s.io/apiserver/pkg/audit/policy"
"k8s.io/apiserver/pkg/server"
"k8s.io/apiserver/pkg/server/egressselector"
"k8s.io/apiserver/pkg/util/webhook"
pluginbuffered "k8s.io/apiserver/plugin/pkg/audit/buffered"
pluginlog "k8s.io/apiserver/plugin/pkg/audit/log"
plugintruncate "k8s.io/apiserver/plugin/pkg/audit/truncate"
pluginwebhook "k8s.io/apiserver/plugin/pkg/audit/webhook"
)
const (
// Default configuration values for ModeBatch.
defaultBatchBufferSize = 10000 // Buffer up to 10000 events before starting discarding.
// These batch parameters are only used by the webhook backend.
defaultBatchMaxSize = 400 // Only send up to 400 events at a time.
defaultBatchMaxWait = 30 * time.Second // Send events at least twice a minute.
defaultBatchThrottleQPS = 10 // Limit the send rate by 10 QPS.
defaultBatchThrottleBurst = 15 // Allow up to 15 QPS burst.
)
func appendBackend(existing, newBackend audit.Backend) audit.Backend {
if existing == nil {
return newBackend
}
if newBackend == nil {
return existing
}
return audit.Union(existing, newBackend)
}
type AuditOptions struct {
// Policy configuration file for filtering audit events that are captured.
// If unspecified, a default is provided.
PolicyFile string
// Plugin options
LogOptions AuditLogOptions
WebhookOptions AuditWebhookOptions
}
const (
// ModeBatch indicates that the audit backend should buffer audit events
// internally, sending batch updates either once a certain number of
// events have been received or a certain amount of time has passed.
ModeBatch = "batch"
// ModeBlocking causes the audit backend to block on every attempt to process
// a set of events. This causes requests to the API server to wait for the
// flush before sending a response.
ModeBlocking = "blocking"
// ModeBlockingStrict is the same as ModeBlocking, except when there is
// a failure during audit logging at RequestReceived stage, the whole
// request to apiserver will fail.
ModeBlockingStrict = "blocking-strict"
)
// AllowedModes is the modes known for audit backends.
var AllowedModes = []string{
ModeBatch,
ModeBlocking,
ModeBlockingStrict,
}
type AuditBatchOptions struct {
// Should the backend asynchronously batch events before sending them to the
// webhook backend, or should the backend block responses?
//
// Defaults to asynchronous batch events.
Mode string
// Configuration for batching backend. Only used in batch mode.
BatchConfig pluginbuffered.BatchConfig
}
type AuditTruncateOptions struct {
// Whether truncating is enabled or not.
Enabled bool
// Truncating configuration.
TruncateConfig plugintruncate.Config
}
// AuditLogOptions determines the output of the structured audit log by default.
type AuditLogOptions struct {
Path string
MaxAge int
MaxBackups int
MaxSize int
Format string
Compress bool
BatchOptions AuditBatchOptions
TruncateOptions AuditTruncateOptions
// API group version used for serializing audit events.
GroupVersionString string
}
// AuditWebhookOptions control the webhook configuration for audit events.
type AuditWebhookOptions struct {
ConfigFile string
InitialBackoff time.Duration
BatchOptions AuditBatchOptions
TruncateOptions AuditTruncateOptions
// API group version used for serializing audit events.
GroupVersionString string
}
// AuditDynamicOptions control the configuration of dynamic backends for audit events
type AuditDynamicOptions struct {
// Enabled tells whether the dynamic audit capability is enabled.
Enabled bool
// Configuration for batching backend. This is currently only used as an override
// for integration tests
BatchConfig *pluginbuffered.BatchConfig
}
func NewAuditOptions() *AuditOptions {
return &AuditOptions{
WebhookOptions: AuditWebhookOptions{
InitialBackoff: pluginwebhook.DefaultInitialBackoffDelay,
BatchOptions: AuditBatchOptions{
Mode: ModeBatch,
BatchConfig: defaultWebhookBatchConfig(),
},
TruncateOptions: NewAuditTruncateOptions(),
GroupVersionString: "audit.k8s.io/v1",
},
LogOptions: AuditLogOptions{
Format: pluginlog.FormatJson,
BatchOptions: AuditBatchOptions{
Mode: ModeBlocking,
BatchConfig: defaultLogBatchConfig(),
},
TruncateOptions: NewAuditTruncateOptions(),
GroupVersionString: "audit.k8s.io/v1",
},
}
}
func NewAuditTruncateOptions() AuditTruncateOptions {
return AuditTruncateOptions{
Enabled: false,
TruncateConfig: plugintruncate.Config{
MaxBatchSize: 10 * 1024 * 1024, // 10MB
MaxEventSize: 100 * 1024, // 100KB
},
}
}
// Validate checks invalid config combination
func (o *AuditOptions) Validate() []error {
if o == nil {
return nil
}
var allErrors []error
allErrors = append(allErrors, o.LogOptions.Validate()...)
allErrors = append(allErrors, o.WebhookOptions.Validate()...)
return allErrors
}
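// Hypothetical usage sketch (not part of the upstream file): register the audit flags,
// point the log backend at stdout ("-" is handled as stdout by the log options further
// down in this file), and run the combined Validate above. The flag names assume the
// standard --audit-log-path / --audit-log-format flags registered by AuditLogOptions.AddFlags.
func exampleAuditOptionsUsage() {
    o := NewAuditOptions()
    fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
    o.AddFlags(fs)
    if err := fs.Parse([]string{"--audit-log-path=-", "--audit-log-format=json"}); err != nil {
        fmt.Printf("flag parse error: %v\n", err)
        return
    }
    if errs := o.Validate(); len(errs) > 0 {
        fmt.Printf("invalid audit configuration: %v\n", errs)
    }
}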
func validateBackendMode(pluginName string, mode string) error {
for _, m := range AllowedModes {
if m == mode {
return nil
}
}
return fmt.Errorf("invalid audit %s mode %s, allowed modes are %q", pluginName, mode, strings.Join(AllowedModes, ","))
}
func validateBackendBatchOptions(pluginName string, options AuditBatchOptions) error {
if err := validateBackendMode(pluginName, options.Mode); err != nil {
return err
}
if options.Mode != ModeBatch {
// Don't validate the unused options.
return nil
}
config := options.BatchConfig
if config.BufferSize <= 0 {
return fmt.Errorf("invalid audit batch %s buffer size %v, must be a positive number", pluginName, config.BufferSize)
}
if config.MaxBatchSize <= 0 {
return fmt.Errorf("invalid audit batch %s max batch size %v, must be a positive number", pluginName, config.MaxBatchSize)
}
if config.ThrottleEnable {
if config.ThrottleQPS <= 0 {
return fmt.Errorf("invalid audit batch %s throttle QPS %v, must be a positive number", pluginName, config.ThrottleQPS)
}
if config.ThrottleBurst <= 0 {
return fmt.Errorf("invalid audit batch %s throttle burst %v, must be a positive number", pluginName, config.ThrottleBurst)
}
}
return nil
}
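// Minimal sketch of the batch validation above (assumed values, not upstream
// code): the BatchConfig is only inspected when the mode is ModeBatch.
//
//	opts := AuditBatchOptions{
//		Mode: ModeBatch,
//		BatchConfig: pluginbuffered.BatchConfig{
//			BufferSize:   10000,
//			MaxBatchSize: 400,
//		},
//	}
//	err := validateBackendBatchOptions("log", opts) // nil: both sizes are positive
//	opts.BatchConfig.BufferSize = 0
//	err = validateBackendBatchOptions("log", opts) // non-nil: buffer size must be positive
//	_ = err
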
var knownGroupVersions = []schema.GroupVersion{
auditv1alpha1.SchemeGroupVersion,
auditv1beta1.SchemeGroupVersion,
auditv1.SchemeGroupVersion,
}
func validateGroupVersionString(groupVersion string) error {
gv, err := schema.ParseGroupVersion(groupVersion)
if err != nil {
return err
}
if !knownGroupVersion(gv) {
return fmt.Errorf("invalid group version, allowed versions are %q", knownGroupVersions)
}
if gv != auditv1.SchemeGroupVersion {
klog.Warningf("%q is deprecated and will be removed in a future release, use %q instead", gv, auditv1.SchemeGroupVersion)
}
return nil
}
func knownGroupVersion(gv schema.GroupVersion) bool {
for _, knownGv := range knownGroupVersions {
if gv == knownGv {
return true
}
}
return false
}
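// Illustrative sketch, not upstream code: the current audit.k8s.io/v1 version
// validates cleanly, older known versions validate with a deprecation warning,
// and unknown group versions are rejected.
//
//	_ = validateGroupVersionString("audit.k8s.io/v1")      // nil
//	_ = validateGroupVersionString("audit.k8s.io/v1beta1") // nil, logs a deprecation warning
//	err := validateGroupVersionString("example.io/v1")     // non-nil: unknown group version
//	_ = err
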
func (o *AuditOptions) AddFlags(fs *pflag.FlagSet) {
if o == nil {
return
}
fs.StringVar(&o.PolicyFile, "audit-policy-file", o.PolicyFile,
"Path to the file that defines the audit policy configuration.")
o.LogOptions.AddFlags(fs)
o.LogOptions.BatchOptions.AddFlags(pluginlog.PluginName, fs)
o.LogOptions.TruncateOptions.AddFlags(pluginlog.PluginName, fs)
o.WebhookOptions.AddFlags(fs)
o.WebhookOptions.BatchOptions.AddFlags(pluginwebhook.PluginName, fs)
o.WebhookOptions.TruncateOptions.AddFlags(pluginwebhook.PluginName, fs)
}
func (o *AuditOptions) ApplyTo(
c *server.Config,
) error {
if o == nil {
return nil
}
if c == nil {
return fmt.Errorf("server config must be non-nil")
}
// 1. Build policy checker
checker, err := o.newPolicyChecker()
if err != nil {
return err
}
// 2. Build log backend
var logBackend audit.Backend
w, err := o.LogOptions.getWriter()
if err != nil {
return err
}
if w != nil {
if checker == nil {
klog.V(2).Info("No audit policy file provided, no events will be recorded for log backend")
} else {
logBackend = o.LogOptions.newBackend(w)
}
}
// 3. Build webhook backend
var webhookBackend audit.Backend
if o.WebhookOptions.enabled() {
if checker == nil {
klog.V(2).Info("No audit policy file provided, no events will be recorded for webhook backend")
} else {
if c.EgressSelector != nil {
var egressDialer utilnet.DialFunc
egressDialer, err = c.EgressSelector.Lookup(egressselector.ControlPlane.AsNetworkContext())
if err != nil {
return err
}
webhookBackend, err = o.WebhookOptions.newUntruncatedBackend(egressDialer)
} else {
webhookBackend, err = o.WebhookOptions.newUntruncatedBackend(nil)
}
if err != nil {
return err
}
}
}
groupVersion, err := schema.ParseGroupVersion(o.WebhookOptions.GroupVersionString)
if err != nil {
return err
}
// 4. Apply dynamic options.
var dynamicBackend audit.Backend
if webhookBackend != nil {
// if the webhook backend is enabled, wrap it in the truncate options
dynamicBackend = o.WebhookOptions.TruncateOptions.wrapBackend(webhookBackend, groupVersion)
}
// 5. Set the policy checker
c.AuditPolicyChecker = checker
// 6. Join the log backend with the webhooks
c.AuditBackend = appendBackend(logBackend, dynamicBackend)
if c.AuditBackend != nil {
klog.V(2).Infof("Using audit backend: %s", c.AuditBackend)
}
return nil
}
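// Usage sketch, not part of the upstream file: applying parsed audit options to
// an apiserver configuration. The *server.Config value c is assumed to have been
// built elsewhere, and utilerrors is assumed to alias
// k8s.io/apimachinery/pkg/util/errors.
//
//	if errs := o.Validate(); len(errs) > 0 {
//		return utilerrors.NewAggregate(errs)
//	}
//	if err := o.ApplyTo(c); err != nil {
//		return err
//	}
//	// c.AuditPolicyChecker and c.AuditBackend are populated when a policy file
//	// and at least one backend were configured.
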
func (o *AuditOptions) newPolicyChecker() (policy.Checker, error) {
if o.PolicyFile == "" {
return nil, nil
}
p, err := policy.LoadPolicyFromFile(o.PolicyFile)
if err != nil {
return nil, fmt.Errorf("loading audit policy file: %v", err)
}
return policy.NewChecker(p), nil
}
func (o *AuditBatchOptions) AddFlags(pluginName string, fs *pflag.FlagSet) {
fs.StringVar(&o.Mode, fmt.Sprintf("audit-%s-mode", pluginName), o.Mode,
"Strategy for sending audit events. Blocking indicates sending events should block"+
" server responses. Batch causes the backend to buffer and write events"+
" asynchronously. Known modes are "+strings.Join(AllowedModes, ",")+".")
fs.IntVar(&o.BatchConfig.BufferSize, fmt.Sprintf("audit-%s-batch-buffer-size", pluginName),
o.BatchConfig.BufferSize, "The size of the buffer to store events before "+
"batching and writing. Only used in batch mode.")
fs.IntVar(&o.BatchConfig.MaxBatchSize, fmt.Sprintf("audit-%s-batch-max-size", pluginName),
o.BatchConfig.MaxBatchSize, "The maximum size of a batch. Only used in batch mode.")
fs.DurationVar(&o.BatchConfig.MaxBatchWait, fmt.Sprintf("audit-%s-batch-max-wait", pluginName),
o.BatchConfig.MaxBatchWait, "The amount of time to wait before force writing the "+
"batch that hadn't reached the max size. Only used in batch mode.")
fs.BoolVar(&o.BatchConfig.ThrottleEnable, fmt.Sprintf("audit-%s-batch-throttle-enable", pluginName),
o.BatchConfig.ThrottleEnable, "Whether batching throttling is enabled. Only used in batch mode.")
fs.Float32Var(&o.BatchConfig.ThrottleQPS, fmt.Sprintf("audit-%s-batch-throttle-qps", pluginName),
o.BatchConfig.ThrottleQPS, "Maximum average number of batches per second. "+
"Only used in batch mode.")
fs.IntVar(&o.BatchConfig.ThrottleBurst, fmt.Sprintf("audit-%s-batch-throttle-burst", pluginName),
o.BatchConfig.ThrottleBurst, "Maximum number of requests sent at the same "+
"moment if ThrottleQPS was not utilized before. Only used in batch mode.")
}
type ignoreErrorsBackend struct {
audit.Backend
}
func (i *ignoreErrorsBackend) ProcessEvents(ev ...*auditinternal.Event) bool {
i.Backend.ProcessEvents(ev...)
return true
}
func (i *ignoreErrorsBackend) String() string {
return fmt.Sprintf("ignoreErrors<%s>", i.Backend)
}
func (o *AuditBatchOptions) wrapBackend(delegate audit.Backend) audit.Backend {
if o.Mode == ModeBlockingStrict {
return delegate
}
if o.Mode == ModeBlocking {
return &ignoreErrorsBackend{Backend: delegate}
}
return pluginbuffered.NewBackend(delegate, o.BatchConfig)
}
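// Minimal sketch (not upstream code) of how wrapBackend maps modes to
// decorators; delegate stands for any existing audit.Backend.
//
//	strict := AuditBatchOptions{Mode: ModeBlockingStrict}
//	_ = strict.wrapBackend(delegate) // unchanged: write errors propagate to the caller
//
//	blocking := AuditBatchOptions{Mode: ModeBlocking}
//	_ = blocking.wrapBackend(delegate) // ignoreErrorsBackend: ProcessEvents always reports success
//
//	batch := AuditBatchOptions{Mode: ModeBatch, BatchConfig: defaultWebhookBatchConfig()}
//	_ = batch.wrapBackend(delegate) // buffered backend that writes batches asynchronously
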
func (o *AuditTruncateOptions) Validate(pluginName string) error {
config := o.TruncateConfig
if config.MaxEventSize <= 0 {
return fmt.Errorf("invalid audit truncate %s max event size %v, must be a positive number", pluginName, config.MaxEventSize)
}
if config.MaxBatchSize < config.MaxEventSize {
return fmt.Errorf("invalid audit truncate %s max batch size %v, must be greater than "+
"max event size (%v)", pluginName, config.MaxBatchSize, config.MaxEventSize)
}
return nil
}
func (o *AuditTruncateOptions) AddFlags(pluginName string, fs *pflag.FlagSet) {
fs.BoolVar(&o.Enabled, fmt.Sprintf("audit-%s-truncate-enabled", pluginName),
o.Enabled, "Whether event and batch truncating is enabled.")
fs.Int64Var(&o.TruncateConfig.MaxBatchSize, fmt.Sprintf("audit-%s-truncate-max-batch-size", pluginName),
o.TruncateConfig.MaxBatchSize, "Maximum size of the batch sent to the underlying backend. "+
"Actual serialized size can be several hundreds of bytes greater. If a batch exceeds this limit, "+
"it is split into several batches of smaller size.")
fs.Int64Var(&o.TruncateConfig.MaxEventSize, fmt.Sprintf("audit-%s-truncate-max-event-size", pluginName),
o.TruncateConfig.MaxEventSize, "Maximum size of the audit event sent to the underlying backend. "+
"If the size of an event is greater than this number, first request and response are removed, and "+
"if this doesn't reduce the size enough, event is discarded.")
}
func (o *AuditTruncateOptions) wrapBackend(delegate audit.Backend, gv schema.GroupVersion) audit.Backend {
if !o.Enabled {
return delegate
}
return plugintruncate.NewBackend(delegate, o.TruncateConfig, gv)
}
func (o *AuditLogOptions) AddFlags(fs *pflag.FlagSet) {
fs.StringVar(&o.Path, "audit-log-path", o.Path,
"If set, all requests coming to the apiserver will be logged to this file. '-' means standard out.")
fs.IntVar(&o.MaxAge, "audit-log-maxage", o.MaxAge,
"The maximum number of days to retain old audit log files based on the timestamp encoded in their filename.")
fs.IntVar(&o.MaxBackups, "audit-log-maxbackup", o.MaxBackups,
"The maximum number of old audit log files to retain.")
fs.IntVar(&o.MaxSize, "audit-log-maxsize", o.MaxSize,
"The maximum size in megabytes of the audit log file before it gets rotated.")
fs.StringVar(&o.Format, "audit-log-format", o.Format,
"Format of saved audits. \"legacy\" indicates 1-line text format for each event."+
" \"json\" indicates structured json format. Known formats are "+
strings.Join(pluginlog.AllowedFormats, ",")+".")
fs.StringVar(&o.GroupVersionString, "audit-log-version", o.GroupVersionString,
"API group and version used for serializing audit events written to log.")
fs.BoolVar(&o.Compress, "audit-log-compress", o.Compress, "If set, the rotated log files will be compressed using gzip.")
}
func (o *AuditLogOptions) Validate() []error {
// Check whether the log backend is enabled based on the options.
if !o.enabled() {
return nil
}
var allErrors []error
if err := validateBackendBatchOptions(pluginlog.PluginName, o.BatchOptions); err != nil {
allErrors = append(allErrors, err)
}
if err := o.TruncateOptions.Validate(pluginlog.PluginName); err != nil {
allErrors = append(allErrors, err)
}
if err := validateGroupVersionString(o.GroupVersionString); err != nil {
allErrors = append(allErrors, err)
}
// Check log format
if !sets.NewString(pluginlog.AllowedFormats...).Has(o.Format) {
allErrors = append(allErrors, fmt.Errorf("invalid audit log format %s, allowed formats are %q", o.Format, strings.Join(pluginlog.AllowedFormats, ",")))
}
// Check the validity of MaxAge, MaxBackups and MaxSize of the log options, if the file log backend is enabled.
if o.MaxAge < 0 {
allErrors = append(allErrors, fmt.Errorf("--audit-log-maxage %v can't be a negative number", o.MaxAge))
}
if o.MaxBackups < 0 {
allErrors = append(allErrors, fmt.Errorf("--audit-log-maxbackup %v can't be a negative number", o.MaxBackups))
}
if o.MaxSize < 0 {
allErrors = append(allErrors, fmt.Errorf("--audit-log-maxsize %v can't be a negative number", o.MaxSize))
}
return allErrors
}
// Check whether the log backend is enabled based on the options.
func (o *AuditLogOptions) enabled() bool {
return o != nil && o.Path != ""
}
func (o *AuditLogOptions) getWriter() (io.Writer, error) {
if !o.enabled() {
return nil, nil
}
if o.Path == "-" {
return os.Stdout, nil
}
if err := o.ensureLogFile(); err != nil {
return nil, fmt.Errorf("ensureLogFile: %w", err)
}
return &lumberjack.Logger{
Filename: o.Path,
MaxAge: o.MaxAge,
MaxBackups: o.MaxBackups,
MaxSize: o.MaxSize,
Compress: o.Compress,
}, nil
}
func (o *AuditLogOptions) ensureLogFile() error {
mode := os.FileMode(0600)
f, err := os.OpenFile(o.Path, os.O_CREATE|os.O_APPEND|os.O_RDWR, mode)
if err != nil {
return err
}
return f.Close()
}
func (o *AuditLogOptions) newBackend(w io.Writer) audit.Backend {
groupVersion, _ := schema.ParseGroupVersion(o.GroupVersionString)
log := pluginlog.NewBackend(w, o.Format, groupVersion)
log = o.BatchOptions.wrapBackend(log)
log = o.TruncateOptions.wrapBackend(log, groupVersion)
return log
}
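// Illustrative sketch, not part of the upstream file: building a log backend
// that writes JSON events to stdout using the defaults from NewAuditOptions.
//
//	o := NewAuditOptions()
//	o.LogOptions.Path = "-" // "-" means standard out
//	w, err := o.LogOptions.getWriter()
//	if err != nil {
//		// handle error
//	}
//	backend := o.LogOptions.newBackend(w) // wrapped with the blocking and (disabled) truncate decorators
//	_ = backend
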
func (o *AuditWebhookOptions) AddFlags(fs *pflag.FlagSet) {
fs.StringVar(&o.ConfigFile, "audit-webhook-config-file", o.ConfigFile,
"Path to a kubeconfig formatted file that defines the audit webhook configuration.")
fs.DurationVar(&o.InitialBackoff, "audit-webhook-initial-backoff",
o.InitialBackoff, "The amount of time to wait before retrying the first failed request.")
fs.DurationVar(&o.InitialBackoff, "audit-webhook-batch-initial-backoff",
o.InitialBackoff, "The amount of time to wait before retrying the first failed request.")
fs.MarkDeprecated("audit-webhook-batch-initial-backoff",
"Deprecated, use --audit-webhook-initial-backoff instead.")
fs.StringVar(&o.GroupVersionString, "audit-webhook-version", o.GroupVersionString,
"API group and version used for serializing audit events written to webhook.")
}
func (o *AuditWebhookOptions) Validate() []error {
if !o.enabled() {
return nil
}
var allErrors []error
if err := validateBackendBatchOptions(pluginwebhook.PluginName, o.BatchOptions); err != nil {
allErrors = append(allErrors, err)
}
if err := o.TruncateOptions.Validate(pluginwebhook.PluginName); err != nil {
allErrors = append(allErrors, err)
}
if err := validateGroupVersionString(o.GroupVersionString); err != nil {
allErrors = append(allErrors, err)
}
return allErrors
}
func (o *AuditWebhookOptions) enabled() bool {
return o != nil && o.ConfigFile != ""
}
// newUntruncatedBackend returns a webhook backend without the truncate options applied
// this is done so that the same truncate backend can wrap both the webhook and dynamic backends
func (o *AuditWebhookOptions) newUntruncatedBackend(customDial utilnet.DialFunc) (audit.Backend, error) {
groupVersion, _ := schema.ParseGroupVersion(o.GroupVersionString)
webhook, err := pluginwebhook.NewBackend(o.ConfigFile, groupVersion, webhook.DefaultRetryBackoffWithInitialDelay(o.InitialBackoff), customDial)
if err != nil {
return nil, fmt.Errorf("initializing audit webhook: %v", err)
}
webhook = o.BatchOptions.wrapBackend(webhook)
return webhook, nil
}
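// Usage sketch (the kubeconfig path is an assumption, not upstream code):
// building the webhook backend and wrapping it for truncation, mirroring what
// ApplyTo does above.
//
//	o := NewAuditOptions()
//	o.WebhookOptions.ConfigFile = "/etc/kubernetes/audit-webhook.kubeconfig"
//	gv, _ := schema.ParseGroupVersion(o.WebhookOptions.GroupVersionString)
//	b, err := o.WebhookOptions.newUntruncatedBackend(nil) // nil: no egress dialer
//	if err != nil {
//		// handle error
//	}
//	b = o.WebhookOptions.TruncateOptions.wrapBackend(b, gv)
//	_ = b
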
// defaultWebhookBatchConfig returns the default BatchConfig used by the Webhook backend.
func defaultWebhookBatchConfig() pluginbuffered.BatchConfig {
return pluginbuffered.BatchConfig{
BufferSize: defaultBatchBufferSize,
MaxBatchSize: defaultBatchMaxSize,
MaxBatchWait: defaultBatchMaxWait,
ThrottleEnable: true,
ThrottleQPS: defaultBatchThrottleQPS,
ThrottleBurst: defaultBatchThrottleBurst,
AsyncDelegate: true,
}
}
// defaultLogBatchConfig returns the default BatchConfig used by the Log backend.
func defaultLogBatchConfig() pluginbuffered.BatchConfig {
return pluginbuffered.BatchConfig{
BufferSize: defaultBatchBufferSize,
// Batching is not useful for the log-file backend.
// MaxBatchWait ignored.
MaxBatchSize: 1,
ThrottleEnable: false,
// Asynchronous log threads just create lock contention.
AsyncDelegate: false,
}
}

View File

@ -0,0 +1,440 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package options
import (
"fmt"
"strings"
"time"
"github.com/spf13/pflag"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/authentication/authenticatorfactory"
"k8s.io/apiserver/pkg/authentication/request/headerrequest"
"k8s.io/apiserver/pkg/server"
"k8s.io/apiserver/pkg/server/dynamiccertificates"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/transport"
"k8s.io/klog/v2"
openapicommon "k8s.io/kube-openapi/pkg/common"
)
// DefaultAuthWebhookRetryBackoff returns the default backoff parameters for
// both the authentication and authorization webhooks used by the apiserver.
func DefaultAuthWebhookRetryBackoff() *wait.Backoff {
return &wait.Backoff{
Duration: 500 * time.Millisecond,
Factor: 1.5,
Jitter: 0.2,
Steps: 5,
}
}
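// Illustrative sketch, not upstream code: the default budget is five attempts
// starting at 500ms and growing by 1.5x per step, with ±20% jitter. It can be
// used with wait.ExponentialBackoff directly; callWebhook is a placeholder for
// the real webhook call.
//
//	backoff := DefaultAuthWebhookRetryBackoff()
//	err := wait.ExponentialBackoff(*backoff, func() (bool, error) {
//		return callWebhook()
//	})
//	_ = err
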
type RequestHeaderAuthenticationOptions struct {
// ClientCAFile is the root certificate bundle to verify client certificates on incoming requests
// before trusting usernames in headers.
ClientCAFile string
UsernameHeaders []string
GroupHeaders []string
ExtraHeaderPrefixes []string
AllowedNames []string
}
func (s *RequestHeaderAuthenticationOptions) Validate() []error {
allErrors := []error{}
if err := checkForWhiteSpaceOnly("requestheader-username-headers", s.UsernameHeaders...); err != nil {
allErrors = append(allErrors, err)
}
if err := checkForWhiteSpaceOnly("requestheader-group-headers", s.GroupHeaders...); err != nil {
allErrors = append(allErrors, err)
}
if err := checkForWhiteSpaceOnly("requestheader-extra-headers-prefix", s.ExtraHeaderPrefixes...); err != nil {
allErrors = append(allErrors, err)
}
if err := checkForWhiteSpaceOnly("requestheader-allowed-names", s.AllowedNames...); err != nil {
allErrors = append(allErrors, err)
}
return allErrors
}
func checkForWhiteSpaceOnly(flag string, headerNames ...string) error {
for _, headerName := range headerNames {
if len(strings.TrimSpace(headerName)) == 0 {
return fmt.Errorf("empty value in %q", flag)
}
}
return nil
}
func (s *RequestHeaderAuthenticationOptions) AddFlags(fs *pflag.FlagSet) {
if s == nil {
return
}
fs.StringSliceVar(&s.UsernameHeaders, "requestheader-username-headers", s.UsernameHeaders, ""+
"List of request headers to inspect for usernames. X-Remote-User is common.")
fs.StringSliceVar(&s.GroupHeaders, "requestheader-group-headers", s.GroupHeaders, ""+
"List of request headers to inspect for groups. X-Remote-Group is suggested.")
fs.StringSliceVar(&s.ExtraHeaderPrefixes, "requestheader-extra-headers-prefix", s.ExtraHeaderPrefixes, ""+
"List of request header prefixes to inspect. X-Remote-Extra- is suggested.")
fs.StringVar(&s.ClientCAFile, "requestheader-client-ca-file", s.ClientCAFile, ""+
"Root certificate bundle to use to verify client certificates on incoming requests "+
"before trusting usernames in headers specified by --requestheader-username-headers. "+
"WARNING: generally do not depend on authorization being already done for incoming requests.")
fs.StringSliceVar(&s.AllowedNames, "requestheader-allowed-names", s.AllowedNames, ""+
"List of client certificate common names to allow to provide usernames in headers "+
"specified by --requestheader-username-headers. If empty, any client certificate validated "+
"by the authorities in --requestheader-client-ca-file is allowed.")
}
// ToAuthenticationRequestHeaderConfig returns a RequestHeaderConfig object for these options
// if necessary, nil otherwise.
func (s *RequestHeaderAuthenticationOptions) ToAuthenticationRequestHeaderConfig() (*authenticatorfactory.RequestHeaderConfig, error) {
if len(s.ClientCAFile) == 0 {
return nil, nil
}
caBundleProvider, err := dynamiccertificates.NewDynamicCAContentFromFile("request-header", s.ClientCAFile)
if err != nil {
return nil, err
}
return &authenticatorfactory.RequestHeaderConfig{
UsernameHeaders: headerrequest.StaticStringSlice(s.UsernameHeaders),
GroupHeaders: headerrequest.StaticStringSlice(s.GroupHeaders),
ExtraHeaderPrefixes: headerrequest.StaticStringSlice(s.ExtraHeaderPrefixes),
CAContentProvider: caBundleProvider,
AllowedClientNames: headerrequest.StaticStringSlice(s.AllowedNames),
}, nil
}
// ClientCertAuthenticationOptions provides different options for client cert auth. Use
// GetClientCAContentProvider to get the CA bundle provider for your authenticator.
type ClientCertAuthenticationOptions struct {
// ClientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates
ClientCA string
// CAContentProvider are the options for verifying incoming connections using mTLS and directly assigning to users.
// Generally this is the CA bundle file used to authenticate client certificates
// If non-nil, this takes priority over the ClientCA file.
CAContentProvider dynamiccertificates.CAContentProvider
}
// GetClientCAContentProvider returns the CA content provider used to verify client certificates,
// preferring an injected CAContentProvider over the ClientCA file.
func (s *ClientCertAuthenticationOptions) GetClientCAContentProvider() (dynamiccertificates.CAContentProvider, error) {
if s.CAContentProvider != nil {
return s.CAContentProvider, nil
}
if len(s.ClientCA) == 0 {
return nil, nil
}
return dynamiccertificates.NewDynamicCAContentFromFile("client-ca-bundle", s.ClientCA)
}
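// Minimal sketch (the CA path is an assumption, not upstream code): a file-based
// CA bundle is only consulted when no CAContentProvider was injected directly.
//
//	cc := ClientCertAuthenticationOptions{ClientCA: "/etc/kubernetes/pki/ca.crt"}
//	provider, err := cc.GetClientCAContentProvider()
//	if err != nil {
//		// handle error (for example, the file cannot be read)
//	}
//	_ = provider // dynamic provider backed by the file
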
func (s *ClientCertAuthenticationOptions) AddFlags(fs *pflag.FlagSet) {
fs.StringVar(&s.ClientCA, "client-ca-file", s.ClientCA, ""+
"If set, any request presenting a client certificate signed by one of "+
"the authorities in the client-ca-file is authenticated with an identity "+
"corresponding to the CommonName of the client certificate.")
}
// DelegatingAuthenticationOptions provides an easy way for composing API servers to delegate their authentication to
// the root kube API server. The API federator will act as
// a front proxy, and direct connections will be able to delegate to the core kube API server.
type DelegatingAuthenticationOptions struct {
// RemoteKubeConfigFile is the file to use to connect to a "normal" kube API server which hosts the
// TokenAccessReview.authentication.k8s.io endpoint for checking tokens.
RemoteKubeConfigFile string
// RemoteKubeConfigFileOptional specifies whether failing to provide the kubeconfig
// or to find an in-cluster config is fatal.
RemoteKubeConfigFileOptional bool
// CacheTTL is the length of time that a token authentication answer will be cached.
CacheTTL time.Duration
ClientCert ClientCertAuthenticationOptions
RequestHeader RequestHeaderAuthenticationOptions
// SkipInClusterLookup indicates that missing authentication configuration should not be retrieved from the cluster configmap.
SkipInClusterLookup bool
// TolerateInClusterLookupFailure indicates failures to look up authentication configuration from the cluster configmap should not be fatal.
// Setting this can result in an authenticator that will reject all requests.
TolerateInClusterLookupFailure bool
// WebhookRetryBackoff specifies the backoff parameters for the authentication webhook retry logic.
// This allows us to configure the sleep time at each iteration and the maximum number of retries allowed
// before we fail the webhook call in order to limit the fan out that ensues when the system is degraded.
WebhookRetryBackoff *wait.Backoff
// TokenRequestTimeout specifies a time limit for requests made by the authorization webhook client.
// The default value is set to 10 seconds.
TokenRequestTimeout time.Duration
// CustomRoundTripperFn allows for specifying a middleware function for custom HTTP behaviour for the authentication webhook client.
CustomRoundTripperFn transport.WrapperFunc
}
func NewDelegatingAuthenticationOptions() *DelegatingAuthenticationOptions {
return &DelegatingAuthenticationOptions{
// very low for responsiveness, but high enough to handle storms
CacheTTL: 10 * time.Second,
ClientCert: ClientCertAuthenticationOptions{},
RequestHeader: RequestHeaderAuthenticationOptions{
UsernameHeaders: []string{"x-remote-user"},
GroupHeaders: []string{"x-remote-group"},
ExtraHeaderPrefixes: []string{"x-remote-extra-"},
},
WebhookRetryBackoff: DefaultAuthWebhookRetryBackoff(),
TokenRequestTimeout: 10 * time.Second,
}
}
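// Usage sketch, not part of the upstream file: an aggregated API server would
// typically construct the options, optionally tune the webhook behaviour, bind
// the flags, and validate. The flag-set name is an assumption for the sketch.
//
//	opts := NewDelegatingAuthenticationOptions()
//	opts.WithRequestTimeout(15 * time.Second)
//	opts.WithCustomRetryBackoff(wait.Backoff{Duration: time.Second, Factor: 2, Jitter: 0.1, Steps: 3})
//	fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
//	opts.AddFlags(fs)
//	if errs := opts.Validate(); len(errs) > 0 {
//		// reject the configuration
//	}
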
// WithCustomRetryBackoff sets the custom backoff parameters for the authentication webhook retry logic.
func (s *DelegatingAuthenticationOptions) WithCustomRetryBackoff(backoff wait.Backoff) {
s.WebhookRetryBackoff = &backoff
}
// WithRequestTimeout sets the given timeout for requests made by the authentication webhook client.
func (s *DelegatingAuthenticationOptions) WithRequestTimeout(timeout time.Duration) {
s.TokenRequestTimeout = timeout
}
// WithCustomRoundTripper allows for specifying a middleware function for custom HTTP behaviour for the authentication webhook client.
func (s *DelegatingAuthenticationOptions) WithCustomRoundTripper(rt transport.WrapperFunc) {
s.CustomRoundTripperFn = rt
}
func (s *DelegatingAuthenticationOptions) Validate() []error {
if s == nil {
return nil
}
allErrors := []error{}
allErrors = append(allErrors, s.RequestHeader.Validate()...)
if s.WebhookRetryBackoff != nil && s.WebhookRetryBackoff.Steps <= 0 {
allErrors = append(allErrors, fmt.Errorf("number of webhook retry attempts must be greater than 1, but is: %d", s.WebhookRetryBackoff.Steps))
}
return allErrors
}
func (s *DelegatingAuthenticationOptions) AddFlags(fs *pflag.FlagSet) {
if s == nil {
return
}
var optionalKubeConfigSentence string
if s.RemoteKubeConfigFileOptional {
optionalKubeConfigSentence = " This is optional. If empty, all token requests are considered to be anonymous and no client CA is looked up in the cluster."
}
fs.StringVar(&s.RemoteKubeConfigFile, "authentication-kubeconfig", s.RemoteKubeConfigFile, ""+
"kubeconfig file pointing at the 'core' kubernetes server with enough rights to create "+
"tokenreviews.authentication.k8s.io."+optionalKubeConfigSentence)
fs.DurationVar(&s.CacheTTL, "authentication-token-webhook-cache-ttl", s.CacheTTL,
"The duration to cache responses from the webhook token authenticator.")
s.ClientCert.AddFlags(fs)
s.RequestHeader.AddFlags(fs)
fs.BoolVar(&s.SkipInClusterLookup, "authentication-skip-lookup", s.SkipInClusterLookup, ""+
"If false, the authentication-kubeconfig will be used to lookup missing authentication "+
"configuration from the cluster.")
fs.BoolVar(&s.TolerateInClusterLookupFailure, "authentication-tolerate-lookup-failure", s.TolerateInClusterLookupFailure, ""+
"If true, failures to look up missing authentication configuration from the cluster are not considered fatal. "+
"Note that this can result in authentication that treats all requests as anonymous.")
}
func (s *DelegatingAuthenticationOptions) ApplyTo(authenticationInfo *server.AuthenticationInfo, servingInfo *server.SecureServingInfo, openAPIConfig *openapicommon.Config) error {
if s == nil {
authenticationInfo.Authenticator = nil
return nil
}
cfg := authenticatorfactory.DelegatingAuthenticatorConfig{
Anonymous: true,
CacheTTL: s.CacheTTL,
WebhookRetryBackoff: s.WebhookRetryBackoff,
TokenAccessReviewTimeout: s.TokenRequestTimeout,
}
client, err := s.getClient()
if err != nil {
return fmt.Errorf("failed to get delegated authentication kubeconfig: %v", err)
}
// configure token review
if client != nil {
cfg.TokenAccessReviewClient = client.AuthenticationV1()
}
// get the clientCA information
clientCASpecified := s.ClientCert != ClientCertAuthenticationOptions{}
var clientCAProvider dynamiccertificates.CAContentProvider
if clientCASpecified {
clientCAProvider, err = s.ClientCert.GetClientCAContentProvider()
if err != nil {
return fmt.Errorf("unable to load client CA provider: %v", err)
}
cfg.ClientCertificateCAContentProvider = clientCAProvider
if err = authenticationInfo.ApplyClientCert(cfg.ClientCertificateCAContentProvider, servingInfo); err != nil {
return fmt.Errorf("unable to assign client CA provider: %v", err)
}
} else if !s.SkipInClusterLookup {
if client == nil {
klog.Warningf("No authentication-kubeconfig provided in order to lookup client-ca-file in configmap/%s in %s, so client certificate authentication won't work.", authenticationConfigMapName, authenticationConfigMapNamespace)
} else {
clientCAProvider, err = dynamiccertificates.NewDynamicCAFromConfigMapController("client-ca", authenticationConfigMapNamespace, authenticationConfigMapName, "client-ca-file", client)
if err != nil {
return fmt.Errorf("unable to load configmap based client CA file: %v", err)
}
cfg.ClientCertificateCAContentProvider = clientCAProvider
if err = authenticationInfo.ApplyClientCert(cfg.ClientCertificateCAContentProvider, servingInfo); err != nil {
return fmt.Errorf("unable to assign configmap based client CA file: %v", err)
}
}
}
requestHeaderCAFileSpecified := len(s.RequestHeader.ClientCAFile) > 0
var requestHeaderConfig *authenticatorfactory.RequestHeaderConfig
if requestHeaderCAFileSpecified {
requestHeaderConfig, err = s.RequestHeader.ToAuthenticationRequestHeaderConfig()
if err != nil {
return fmt.Errorf("unable to create request header authentication config: %v", err)
}
} else if !s.SkipInClusterLookup {
if client == nil {
klog.Warningf("No authentication-kubeconfig provided in order to lookup requestheader-client-ca-file in configmap/%s in %s, so request-header client certificate authentication won't work.", authenticationConfigMapName, authenticationConfigMapNamespace)
} else {
requestHeaderConfig, err = s.createRequestHeaderConfig(client)
if err != nil {
if s.TolerateInClusterLookupFailure {
klog.Warningf("Error looking up in-cluster authentication configuration: %v", err)
klog.Warning("Continuing without authentication configuration. This may treat all requests as anonymous.")
klog.Warning("To require authentication configuration lookup to succeed, set --authentication-tolerate-lookup-failure=false")
} else {
return fmt.Errorf("unable to load configmap based request-header-client-ca-file: %v", err)
}
}
}
}
if requestHeaderConfig != nil {
cfg.RequestHeaderConfig = requestHeaderConfig
if err = authenticationInfo.ApplyClientCert(cfg.RequestHeaderConfig.CAContentProvider, servingInfo); err != nil {
return fmt.Errorf("unable to load request-header-client-ca-file: %v", err)
}
}
// create authenticator
authenticator, securityDefinitions, err := cfg.New()
if err != nil {
return err
}
authenticationInfo.Authenticator = authenticator
if openAPIConfig != nil {
openAPIConfig.SecurityDefinitions = securityDefinitions
}
return nil
}
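// Illustrative sketch, not upstream code: applying the options to a generic
// apiserver config. The *server.Config value c is assumed to be under
// construction elsewhere.
//
//	if err := opts.ApplyTo(&c.Authentication, c.SecureServing, c.OpenAPIConfig); err != nil {
//		return err
//	}
//	// c.Authentication.Authenticator now handles token review, client-cert and
//	// request-header authentication, depending on what was configured or discovered.
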
const (
authenticationConfigMapNamespace = metav1.NamespaceSystem
// authenticationConfigMapName is the name of ConfigMap in the kube-system namespace holding the root certificate
// bundle to use to verify client certificates on incoming requests before trusting usernames in headers specified
// by --requestheader-username-headers. This is created in the cluster by the kube-apiserver.
// "WARNING: generally do not depend on authorization being already done for incoming requests.")
authenticationConfigMapName = "extension-apiserver-authentication"
)
func (s *DelegatingAuthenticationOptions) createRequestHeaderConfig(client kubernetes.Interface) (*authenticatorfactory.RequestHeaderConfig, error) {
dynamicRequestHeaderProvider, err := newDynamicRequestHeaderController(client)
if err != nil {
return nil, fmt.Errorf("unable to create request header authentication config: %v", err)
}
// look up authentication configuration in the cluster and, in case of an error, defer to the authentication-tolerate-lookup-failure flag
if err := dynamicRequestHeaderProvider.RunOnce(); err != nil {
return nil, err
}
return &authenticatorfactory.RequestHeaderConfig{
CAContentProvider: dynamicRequestHeaderProvider,
UsernameHeaders: headerrequest.StringSliceProvider(headerrequest.StringSliceProviderFunc(dynamicRequestHeaderProvider.UsernameHeaders)),
GroupHeaders: headerrequest.StringSliceProvider(headerrequest.StringSliceProviderFunc(dynamicRequestHeaderProvider.GroupHeaders)),
ExtraHeaderPrefixes: headerrequest.StringSliceProvider(headerrequest.StringSliceProviderFunc(dynamicRequestHeaderProvider.ExtraHeaderPrefixes)),
AllowedClientNames: headerrequest.StringSliceProvider(headerrequest.StringSliceProviderFunc(dynamicRequestHeaderProvider.AllowedClientNames)),
}, nil
}
// getClient returns a Kubernetes clientset. If s.RemoteKubeConfigFileOptional is true, nil will be returned
// if no kubeconfig is specified by the user and the in-cluster config is not found.
func (s *DelegatingAuthenticationOptions) getClient() (kubernetes.Interface, error) {
var clientConfig *rest.Config
var err error
if len(s.RemoteKubeConfigFile) > 0 {
loadingRules := &clientcmd.ClientConfigLoadingRules{ExplicitPath: s.RemoteKubeConfigFile}
loader := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, &clientcmd.ConfigOverrides{})
clientConfig, err = loader.ClientConfig()
} else {
// without the remote kubeconfig file, try to use the in-cluster config. Most addon API servers will
// use this path. If it is optional, ignore errors.
clientConfig, err = rest.InClusterConfig()
if err != nil && s.RemoteKubeConfigFileOptional {
if err != rest.ErrNotInCluster {
klog.Warningf("failed to read in-cluster kubeconfig for delegated authentication: %v", err)
}
return nil, nil
}
}
if err != nil {
return nil, fmt.Errorf("failed to get delegated authentication kubeconfig: %v", err)
}
// set high qps/burst limits since this will effectively limit API server responsiveness
clientConfig.QPS = 200
clientConfig.Burst = 400
// do not set a timeout on the http client, instead use context for cancellation
// if multiple timeouts were set, the request will pick the smallest timeout to be applied, leaving the others useless.
//
// see https://github.com/golang/go/blob/a937729c2c2f6950a32bc5cd0f5b88700882f078/src/net/http/client.go#L364
if s.CustomRoundTripperFn != nil {
clientConfig.Wrap(s.CustomRoundTripperFn)
}
return kubernetes.NewForConfig(clientConfig)
}

Some files were not shown because too many files have changed in this diff.