Update Godeps
This commit is contained in:
		
							parent
							
								
									db884f5337
								
							
						
					
					
						commit
						cc0b9b204c
					
				
										
											
												File diff suppressed because it is too large
												Load Diff
											
										
									
								
							|  | @ -4,9 +4,30 @@ This package supports launching Windows Server containers from Go. It is | |||
| primarily used in the [Docker Engine](https://github.com/docker/docker) project, | ||||
| but it can be freely used by other projects as well. | ||||
| 
 | ||||
| This project has adopted the [Microsoft Open Source Code of | ||||
| Conduct](https://opensource.microsoft.com/codeofconduct/). For more information | ||||
| see the [Code of Conduct | ||||
| FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact | ||||
| [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional | ||||
| questions or comments. | ||||
| 
 | ||||
| ## Contributing | ||||
| This project welcomes contributions and suggestions.  Most contributions require you to agree to a | ||||
| Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us | ||||
| the rights to use your contribution. For details, visit https://cla.microsoft.com. | ||||
| 
 | ||||
| When you submit a pull request, a CLA-bot will automatically determine whether you need to provide | ||||
| a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions | ||||
| provided by the bot. You will only need to do this once across all repos using our CLA. | ||||
| 
 | ||||
| This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). | ||||
| For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or | ||||
| contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. | ||||
| 
 | ||||
| 
 | ||||
| ## Reporting Security Issues | ||||
| 
 | ||||
| Security issues and bugs should be reported privately, via email, to the Microsoft Security | ||||
| Response Center (MSRC) at [secure@microsoft.com](mailto:secure@microsoft.com). You should | ||||
| receive a response within 24 hours. If for some reason you do not, please follow up via | ||||
| email to ensure we received your original message. Further information, including the | ||||
| [MSRC PGP](https://technet.microsoft.com/en-us/security/dn606155) key, can be found in | ||||
| the [Security TechCenter](https://technet.microsoft.com/en-us/security/default). | ||||
| 
 | ||||
| ------------------------------------------- | ||||
| Copyright (c) 2018 Microsoft Corp.  All rights reserved. | ||||
|  |  | |||
|  | @ -10,7 +10,7 @@ import ( | |||
| ) | ||||
| 
 | ||||
| type baseLayerWriter struct { | ||||
| 	root         string | ||||
| 	root         *os.File | ||||
| 	f            *os.File | ||||
| 	bw           *winio.BackupFileWriter | ||||
| 	err          error | ||||
|  | @ -26,10 +26,10 @@ type dirInfo struct { | |||
| // reapplyDirectoryTimes reapplies directory modification, creation, etc. times
 | ||||
| // after processing of the directory tree has completed. The times are expected
 | ||||
| // to be ordered such that parent directories come before child directories.
 | ||||
| func reapplyDirectoryTimes(dis []dirInfo) error { | ||||
| func reapplyDirectoryTimes(root *os.File, dis []dirInfo) error { | ||||
| 	for i := range dis { | ||||
| 		di := &dis[len(dis)-i-1] // reverse order: process child directories first
 | ||||
| 		f, err := winio.OpenForBackup(di.path, syscall.GENERIC_READ|syscall.GENERIC_WRITE, syscall.FILE_SHARE_READ, syscall.OPEN_EXISTING) | ||||
| 		f, err := openRelative(di.path, root, syscall.GENERIC_READ|syscall.GENERIC_WRITE, syscall.FILE_SHARE_READ, _FILE_OPEN, _FILE_DIRECTORY_FILE) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
|  | @ -75,12 +75,6 @@ func (w *baseLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) (err e | |||
| 		w.hasUtilityVM = true | ||||
| 	} | ||||
| 
 | ||||
| 	path := filepath.Join(w.root, name) | ||||
| 	path, err = makeLongAbsPath(path) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	var f *os.File | ||||
| 	defer func() { | ||||
| 		if f != nil { | ||||
|  | @ -88,27 +82,23 @@ func (w *baseLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) (err e | |||
| 		} | ||||
| 	}() | ||||
| 
 | ||||
| 	createmode := uint32(syscall.CREATE_NEW) | ||||
| 	extraFlags := uint32(0) | ||||
| 	if fileInfo.FileAttributes&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 { | ||||
| 		err := os.Mkdir(path, 0) | ||||
| 		if err != nil && !os.IsExist(err) { | ||||
| 			return err | ||||
| 		} | ||||
| 		createmode = syscall.OPEN_EXISTING | ||||
| 		extraFlags |= _FILE_DIRECTORY_FILE | ||||
| 		if fileInfo.FileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT == 0 { | ||||
| 			w.dirInfo = append(w.dirInfo, dirInfo{path, *fileInfo}) | ||||
| 			w.dirInfo = append(w.dirInfo, dirInfo{name, *fileInfo}) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	mode := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE | winio.WRITE_DAC | winio.WRITE_OWNER | winio.ACCESS_SYSTEM_SECURITY) | ||||
| 	f, err = winio.OpenForBackup(path, mode, syscall.FILE_SHARE_READ, createmode) | ||||
| 	f, err = openRelative(name, w.root, mode, syscall.FILE_SHARE_READ, _FILE_CREATE, extraFlags) | ||||
| 	if err != nil { | ||||
| 		return makeError(err, "Failed to OpenForBackup", path) | ||||
| 		return makeError(err, "Failed to openRelative", name) | ||||
| 	} | ||||
| 
 | ||||
| 	err = winio.SetFileBasicInfo(f, fileInfo) | ||||
| 	if err != nil { | ||||
| 		return makeError(err, "Failed to SetFileBasicInfo", path) | ||||
| 		return makeError(err, "Failed to SetFileBasicInfo", name) | ||||
| 	} | ||||
| 
 | ||||
| 	w.f = f | ||||
|  | @ -129,17 +119,7 @@ func (w *baseLayerWriter) AddLink(name string, target string) (err error) { | |||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	linkpath, err := makeLongAbsPath(filepath.Join(w.root, name)) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	linktarget, err := makeLongAbsPath(filepath.Join(w.root, target)) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	return os.Link(linktarget, linkpath) | ||||
| 	return linkRelative(target, w.root, name, w.root) | ||||
| } | ||||
| 
 | ||||
| func (w *baseLayerWriter) Remove(name string) error { | ||||
|  | @ -155,6 +135,10 @@ func (w *baseLayerWriter) Write(b []byte) (int, error) { | |||
| } | ||||
| 
 | ||||
| func (w *baseLayerWriter) Close() error { | ||||
| 	defer func() { | ||||
| 		w.root.Close() | ||||
| 		w.root = nil | ||||
| 	}() | ||||
| 	err := w.closeCurrentFile() | ||||
| 	if err != nil { | ||||
| 		return err | ||||
|  | @ -162,18 +146,22 @@ func (w *baseLayerWriter) Close() error { | |||
| 	if w.err == nil { | ||||
| 		// Restore the file times of all the directories, since they may have
 | ||||
| 		// been modified by creating child directories.
 | ||||
| 		err = reapplyDirectoryTimes(w.dirInfo) | ||||
| 		err = reapplyDirectoryTimes(w.root, w.dirInfo) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 
 | ||||
| 		err = ProcessBaseLayer(w.root) | ||||
| 		err = ProcessBaseLayer(w.root.Name()) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 
 | ||||
| 		if w.hasUtilityVM { | ||||
| 			err = ProcessUtilityVMImage(filepath.Join(w.root, "UtilityVM")) | ||||
| 			err := ensureNotReparsePointRelative("UtilityVM", w.root) | ||||
| 			if err != nil { | ||||
| 				return err | ||||
| 			} | ||||
| 			err = ProcessUtilityVMImage(filepath.Join(w.root.Name(), "UtilityVM")) | ||||
| 			if err != nil { | ||||
| 				return err | ||||
| 			} | ||||
|  |  | |||
|  | @ -201,12 +201,18 @@ func createContainerWithJSON(id string, c *ContainerConfig, additionalJSON strin | |||
| 
 | ||||
| 	if createError == nil || IsPending(createError) { | ||||
| 		if err := container.registerCallback(); err != nil { | ||||
| 			// Terminate the container if it still exists. We're okay to ignore a failure here.
 | ||||
| 			container.Terminate() | ||||
| 			return nil, makeContainerError(container, operation, "", err) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	err = processAsyncHcsResult(createError, resultp, container.callbackNumber, hcsNotificationSystemCreateCompleted, &defaultTimeout) | ||||
| 	if err != nil { | ||||
| 		if err == ErrTimeout { | ||||
| 			// Terminate the container if it still exists. We're okay to ignore a failure here.
 | ||||
| 			container.Terminate() | ||||
| 		} | ||||
| 		return nil, makeContainerError(container, operation, configuration, err) | ||||
| 	} | ||||
| 
 | ||||
|  |  | |||
|  | @ -72,6 +72,22 @@ var ( | |||
| 	ErrPlatformNotSupported = errors.New("unsupported platform request") | ||||
| ) | ||||
| 
 | ||||
| type EndpointNotFoundError struct { | ||||
| 	EndpointName string | ||||
| } | ||||
| 
 | ||||
| func (e EndpointNotFoundError) Error() string { | ||||
| 	return fmt.Sprintf("Endpoint %s not found", e.EndpointName) | ||||
| } | ||||
| 
 | ||||
| type NetworkNotFoundError struct { | ||||
| 	NetworkName string | ||||
| } | ||||
| 
 | ||||
| func (e NetworkNotFoundError) Error() string { | ||||
| 	return fmt.Sprintf("Network %s not found", e.NetworkName) | ||||
| } | ||||
| 
 | ||||
| // ProcessError is an error encountered in HCS during an operation on a Process object
 | ||||
| type ProcessError struct { | ||||
| 	Process   *process | ||||
|  | @ -174,6 +190,12 @@ func makeProcessError(process *process, operation string, extraInfo string, err | |||
| // will currently return true when the error is ErrElementNotFound or ErrProcNotFound.
 | ||||
| func IsNotExist(err error) bool { | ||||
| 	err = getInnerError(err) | ||||
| 	if _, ok := err.(EndpointNotFoundError); ok { | ||||
| 		return true | ||||
| 	} | ||||
| 	if _, ok := err.(NetworkNotFoundError); ok { | ||||
| 		return true | ||||
| 	} | ||||
| 	return err == ErrComputeSystemDoesNotExist || | ||||
| 		err == ErrElementNotFound || | ||||
| 		err == ErrProcNotFound | ||||
|  |  | |||
|  | @ -11,7 +11,7 @@ import ( | |||
| 	"github.com/sirupsen/logrus" | ||||
| ) | ||||
| 
 | ||||
| //go:generate go run mksyscall_windows.go -output zhcsshim.go hcsshim.go
 | ||||
| //go:generate go run mksyscall_windows.go -output zhcsshim.go hcsshim.go safeopen.go
 | ||||
| 
 | ||||
| //sys coTaskMemFree(buffer unsafe.Pointer) = ole32.CoTaskMemFree
 | ||||
| //sys SetCurrentThreadCompartmentId(compartmentId uint32) (hr error) = iphlpapi.SetCurrentThreadCompartmentId
 | ||||
|  |  | |||
|  | @ -2,7 +2,6 @@ package hcsshim | |||
| 
 | ||||
| import ( | ||||
| 	"encoding/json" | ||||
| 	"fmt" | ||||
| 	"net" | ||||
| 
 | ||||
| 	"github.com/sirupsen/logrus" | ||||
|  | @ -135,7 +134,7 @@ func GetHNSEndpointByName(endpointName string) (*HNSEndpoint, error) { | |||
| 			return &hnsEndpoint, nil | ||||
| 		} | ||||
| 	} | ||||
| 	return nil, fmt.Errorf("Endpoint %v not found", endpointName) | ||||
| 	return nil, EndpointNotFoundError{EndpointName: endpointName} | ||||
| } | ||||
| 
 | ||||
| // Create Endpoint by sending EndpointRequest to HNS. TODO: Create a separate HNS interface to place all these methods
 | ||||
|  | @ -192,18 +191,24 @@ func (endpoint *HNSEndpoint) ContainerHotDetach(containerID string) error { | |||
| 	return modifyNetworkEndpoint(containerID, endpoint.Id, Remove) | ||||
| } | ||||
| 
 | ||||
| // ApplyACLPolicy applies Acl Policy on the Endpoint
 | ||||
| func (endpoint *HNSEndpoint) ApplyACLPolicy(policy *ACLPolicy) error { | ||||
| // ApplyACLPolicy applies a set of ACL Policies on the Endpoint
 | ||||
| func (endpoint *HNSEndpoint) ApplyACLPolicy(policies ...*ACLPolicy) error { | ||||
| 	operation := "ApplyACLPolicy" | ||||
| 	title := "HCSShim::HNSEndpoint::" + operation | ||||
| 	logrus.Debugf(title+" id=%s", endpoint.Id) | ||||
| 
 | ||||
| 	for _, policy := range policies { | ||||
| 		if policy == nil { | ||||
| 			continue | ||||
| 		} | ||||
| 		jsonString, err := json.Marshal(policy) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 	endpoint.Policies[0] = jsonString | ||||
| 	_, err = endpoint.Update() | ||||
| 		endpoint.Policies = append(endpoint.Policies, jsonString) | ||||
| 	} | ||||
| 
 | ||||
| 	_, err := endpoint.Update() | ||||
| 	return err | ||||
| } | ||||
| 
 | ||||
|  |  | |||
|  | @ -2,7 +2,6 @@ package hcsshim | |||
| 
 | ||||
| import ( | ||||
| 	"encoding/json" | ||||
| 	"fmt" | ||||
| 	"net" | ||||
| 
 | ||||
| 	"github.com/sirupsen/logrus" | ||||
|  | @ -90,7 +89,7 @@ func GetHNSNetworkByName(networkName string) (*HNSNetwork, error) { | |||
| 			return &hnsnetwork, nil | ||||
| 		} | ||||
| 	} | ||||
| 	return nil, fmt.Errorf("Network %v not found", networkName) | ||||
| 	return nil, NetworkNotFoundError{NetworkName: networkName} | ||||
| } | ||||
| 
 | ||||
| // Create Network by sending NetworkRequest to HNS.
 | ||||
|  |  | |||
|  | @ -80,12 +80,11 @@ type ACLPolicy struct { | |||
| 	InternalPort    uint16 | ||||
| 	Action          ActionType | ||||
| 	Direction       DirectionType | ||||
| 	LocalAddress  string | ||||
| 	RemoteAddress string | ||||
| 	LocalAddresses  string | ||||
| 	RemoteAddresses string | ||||
| 	LocalPort       uint16 | ||||
| 	RemotePort      uint16 | ||||
| 	RuleType        RuleType `json:"RuleType,omitempty"` | ||||
| 
 | ||||
| 	Priority        uint16 | ||||
| 	ServiceName     string | ||||
| } | ||||
|  |  | |||
|  | @ -129,37 +129,39 @@ type legacyLayerWriterWrapper struct { | |||
| } | ||||
| 
 | ||||
| func (r *legacyLayerWriterWrapper) Close() error { | ||||
| 	defer os.RemoveAll(r.root) | ||||
| 	defer os.RemoveAll(r.root.Name()) | ||||
| 	defer r.legacyLayerWriter.CloseRoots() | ||||
| 	err := r.legacyLayerWriter.Close() | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	// Use the original path here because ImportLayer does not support long paths for the source in TP5.
 | ||||
| 	// But do use a long path for the destination to work around another bug with directories
 | ||||
| 	// with MAX_PATH - 12 < length < MAX_PATH.
 | ||||
| 	info := r.info | ||||
| 	fullPath, err := makeLongAbsPath(filepath.Join(info.HomeDir, r.layerID)) | ||||
| 	if err != nil { | ||||
| 	info.HomeDir = "" | ||||
| 	if err = ImportLayer(info, r.destRoot.Name(), r.path, r.parentLayerPaths); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	info.HomeDir = "" | ||||
| 	if err = ImportLayer(info, fullPath, r.path, r.parentLayerPaths); err != nil { | ||||
| 	for _, name := range r.Tombstones { | ||||
| 		if err = removeRelative(name, r.destRoot); err != nil && !os.IsNotExist(err) { | ||||
| 			return err | ||||
| 		} | ||||
| 	} | ||||
| 	// Add any hard links that were collected.
 | ||||
| 	for _, lnk := range r.PendingLinks { | ||||
| 		if err = os.Remove(lnk.Path); err != nil && !os.IsNotExist(err) { | ||||
| 		if err = removeRelative(lnk.Path, r.destRoot); err != nil && !os.IsNotExist(err) { | ||||
| 			return err | ||||
| 		} | ||||
| 		if err = os.Link(lnk.Target, lnk.Path); err != nil { | ||||
| 		if err = linkRelative(lnk.Target, lnk.TargetRoot, lnk.Path, r.destRoot); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 	} | ||||
| 	// Prepare the utility VM for use if one is present in the layer.
 | ||||
| 	if r.HasUtilityVM { | ||||
| 		err = ProcessUtilityVMImage(filepath.Join(fullPath, "UtilityVM")) | ||||
| 		err := ensureNotReparsePointRelative("UtilityVM", r.destRoot) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 		err = ProcessUtilityVMImage(filepath.Join(r.destRoot.Name(), "UtilityVM")) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
|  | @ -173,8 +175,12 @@ func (r *legacyLayerWriterWrapper) Close() error { | |||
| func NewLayerWriter(info DriverInfo, layerID string, parentLayerPaths []string) (LayerWriter, error) { | ||||
| 	if len(parentLayerPaths) == 0 { | ||||
| 		// This is a base layer. It gets imported differently.
 | ||||
| 		f, err := openRoot(filepath.Join(info.HomeDir, layerID)) | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 		return &baseLayerWriter{ | ||||
| 			root: filepath.Join(info.HomeDir, layerID), | ||||
| 			root: f, | ||||
| 		}, nil | ||||
| 	} | ||||
| 
 | ||||
|  | @ -185,8 +191,12 @@ func NewLayerWriter(info DriverInfo, layerID string, parentLayerPaths []string) | |||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 		w, err := newLegacyLayerWriter(path, parentLayerPaths, filepath.Join(info.HomeDir, layerID)) | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 		return &legacyLayerWriterWrapper{ | ||||
| 			legacyLayerWriter: newLegacyLayerWriter(path, parentLayerPaths, filepath.Join(info.HomeDir, layerID)), | ||||
| 			legacyLayerWriter: w, | ||||
| 			info:              info, | ||||
| 			layerID:           layerID, | ||||
| 			path:              path, | ||||
|  |  | |||
|  | @ -35,6 +35,7 @@ type MappedDir struct { | |||
| 	ReadOnly          bool | ||||
| 	BandwidthMaximum  uint64 | ||||
| 	IOPSMaximum       uint64 | ||||
| 	CreateInUtilityVM bool | ||||
| } | ||||
| 
 | ||||
| type MappedPipe struct { | ||||
|  |  | |||
|  | @ -121,6 +121,16 @@ func (r *legacyLayerReader) walkUntilCancelled() error { | |||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 
 | ||||
| 		// Indirect fix for https://github.com/moby/moby/issues/32838#issuecomment-343610048.
 | ||||
| 		// Handle failure from what may be a golang bug in the conversion of
 | ||||
| // UTF16 to UTF8 in files which are left in the recycle bin. os.Lstat
 | ||||
| 		// which is called by filepath.Walk will fail when a filename contains
 | ||||
| 		// unicode characters. Skip the recycle bin regardless which is goodness.
 | ||||
| 		if strings.EqualFold(path, filepath.Join(r.root, `Files\$Recycle.Bin`)) && info.IsDir() { | ||||
| 			return filepath.SkipDir | ||||
| 		} | ||||
| 
 | ||||
| 		if path == r.root || path == filepath.Join(r.root, "tombstones.txt") || strings.HasSuffix(path, ".$wcidirs$") { | ||||
| 			return nil | ||||
| 		} | ||||
|  | @ -326,59 +336,79 @@ func (r *legacyLayerReader) Close() error { | |||
| 
 | ||||
| type pendingLink struct { | ||||
| 	Path, Target string | ||||
| 	TargetRoot   *os.File | ||||
| } | ||||
| 
 | ||||
| type pendingDir struct { | ||||
| 	Path string | ||||
| 	Root *os.File | ||||
| } | ||||
| 
 | ||||
| type legacyLayerWriter struct { | ||||
| 	root         string | ||||
| 	parentRoots  []string | ||||
| 	destRoot     string | ||||
| 	root            *os.File | ||||
| 	destRoot        *os.File | ||||
| 	parentRoots     []*os.File | ||||
| 	currentFile     *os.File | ||||
| 	currentFileName string | ||||
| 	currentFileRoot *os.File | ||||
| 	backupWriter    *winio.BackupFileWriter | ||||
| 	tombstones   []string | ||||
| 	pathFixed    bool | ||||
| 	Tombstones      []string | ||||
| 	HasUtilityVM    bool | ||||
| 	uvmDi           []dirInfo | ||||
| 	addedFiles      map[string]bool | ||||
| 	PendingLinks    []pendingLink | ||||
| 	pendingDirs     []pendingDir | ||||
| 	currentIsDir    bool | ||||
| } | ||||
| 
 | ||||
| // newLegacyLayerWriter returns a LayerWriter that can write the container layer
 | ||||
| // transport format to disk.
 | ||||
| func newLegacyLayerWriter(root string, parentRoots []string, destRoot string) *legacyLayerWriter { | ||||
| 	return &legacyLayerWriter{ | ||||
| 		root:        root, | ||||
| 		parentRoots: parentRoots, | ||||
| 		destRoot:    destRoot, | ||||
| func newLegacyLayerWriter(root string, parentRoots []string, destRoot string) (w *legacyLayerWriter, err error) { | ||||
| 	w = &legacyLayerWriter{ | ||||
| 		addedFiles: make(map[string]bool), | ||||
| 	} | ||||
| 	defer func() { | ||||
| 		if err != nil { | ||||
| 			w.CloseRoots() | ||||
| 			w = nil | ||||
| 		} | ||||
| 	}() | ||||
| 	w.root, err = openRoot(root) | ||||
| 	if err != nil { | ||||
| 		return | ||||
| 	} | ||||
| 	w.destRoot, err = openRoot(destRoot) | ||||
| 	if err != nil { | ||||
| 		return | ||||
| 	} | ||||
| 	for _, r := range parentRoots { | ||||
| 		f, err := openRoot(r) | ||||
| 		if err != nil { | ||||
| 			return w, err | ||||
| 		} | ||||
| 		w.parentRoots = append(w.parentRoots, f) | ||||
| 	} | ||||
| 	return | ||||
| } | ||||
| 
 | ||||
| func (w *legacyLayerWriter) init() error { | ||||
| 	if !w.pathFixed { | ||||
| 		path, err := makeLongAbsPath(w.root) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| func (w *legacyLayerWriter) CloseRoots() { | ||||
| 	if w.root != nil { | ||||
| 		w.root.Close() | ||||
| 		w.root = nil | ||||
| 	} | ||||
| 		for i, p := range w.parentRoots { | ||||
| 			w.parentRoots[i], err = makeLongAbsPath(p) | ||||
| 			if err != nil { | ||||
| 				return err | ||||
| 	if w.destRoot != nil { | ||||
| 		w.destRoot.Close() | ||||
| 		w.destRoot = nil | ||||
| 	} | ||||
| 	for i := range w.parentRoots { | ||||
| 		w.parentRoots[i].Close() | ||||
| 	} | ||||
| 		destPath, err := makeLongAbsPath(w.destRoot) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 		w.root = path | ||||
| 		w.destRoot = destPath | ||||
| 		w.pathFixed = true | ||||
| 	} | ||||
| 	return nil | ||||
| 	w.parentRoots = nil | ||||
| } | ||||
| 
 | ||||
| func (w *legacyLayerWriter) initUtilityVM() error { | ||||
| 	if !w.HasUtilityVM { | ||||
| 		err := os.Mkdir(filepath.Join(w.destRoot, utilityVMPath), 0) | ||||
| 		err := mkdirRelative(utilityVMPath, w.destRoot) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
|  | @ -386,7 +416,7 @@ func (w *legacyLayerWriter) initUtilityVM() error { | |||
| 		// clone the utility VM from the parent layer into this layer. Use hard
 | ||||
| 		// links to avoid unnecessary copying, since most of the files are
 | ||||
| 		// immutable.
 | ||||
| 		err = cloneTree(filepath.Join(w.parentRoots[0], utilityVMFilesPath), filepath.Join(w.destRoot, utilityVMFilesPath), mutatedUtilityVMFiles) | ||||
| 		err = cloneTree(w.parentRoots[0], w.destRoot, utilityVMFilesPath, mutatedUtilityVMFiles) | ||||
| 		if err != nil { | ||||
| 			return fmt.Errorf("cloning the parent utility VM image failed: %s", err) | ||||
| 		} | ||||
|  | @ -395,7 +425,40 @@ func (w *legacyLayerWriter) initUtilityVM() error { | |||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| func (w *legacyLayerWriter) reset() { | ||||
| func (w *legacyLayerWriter) reset() error { | ||||
| 	if w.currentIsDir { | ||||
| 		r := w.currentFile | ||||
| 		br := winio.NewBackupStreamReader(r) | ||||
| 		// Seek to the beginning of the backup stream, skipping the fileattrs
 | ||||
| 		if _, err := r.Seek(4, io.SeekStart); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 
 | ||||
| 		for { | ||||
| 			bhdr, err := br.Next() | ||||
| 			if err == io.EOF { | ||||
| 				// end of backupstream data
 | ||||
| 				break | ||||
| 			} | ||||
| 			if err != nil { | ||||
| 				return err | ||||
| 			} | ||||
| 			switch bhdr.Id { | ||||
| 			case winio.BackupReparseData: | ||||
| 				// The current file is a `.$wcidirs$` metadata file that
 | ||||
| 				// describes a directory reparse point. Delete the placeholder
 | ||||
| 				// directory to prevent future files being added into the
 | ||||
| 				// destination of the reparse point during the ImportLayer call
 | ||||
| 				if err := removeRelative(w.currentFileName, w.currentFileRoot); err != nil { | ||||
| 					return err | ||||
| 				} | ||||
| 				w.pendingDirs = append(w.pendingDirs, pendingDir{Path: w.currentFileName, Root: w.currentFileRoot}) | ||||
| 			default: | ||||
| 				// ignore all other stream types, as we only care about directory reparse points
 | ||||
| 			} | ||||
| 		} | ||||
| 		w.currentIsDir = false | ||||
| 	} | ||||
| 	if w.backupWriter != nil { | ||||
| 		w.backupWriter.Close() | ||||
| 		w.backupWriter = nil | ||||
|  | @ -403,21 +466,21 @@ func (w *legacyLayerWriter) reset() { | |||
| 	if w.currentFile != nil { | ||||
| 		w.currentFile.Close() | ||||
| 		w.currentFile = nil | ||||
| 		w.currentFileName = "" | ||||
| 		w.currentFileRoot = nil | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| // copyFileWithMetadata copies a file using the backup/restore APIs in order to preserve metadata
 | ||||
| func copyFileWithMetadata(srcPath, destPath string, isDir bool) (fileInfo *winio.FileBasicInfo, err error) { | ||||
| 	createDisposition := uint32(syscall.CREATE_NEW) | ||||
| 	if isDir { | ||||
| 		err = os.Mkdir(destPath, 0) | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 		createDisposition = syscall.OPEN_EXISTING | ||||
| 	} | ||||
| 
 | ||||
| 	src, err := openFileOrDir(srcPath, syscall.GENERIC_READ|winio.ACCESS_SYSTEM_SECURITY, syscall.OPEN_EXISTING) | ||||
| func copyFileWithMetadata(srcRoot, destRoot *os.File, subPath string, isDir bool) (fileInfo *winio.FileBasicInfo, err error) { | ||||
| 	src, err := openRelative( | ||||
| 		subPath, | ||||
| 		srcRoot, | ||||
| 		syscall.GENERIC_READ|winio.ACCESS_SYSTEM_SECURITY, | ||||
| 		syscall.FILE_SHARE_READ, | ||||
| 		_FILE_OPEN, | ||||
| 		_FILE_OPEN_REPARSE_POINT) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | @ -430,7 +493,17 @@ func copyFileWithMetadata(srcPath, destPath string, isDir bool) (fileInfo *winio | |||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	dest, err := openFileOrDir(destPath, syscall.GENERIC_READ|syscall.GENERIC_WRITE|winio.WRITE_DAC|winio.WRITE_OWNER|winio.ACCESS_SYSTEM_SECURITY, createDisposition) | ||||
| 	extraFlags := uint32(0) | ||||
| 	if isDir { | ||||
| 		extraFlags |= _FILE_DIRECTORY_FILE | ||||
| 	} | ||||
| 	dest, err := openRelative( | ||||
| 		subPath, | ||||
| 		destRoot, | ||||
| 		syscall.GENERIC_READ|syscall.GENERIC_WRITE|winio.WRITE_DAC|winio.WRITE_OWNER|winio.ACCESS_SYSTEM_SECURITY, | ||||
| 		syscall.FILE_SHARE_READ, | ||||
| 		_FILE_CREATE, | ||||
| 		extraFlags) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | @ -459,39 +532,49 @@ func copyFileWithMetadata(srcPath, destPath string, isDir bool) (fileInfo *winio | |||
| 
 | ||||
| // cloneTree clones a directory tree using hard links. It skips hard links for
 | ||||
| // the file names in the provided map and just copies those files.
 | ||||
| func cloneTree(srcPath, destPath string, mutatedFiles map[string]bool) error { | ||||
| func cloneTree(srcRoot *os.File, destRoot *os.File, subPath string, mutatedFiles map[string]bool) error { | ||||
| 	var di []dirInfo | ||||
| 	err := filepath.Walk(srcPath, func(srcFilePath string, info os.FileInfo, err error) error { | ||||
| 	err := ensureNotReparsePointRelative(subPath, srcRoot) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 	err = filepath.Walk(filepath.Join(srcRoot.Name(), subPath), func(srcFilePath string, info os.FileInfo, err error) error { | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 
 | ||||
| 		relPath, err := filepath.Rel(srcPath, srcFilePath) | ||||
| 		relPath, err := filepath.Rel(srcRoot.Name(), srcFilePath) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 		destFilePath := filepath.Join(destPath, relPath) | ||||
| 
 | ||||
| 		fileAttributes := info.Sys().(*syscall.Win32FileAttributeData).FileAttributes | ||||
| 		// Directories, reparse points, and files that will be mutated during
 | ||||
| 		// utility VM import must be copied. All other files can be hard linked.
 | ||||
| 		isReparsePoint := info.Sys().(*syscall.Win32FileAttributeData).FileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT != 0 | ||||
| 		if info.IsDir() || isReparsePoint || mutatedFiles[relPath] { | ||||
| 			fi, err := copyFileWithMetadata(srcFilePath, destFilePath, info.IsDir()) | ||||
| 		isReparsePoint := fileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT != 0 | ||||
| 		// In go1.9, FileInfo.IsDir() returns false if the directory is also a symlink.
 | ||||
| 		// See: https://github.com/golang/go/commit/1989921aef60c83e6f9127a8448fb5ede10e9acc
 | ||||
| 		// Fixes the problem by checking syscall.FILE_ATTRIBUTE_DIRECTORY directly
 | ||||
| 		isDir := fileAttributes&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 | ||||
| 
 | ||||
| 		if isDir || isReparsePoint || mutatedFiles[relPath] { | ||||
| 			fi, err := copyFileWithMetadata(srcRoot, destRoot, relPath, isDir) | ||||
| 			if err != nil { | ||||
| 				return err | ||||
| 			} | ||||
| 			if info.IsDir() && !isReparsePoint { | ||||
| 				di = append(di, dirInfo{path: destFilePath, fileInfo: *fi}) | ||||
| 			if isDir && !isReparsePoint { | ||||
| 				di = append(di, dirInfo{path: relPath, fileInfo: *fi}) | ||||
| 			} | ||||
| 		} else { | ||||
| 			err = os.Link(srcFilePath, destFilePath) | ||||
| 			err = linkRelative(relPath, srcRoot, relPath, destRoot) | ||||
| 			if err != nil { | ||||
| 				return err | ||||
| 			} | ||||
| 		} | ||||
| 
 | ||||
| 		// Don't recurse on reparse points.
 | ||||
| 		if info.IsDir() && isReparsePoint { | ||||
| 		// Don't recurse on reparse points in go1.8 and older. Filepath.Walk
 | ||||
| 		// handles this in go1.9 and newer.
 | ||||
| 		if isDir && isReparsePoint && shouldSkipDirectoryReparse { | ||||
| 			return filepath.SkipDir | ||||
| 		} | ||||
| 
 | ||||
|  | @ -501,13 +584,11 @@ func cloneTree(srcPath, destPath string, mutatedFiles map[string]bool) error { | |||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	return reapplyDirectoryTimes(di) | ||||
| 	return reapplyDirectoryTimes(destRoot, di) | ||||
| } | ||||
| 
 | ||||
| func (w *legacyLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) error { | ||||
| 	w.reset() | ||||
| 	err := w.init() | ||||
| 	if err != nil { | ||||
| 	if err := w.reset(); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
|  | @ -515,6 +596,7 @@ func (w *legacyLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) erro | |||
| 		return w.initUtilityVM() | ||||
| 	} | ||||
| 
 | ||||
| 	name = filepath.Clean(name) | ||||
| 	if hasPathPrefix(name, utilityVMPath) { | ||||
| 		if !w.HasUtilityVM { | ||||
| 			return errors.New("missing UtilityVM directory") | ||||
|  | @ -522,10 +604,9 @@ func (w *legacyLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) erro | |||
| 		if !hasPathPrefix(name, utilityVMFilesPath) && name != utilityVMFilesPath { | ||||
| 			return errors.New("invalid UtilityVM layer") | ||||
| 		} | ||||
| 		path := filepath.Join(w.destRoot, name) | ||||
| 		createDisposition := uint32(syscall.OPEN_EXISTING) | ||||
| 		createDisposition := uint32(_FILE_OPEN) | ||||
| 		if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 { | ||||
| 			st, err := os.Lstat(path) | ||||
| 			st, err := lstatRelative(name, w.destRoot) | ||||
| 			if err != nil && !os.IsNotExist(err) { | ||||
| 				return err | ||||
| 			} | ||||
|  | @ -533,37 +614,44 @@ func (w *legacyLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) erro | |||
| 				// Delete the existing file/directory if it is not the same type as this directory.
 | ||||
| 				existingAttr := st.Sys().(*syscall.Win32FileAttributeData).FileAttributes | ||||
| 				if (uint32(fileInfo.FileAttributes)^existingAttr)&(syscall.FILE_ATTRIBUTE_DIRECTORY|syscall.FILE_ATTRIBUTE_REPARSE_POINT) != 0 { | ||||
| 					if err = os.RemoveAll(path); err != nil { | ||||
| 					if err = removeAllRelative(name, w.destRoot); err != nil { | ||||
| 						return err | ||||
| 					} | ||||
| 					st = nil | ||||
| 				} | ||||
| 			} | ||||
| 			if st == nil { | ||||
| 				if err = os.Mkdir(path, 0); err != nil { | ||||
| 				if err = mkdirRelative(name, w.destRoot); err != nil { | ||||
| 					return err | ||||
| 				} | ||||
| 			} | ||||
| 			if fileInfo.FileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT == 0 { | ||||
| 				w.uvmDi = append(w.uvmDi, dirInfo{path: path, fileInfo: *fileInfo}) | ||||
| 				w.uvmDi = append(w.uvmDi, dirInfo{path: name, fileInfo: *fileInfo}) | ||||
| 			} | ||||
| 		} else { | ||||
| 			// Overwrite any existing hard link.
 | ||||
| 			err = os.Remove(path) | ||||
| 			err := removeRelative(name, w.destRoot) | ||||
| 			if err != nil && !os.IsNotExist(err) { | ||||
| 				return err | ||||
| 			} | ||||
| 			createDisposition = syscall.CREATE_NEW | ||||
| 			createDisposition = _FILE_CREATE | ||||
| 		} | ||||
| 
 | ||||
| 		f, err := openFileOrDir(path, syscall.GENERIC_READ|syscall.GENERIC_WRITE|winio.WRITE_DAC|winio.WRITE_OWNER|winio.ACCESS_SYSTEM_SECURITY, createDisposition) | ||||
| 		f, err := openRelative( | ||||
| 			name, | ||||
| 			w.destRoot, | ||||
| 			syscall.GENERIC_READ|syscall.GENERIC_WRITE|winio.WRITE_DAC|winio.WRITE_OWNER|winio.ACCESS_SYSTEM_SECURITY, | ||||
| 			syscall.FILE_SHARE_READ, | ||||
| 			createDisposition, | ||||
| 			_FILE_OPEN_REPARSE_POINT, | ||||
| 		) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 		defer func() { | ||||
| 			if f != nil { | ||||
| 				f.Close() | ||||
| 				os.Remove(path) | ||||
| 				removeRelative(name, w.destRoot) | ||||
| 			} | ||||
| 		}() | ||||
| 
 | ||||
|  | @ -574,28 +662,31 @@ func (w *legacyLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) erro | |||
| 
 | ||||
| 		w.backupWriter = winio.NewBackupFileWriter(f, true) | ||||
| 		w.currentFile = f | ||||
| 		w.currentFileName = name | ||||
| 		w.currentFileRoot = w.destRoot | ||||
| 		w.addedFiles[name] = true | ||||
| 		f = nil | ||||
| 		return nil | ||||
| 	} | ||||
| 
 | ||||
| 	path := filepath.Join(w.root, name) | ||||
| 	fname := name | ||||
| 	if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 { | ||||
| 		err := os.Mkdir(path, 0) | ||||
| 		err := mkdirRelative(name, w.root) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 		path += ".$wcidirs$" | ||||
| 		fname += ".$wcidirs$" | ||||
| 		w.currentIsDir = true | ||||
| 	} | ||||
| 
 | ||||
| 	f, err := openFileOrDir(path, syscall.GENERIC_READ|syscall.GENERIC_WRITE, syscall.CREATE_NEW) | ||||
| 	f, err := openRelative(fname, w.root, syscall.GENERIC_READ|syscall.GENERIC_WRITE, syscall.FILE_SHARE_READ, _FILE_CREATE, 0) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 	defer func() { | ||||
| 		if f != nil { | ||||
| 			f.Close() | ||||
| 			os.Remove(path) | ||||
| 			removeRelative(fname, w.root) | ||||
| 		} | ||||
| 	}() | ||||
| 
 | ||||
|  | @ -617,19 +708,20 @@ func (w *legacyLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) erro | |||
| 	} | ||||
| 
 | ||||
| 	w.currentFile = f | ||||
| 	w.currentFileName = name | ||||
| 	w.currentFileRoot = w.root | ||||
| 	w.addedFiles[name] = true | ||||
| 	f = nil | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| func (w *legacyLayerWriter) AddLink(name string, target string) error { | ||||
| 	w.reset() | ||||
| 	err := w.init() | ||||
| 	if err != nil { | ||||
| 	if err := w.reset(); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	var roots []string | ||||
| 	target = filepath.Clean(target) | ||||
| 	var roots []*os.File | ||||
| 	if hasPathPrefix(target, filesPath) { | ||||
| 		// Look for cross-layer hard link targets in the parent layers, since
 | ||||
| 		// nothing is in the destination path yet.
 | ||||
|  | @ -638,7 +730,7 @@ func (w *legacyLayerWriter) AddLink(name string, target string) error { | |||
| 		// Since the utility VM is fully cloned into the destination path
 | ||||
| 		// already, look for cross-layer hard link targets directly in the
 | ||||
| 		// destination path.
 | ||||
| 		roots = []string{w.destRoot} | ||||
| 		roots = []*os.File{w.destRoot} | ||||
| 	} | ||||
| 
 | ||||
| 	if roots == nil || (!hasPathPrefix(name, filesPath) && !hasPathPrefix(name, utilityVMFilesPath)) { | ||||
|  | @ -647,12 +739,12 @@ func (w *legacyLayerWriter) AddLink(name string, target string) error { | |||
| 
 | ||||
| 	// Find to try the target of the link in a previously added file. If that
 | ||||
| 	// fails, search in parent layers.
 | ||||
| 	var selectedRoot string | ||||
| 	var selectedRoot *os.File | ||||
| 	if _, ok := w.addedFiles[target]; ok { | ||||
| 		selectedRoot = w.destRoot | ||||
| 	} else { | ||||
| 		for _, r := range roots { | ||||
| 			if _, err = os.Lstat(filepath.Join(r, target)); err != nil { | ||||
| 			if _, err := lstatRelative(target, r); err != nil { | ||||
| 				if !os.IsNotExist(err) { | ||||
| 					return err | ||||
| 				} | ||||
|  | @ -661,22 +753,25 @@ func (w *legacyLayerWriter) AddLink(name string, target string) error { | |||
| 				break | ||||
| 			} | ||||
| 		} | ||||
| 		if selectedRoot == "" { | ||||
| 		if selectedRoot == nil { | ||||
| 			return fmt.Errorf("failed to find link target for '%s' -> '%s'", name, target) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	// The link can't be written until after the ImportLayer call.
 | ||||
| 	w.PendingLinks = append(w.PendingLinks, pendingLink{ | ||||
| 		Path:   filepath.Join(w.destRoot, name), | ||||
| 		Target: filepath.Join(selectedRoot, target), | ||||
| 		Path:       name, | ||||
| 		Target:     target, | ||||
| 		TargetRoot: selectedRoot, | ||||
| 	}) | ||||
| 	w.addedFiles[name] = true | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| func (w *legacyLayerWriter) Remove(name string) error { | ||||
| 	name = filepath.Clean(name) | ||||
| 	if hasPathPrefix(name, filesPath) { | ||||
| 		w.tombstones = append(w.tombstones, name[len(filesPath)+1:]) | ||||
| 		w.Tombstones = append(w.Tombstones, name) | ||||
| 	} else if hasPathPrefix(name, utilityVMFilesPath) { | ||||
| 		err := w.initUtilityVM() | ||||
| 		if err != nil { | ||||
|  | @ -685,11 +780,10 @@ func (w *legacyLayerWriter) Remove(name string) error { | |||
| 		// Make sure the path exists; os.RemoveAll will not fail if the file is
 | ||||
| 		// already gone, and this needs to be a fatal error for diagnostics
 | ||||
| 		// purposes.
 | ||||
| 		path := filepath.Join(w.destRoot, name) | ||||
| 		if _, err := os.Lstat(path); err != nil { | ||||
| 		if _, err := lstatRelative(name, w.destRoot); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 		err = os.RemoveAll(path) | ||||
| 		err = removeAllRelative(name, w.destRoot) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
|  | @ -711,28 +805,20 @@ func (w *legacyLayerWriter) Write(b []byte) (int, error) { | |||
| } | ||||
| 
 | ||||
| func (w *legacyLayerWriter) Close() error { | ||||
| 	w.reset() | ||||
| 	err := w.init() | ||||
| 	if err != nil { | ||||
| 	if err := w.reset(); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 	tf, err := os.Create(filepath.Join(w.root, "tombstones.txt")) | ||||
| 	if err != nil { | ||||
| 	if err := removeRelative("tombstones.txt", w.root); err != nil && !os.IsNotExist(err) { | ||||
| 		return err | ||||
| 	} | ||||
| 	defer tf.Close() | ||||
| 	_, err = tf.Write([]byte("\xef\xbb\xbfVersion 1.0\n")) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 	for _, t := range w.tombstones { | ||||
| 		_, err = tf.Write([]byte(filepath.Join(`\`, t) + "\n")) | ||||
| 	for _, pd := range w.pendingDirs { | ||||
| 		err := mkdirRelative(pd.Path, pd.Root) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 	} | ||||
| 	if w.HasUtilityVM { | ||||
| 		err = reapplyDirectoryTimes(w.uvmDi) | ||||
| 		err := reapplyDirectoryTimes(w.destRoot, w.uvmDi) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
|  |  | |||
|  | @ -0,0 +1,7 @@ | |||
// +build !go1.9

package hcsshim

// shouldSkipDirectoryReparse reports whether directory reparse points
// must be skipped when walking a layer with filepath.Walk on this
// toolchain. See the go1.9 counterpart file for the other value.
//
// Due to a bug in go1.8 and before, directory reparse points need to be skipped
// during filepath.Walk. This is fixed in go1.9
var shouldSkipDirectoryReparse = true
|  | @ -0,0 +1,7 @@ | |||
// +build go1.9

package hcsshim

// shouldSkipDirectoryReparse reports whether directory reparse points
// must be skipped when walking a layer with filepath.Walk on this
// toolchain. See the !go1.9 counterpart file for the other value.
//
// Due to a bug in go1.8 and before, directory reparse points need to be skipped
// during filepath.Walk. This is fixed in go1.9
var shouldSkipDirectoryReparse = false
							
								
								
									
										427
									
								
								cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/safeopen.go
								
								
									generated
								
								
									vendored
								
								
									Normal file
								
							
							
						
						
									
										427
									
								
								cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/safeopen.go
								
								
									generated
								
								
									vendored
								
								
									Normal file
								
							|  | @ -0,0 +1,427 @@ | |||
| package hcsshim | ||||
| 
 | ||||
| import ( | ||||
| 	"errors" | ||||
| 	"io" | ||||
| 	"os" | ||||
| 	"path/filepath" | ||||
| 	"strings" | ||||
| 	"syscall" | ||||
| 	"unicode/utf16" | ||||
| 	"unsafe" | ||||
| 
 | ||||
| 	winio "github.com/Microsoft/go-winio" | ||||
| ) | ||||
| 
 | ||||
| //sys ntCreateFile(handle *uintptr, accessMask uint32, oa *objectAttributes, iosb *ioStatusBlock, allocationSize *uint64, fileAttributes uint32, shareAccess uint32, createDisposition uint32, createOptions uint32, eaBuffer *byte, eaLength uint32) (status uint32) = ntdll.NtCreateFile
 | ||||
| //sys ntSetInformationFile(handle uintptr, iosb *ioStatusBlock, information uintptr, length uint32, class uint32) (status uint32) = ntdll.NtSetInformationFile
 | ||||
| //sys rtlNtStatusToDosError(status uint32) (winerr error) = ntdll.RtlNtStatusToDosErrorNoTeb
 | ||||
| //sys localAlloc(flags uint32, size int) (ptr uintptr) = kernel32.LocalAlloc
 | ||||
| //sys localFree(ptr uintptr) = kernel32.LocalFree
 | ||||
| 
 | ||||
// ioStatusBlock mirrors the NT IO_STATUS_BLOCK structure filled in by
// ntCreateFile / ntSetInformationFile.
type ioStatusBlock struct {
	Status, Information uintptr
}

// objectAttributes mirrors the NT OBJECT_ATTRIBUTES structure, which
// describes the target of an ntCreateFile call. RootDirectory carries
// the handle that makes relative opens possible.
type objectAttributes struct {
	Length             uintptr
	RootDirectory      uintptr
	ObjectName         uintptr
	Attributes         uintptr
	SecurityDescriptor uintptr
	SecurityQoS        uintptr
}

// unicodeString mirrors the NT UNICODE_STRING structure. Length and
// MaximumLength are in bytes, not UTF-16 code units.
type unicodeString struct {
	Length        uint16
	MaximumLength uint16
	Buffer        uintptr
}

// fileLinkInformation mirrors FILE_LINK_INFORMATION for
// ntSetInformationFile. FileName is a variable-length array; the
// [1]uint16 here is only its first element — the rest of the buffer is
// allocated past the end of the struct.
type fileLinkInformation struct {
	ReplaceIfExists bool
	RootDirectory   uintptr
	FileNameLength  uint32
	FileName        [1]uint16
}

// fileDispositionInformationEx mirrors FILE_DISPOSITION_INFORMATION_EX,
// used by deleteOnClose to mark a handle for deletion.
type fileDispositionInformationEx struct {
	Flags uintptr
}

// NT API constants used by the *Relative helpers in this file.
// NOTE(review): values appear to match the Windows DDK headers
// (ntifs.h/wdm.h) — confirm against the SDK before changing any.
const (
	// FILE_INFORMATION_CLASS values for ntSetInformationFile.
	_FileLinkInformation          = 11
	_FileDispositionInformationEx = 64

	// Access-mask bits.
	_FILE_READ_ATTRIBUTES  = 0x0080
	_FILE_WRITE_ATTRIBUTES = 0x0100
	_DELETE                = 0x10000

	// CreateDisposition values (FILE_OPEN = open existing only,
	// FILE_CREATE = create new only).
	_FILE_OPEN   = 1
	_FILE_CREATE = 2

	// CreateOptions flags.
	_FILE_DIRECTORY_FILE          = 0x00000001
	_FILE_SYNCHRONOUS_IO_NONALERT = 0x00000020
	_FILE_DELETE_ON_CLOSE         = 0x00001000
	_FILE_OPEN_FOR_BACKUP_INTENT  = 0x00004000
	_FILE_OPEN_REPARSE_POINT      = 0x00200000

	// Flag for fileDispositionInformationEx.Flags.
	_FILE_DISPOSITION_DELETE = 0x00000001

	// OBJECT_ATTRIBUTES attribute: fail the open if any path component
	// is a reparse point. This is the core of the anti-symlink-escape
	// protection below.
	_OBJ_DONT_REPARSE = 0x1000

	_STATUS_REPARSE_POINT_ENCOUNTERED = 0xC000050B
)
| 
 | ||||
// openRoot opens a directory handle to serve as the root for the
// *Relative helpers in this file. The path is first normalized to a
// long absolute path (presumably \\?\-prefixed; see makeLongAbsPath)
// so long paths work, and the handle is opened with full sharing so
// other layer operations can proceed concurrently.
func openRoot(path string) (*os.File, error) {
	longpath, err := makeLongAbsPath(path)
	if err != nil {
		return nil, err
	}
	return winio.OpenForBackup(longpath, syscall.GENERIC_READ, syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, syscall.OPEN_EXISTING)
}
| 
 | ||||
// ntRelativePath converts a relative Go path into a UTF-16 string
// suitable for passing to ntCreateFile together with a RootDirectory
// handle.
//
// It rejects paths containing ':' (which would name an NTFS alternate
// data stream out of order) and absolute (backslash-leading) paths,
// and enforces the 32767 UTF-16 unit limit on NT paths, returning
// syscall.ENAMETOOLONG when exceeded.
func ntRelativePath(path string) ([]uint16, error) {
	path = filepath.Clean(path)
	// BUG FIX: the arguments to strings.Contains were reversed
	// (strings.Contains(":", path)), which only matched the paths ""
	// and ":" and let every other stream-bearing path through.
	if strings.Contains(path, ":") {
		// Since alternate data streams must follow the file they
		// are attached to, finding one here (out of order) is invalid.
		return nil, errors.New("path contains invalid character `:`")
	}
	fspath := filepath.FromSlash(path)
	if len(fspath) > 0 && fspath[0] == '\\' {
		return nil, errors.New("expected relative path")
	}

	path16 := utf16.Encode(([]rune)(fspath))
	if len(path16) > 32767 {
		// NT paths are limited to 32767 UTF-16 units.
		return nil, syscall.ENAMETOOLONG
	}

	return path16, nil
}
| 
 | ||||
// openRelativeInternal opens a relative path from the given root
// directory handle via ntCreateFile, failing if any of the
// intermediate path components are reparse points (enforced by
// OBJ_DONT_REPARSE). This prevents a hostile layer from using
// symlinks/mount points to redirect the open outside the root.
// The raw NTSTATUS from ntCreateFile is translated to a Win32 error;
// callers wrap it in *os.PathError / *os.LinkError.
func openRelativeInternal(path string, root *os.File, accessMask uint32, shareFlags uint32, createDisposition uint32, flags uint32) (*os.File, error) {
	var (
		h    uintptr
		iosb ioStatusBlock
		oa   objectAttributes
	)

	path16, err := ntRelativePath(path)
	if err != nil {
		return nil, err
	}

	if root == nil || root.Fd() == 0 {
		return nil, errors.New("missing root directory")
	}

	// Build the UNICODE_STRING in LocalAlloc'd (native) memory — the
	// header immediately followed by the UTF-16 buffer — so the
	// pointers handed to the kernel call are stable native addresses.
	upathBuffer := localAlloc(0, int(unsafe.Sizeof(unicodeString{}))+len(path16)*2)
	defer localFree(upathBuffer)

	upath := (*unicodeString)(unsafe.Pointer(upathBuffer))
	upath.Length = uint16(len(path16) * 2) // bytes, not UTF-16 units
	upath.MaximumLength = upath.Length
	upath.Buffer = upathBuffer + unsafe.Sizeof(*upath)
	copy((*[32768]uint16)(unsafe.Pointer(upath.Buffer))[:], path16)

	oa.Length = unsafe.Sizeof(oa)
	oa.ObjectName = upathBuffer
	oa.RootDirectory = uintptr(root.Fd()) // makes path resolution relative
	oa.Attributes = _OBJ_DONT_REPARSE     // reject reparse points anywhere in the path
	status := ntCreateFile(
		&h,
		accessMask|syscall.SYNCHRONIZE,
		&oa,
		&iosb,
		nil,
		0,
		shareFlags,
		createDisposition,
		_FILE_OPEN_FOR_BACKUP_INTENT|_FILE_SYNCHRONOUS_IO_NONALERT|flags,
		nil,
		0,
	)
	if status != 0 {
		return nil, rtlNtStatusToDosError(status)
	}

	// Compute a friendly display name for the *os.File. If this fails,
	// close the raw handle so it does not leak.
	fullPath, err := makeLongAbsPath(filepath.Join(root.Name(), path))
	if err != nil {
		syscall.Close(syscall.Handle(h))
		return nil, err
	}

	return os.NewFile(h, fullPath), nil
}
| 
 | ||||
| // openRelative opens a relative path from the given root, failing if
 | ||||
| // any of the intermediate path components are reparse points.
 | ||||
| func openRelative(path string, root *os.File, accessMask uint32, shareFlags uint32, createDisposition uint32, flags uint32) (*os.File, error) { | ||||
| 	f, err := openRelativeInternal(path, root, accessMask, shareFlags, createDisposition, flags) | ||||
| 	if err != nil { | ||||
| 		err = &os.PathError{Op: "open", Path: filepath.Join(root.Name(), path), Err: err} | ||||
| 	} | ||||
| 	return f, err | ||||
| } | ||||
| 
 | ||||
// linkRelative creates a hard link from oldname to newname (relative to oldroot
// and newroot), failing if any of the intermediate path components are reparse
// points. The link itself is created with an NT FILE_LINK_INFORMATION call
// rather than CreateHardLink so that no additional path resolution (and thus
// no reparse-point traversal) happens for the final component.
func linkRelative(oldname string, oldroot *os.File, newname string, newroot *os.File) error {
	// Open the old file.
	oldf, err := openRelativeInternal(
		oldname,
		oldroot,
		syscall.FILE_WRITE_ATTRIBUTES,
		syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
		_FILE_OPEN,
		0,
	)
	if err != nil {
		return &os.LinkError{Op: "link", Old: filepath.Join(oldroot.Name(), oldname), New: filepath.Join(newroot.Name(), newname), Err: err}
	}
	defer oldf.Close()

	// Open the parent of the new file. If newname has no directory
	// component, the root handle itself is the parent.
	var parent *os.File
	parentPath := filepath.Dir(newname)
	if parentPath != "." {
		parent, err = openRelativeInternal(
			parentPath,
			newroot,
			syscall.GENERIC_READ,
			syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
			_FILE_OPEN,
			_FILE_DIRECTORY_FILE)
		if err != nil {
			return &os.LinkError{Op: "link", Old: oldf.Name(), New: filepath.Join(newroot.Name(), newname), Err: err}
		}
		defer parent.Close()

		// Reject a reparse-point parent explicitly: the open above used
		// OBJ_DONT_REPARSE for intermediate components, but the final
		// (parent) component must be checked by attribute.
		fi, err := winio.GetFileBasicInfo(parent)
		if err != nil {
			return err
		}
		if (fi.FileAttributes & syscall.FILE_ATTRIBUTE_REPARSE_POINT) != 0 {
			return &os.LinkError{Op: "link", Old: oldf.Name(), New: filepath.Join(newroot.Name(), newname), Err: rtlNtStatusToDosError(_STATUS_REPARSE_POINT_ENCOUNTERED)}
		}

	} else {
		parent = newroot
	}

	// Issue an NT call to create the link. This will be safe because NT will
	// not open any more directories to create the link, so it cannot walk any
	// more reparse points.
	newbase := filepath.Base(newname)
	newbase16, err := ntRelativePath(newbase)
	if err != nil {
		return err
	}

	// FILE_LINK_INFORMATION is variable-length: header plus the UTF-16
	// name, allocated in native memory (see fileLinkInformation).
	size := int(unsafe.Offsetof(fileLinkInformation{}.FileName)) + len(newbase16)*2
	linkinfoBuffer := localAlloc(0, size)
	defer localFree(linkinfoBuffer)
	linkinfo := (*fileLinkInformation)(unsafe.Pointer(linkinfoBuffer))
	linkinfo.RootDirectory = parent.Fd()
	linkinfo.FileNameLength = uint32(len(newbase16) * 2) // bytes
	copy((*[32768]uint16)(unsafe.Pointer(&linkinfo.FileName[0]))[:], newbase16)

	var iosb ioStatusBlock
	status := ntSetInformationFile(
		oldf.Fd(),
		&iosb,
		linkinfoBuffer,
		uint32(size),
		_FileLinkInformation,
	)
	if status != 0 {
		return &os.LinkError{Op: "link", Old: oldf.Name(), New: filepath.Join(parent.Name(), newbase), Err: rtlNtStatusToDosError(status)}
	}

	return nil
}
| 
 | ||||
| // deleteOnClose marks a file to be deleted when the handle is closed.
 | ||||
| func deleteOnClose(f *os.File) error { | ||||
| 	disposition := fileDispositionInformationEx{Flags: _FILE_DISPOSITION_DELETE} | ||||
| 	var iosb ioStatusBlock | ||||
| 	status := ntSetInformationFile( | ||||
| 		f.Fd(), | ||||
| 		&iosb, | ||||
| 		uintptr(unsafe.Pointer(&disposition)), | ||||
| 		uint32(unsafe.Sizeof(disposition)), | ||||
| 		_FileDispositionInformationEx, | ||||
| 	) | ||||
| 	if status != 0 { | ||||
| 		return rtlNtStatusToDosError(status) | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| // clearReadOnly clears the readonly attribute on a file.
 | ||||
| func clearReadOnly(f *os.File) error { | ||||
| 	bi, err := winio.GetFileBasicInfo(f) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 	if bi.FileAttributes&syscall.FILE_ATTRIBUTE_READONLY == 0 { | ||||
| 		return nil | ||||
| 	} | ||||
| 	sbi := winio.FileBasicInfo{ | ||||
| 		FileAttributes: bi.FileAttributes &^ syscall.FILE_ATTRIBUTE_READONLY, | ||||
| 	} | ||||
| 	if sbi.FileAttributes == 0 { | ||||
| 		sbi.FileAttributes = syscall.FILE_ATTRIBUTE_NORMAL | ||||
| 	} | ||||
| 	return winio.SetFileBasicInfo(f, &sbi) | ||||
| } | ||||
| 
 | ||||
// removeRelative removes a file or directory relative to a root, failing if any
// intermediate path components are reparse points.
//
// The target is opened with FILE_OPEN_REPARSE_POINT so that a reparse
// point is removed itself rather than followed, then marked
// delete-on-close; the actual removal happens when the deferred Close
// runs. If the first attempt fails with ERROR_ACCESS_DENIED, the
// readonly attribute is cleared and the delete is retried once.
func removeRelative(path string, root *os.File) error {
	f, err := openRelativeInternal(
		path,
		root,
		_FILE_READ_ATTRIBUTES|_FILE_WRITE_ATTRIBUTES|_DELETE,
		syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
		_FILE_OPEN,
		_FILE_OPEN_REPARSE_POINT)
	if err == nil {
		defer f.Close()
		err = deleteOnClose(f)
		if err == syscall.ERROR_ACCESS_DENIED {
			// Maybe the file is marked readonly. Clear the bit and retry.
			// The clearReadOnly error is deliberately ignored: the retry
			// below reports the authoritative failure.
			clearReadOnly(f)
			err = deleteOnClose(f)
		}
	}
	if err != nil {
		return &os.PathError{Op: "remove", Path: filepath.Join(root.Name(), path), Err: err}
	}
	return nil
}
| 
 | ||||
// removeAllRelative removes a directory tree relative to a root, failing if any
// intermediate path components are reparse points. It follows the shape of
// os.RemoveAll: a missing target is not an error, the first error
// encountered while removing children is remembered and returned after
// the rest of the tree has been attempted.
func removeAllRelative(path string, root *os.File) error {
	fi, err := lstatRelative(path, root)
	if err != nil {
		if os.IsNotExist(err) {
			return nil
		}
		return err
	}
	fileAttributes := fi.Sys().(*syscall.Win32FileAttributeData).FileAttributes
	if fileAttributes&syscall.FILE_ATTRIBUTE_DIRECTORY == 0 || fileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT != 0 {
		// If this is a reparse point, it can't have children. Simple remove will do.
		err := removeRelative(path, root)
		if err == nil || os.IsNotExist(err) {
			return nil
		}
		return err
	}

	// It is necessary to use os.Open as Readdirnames does not work with
	// openRelative. This is safe because the above lstatrelative fails
	// if the target is outside the root, and we know this is not a
	// symlink from the above FILE_ATTRIBUTE_REPARSE_POINT check.
	fd, err := os.Open(filepath.Join(root.Name(), path))
	if err != nil {
		if os.IsNotExist(err) {
			// Race. It was deleted between the Lstat and Open.
			// Return nil per RemoveAll's docs.
			return nil
		}
		return err
	}

	// Remove contents & return first error.
	for {
		names, err1 := fd.Readdirnames(100)
		for _, name := range names {
			// NOTE: this inner err1 deliberately shadows the
			// Readdirnames err1 above; only the first error overall is
			// kept in err.
			err1 := removeAllRelative(path+string(os.PathSeparator)+name, root)
			if err == nil {
				err = err1
			}
		}
		if err1 == io.EOF {
			break
		}
		// If Readdirnames returned an error, use it.
		if err == nil {
			err = err1
		}
		if len(names) == 0 {
			break
		}
	}
	fd.Close()

	// Remove directory (now empty, barring earlier errors).
	err1 := removeRelative(path, root)
	if err1 == nil || os.IsNotExist(err1) {
		return nil
	}
	if err == nil {
		err = err1
	}
	return err
}
| 
 | ||||
| // mkdirRelative creates a directory relative to a root, failing if any
 | ||||
| // intermediate path components are reparse points.
 | ||||
| func mkdirRelative(path string, root *os.File) error { | ||||
| 	f, err := openRelativeInternal( | ||||
| 		path, | ||||
| 		root, | ||||
| 		0, | ||||
| 		syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, | ||||
| 		_FILE_CREATE, | ||||
| 		_FILE_DIRECTORY_FILE) | ||||
| 	if err == nil { | ||||
| 		f.Close() | ||||
| 	} else { | ||||
| 		err = &os.PathError{Op: "mkdir", Path: filepath.Join(root.Name(), path), Err: err} | ||||
| 	} | ||||
| 	return err | ||||
| } | ||||
| 
 | ||||
| // lstatRelative performs a stat operation on a file relative to a root, failing
 | ||||
| // if any intermediate path components are reparse points.
 | ||||
| func lstatRelative(path string, root *os.File) (os.FileInfo, error) { | ||||
| 	f, err := openRelativeInternal( | ||||
| 		path, | ||||
| 		root, | ||||
| 		_FILE_READ_ATTRIBUTES, | ||||
| 		syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, | ||||
| 		_FILE_OPEN, | ||||
| 		_FILE_OPEN_REPARSE_POINT) | ||||
| 	if err != nil { | ||||
| 		return nil, &os.PathError{Op: "stat", Path: filepath.Join(root.Name(), path), Err: err} | ||||
| 	} | ||||
| 	defer f.Close() | ||||
| 	return f.Stat() | ||||
| } | ||||
| 
 | ||||
| // ensureNotReparsePointRelative validates that a given file (relative to a
 | ||||
| // root) and all intermediate path components are not a reparse points.
 | ||||
| func ensureNotReparsePointRelative(path string, root *os.File) error { | ||||
| 	// Perform an open with OBJ_DONT_REPARSE but without specifying FILE_OPEN_REPARSE_POINT.
 | ||||
| 	f, err := openRelative( | ||||
| 		path, | ||||
| 		root, | ||||
| 		0, | ||||
| 		syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, | ||||
| 		_FILE_OPEN, | ||||
| 		0) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 	f.Close() | ||||
| 	return nil | ||||
| } | ||||
|  | @ -41,6 +41,8 @@ var ( | |||
| 	modole32     = windows.NewLazySystemDLL("ole32.dll") | ||||
| 	modiphlpapi  = windows.NewLazySystemDLL("iphlpapi.dll") | ||||
| 	modvmcompute = windows.NewLazySystemDLL("vmcompute.dll") | ||||
| 	modntdll     = windows.NewLazySystemDLL("ntdll.dll") | ||||
| 	modkernel32  = windows.NewLazySystemDLL("kernel32.dll") | ||||
| 
 | ||||
| 	procCoTaskMemFree                      = modole32.NewProc("CoTaskMemFree") | ||||
| 	procSetCurrentThreadCompartmentId      = modiphlpapi.NewProc("SetCurrentThreadCompartmentId") | ||||
|  | @ -94,6 +96,11 @@ var ( | |||
| 	procHcsUnregisterProcessCallback       = modvmcompute.NewProc("HcsUnregisterProcessCallback") | ||||
| 	procHcsModifyServiceSettings           = modvmcompute.NewProc("HcsModifyServiceSettings") | ||||
| 	procHNSCall                            = modvmcompute.NewProc("HNSCall") | ||||
| 	procNtCreateFile                       = modntdll.NewProc("NtCreateFile") | ||||
| 	procNtSetInformationFile               = modntdll.NewProc("NtSetInformationFile") | ||||
| 	procRtlNtStatusToDosErrorNoTeb         = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb") | ||||
| 	procLocalAlloc                         = modkernel32.NewProc("LocalAlloc") | ||||
| 	procLocalFree                          = modkernel32.NewProc("LocalFree") | ||||
| ) | ||||
| 
 | ||||
| func coTaskMemFree(buffer unsafe.Pointer) { | ||||
|  | @ -1040,3 +1047,34 @@ func __hnsCall(method *uint16, path *uint16, object *uint16, response **uint16) | |||
| 	} | ||||
| 	return | ||||
| } | ||||
| 
 | ||||
// ntCreateFile calls ntdll!NtCreateFile and returns the raw NTSTATUS
// (0 on success). Generated wrapper for the //sys declaration in
// safeopen.go — do not edit by hand; regenerate instead.
func ntCreateFile(handle *uintptr, accessMask uint32, oa *objectAttributes, iosb *ioStatusBlock, allocationSize *uint64, fileAttributes uint32, shareAccess uint32, createDisposition uint32, createOptions uint32, eaBuffer *byte, eaLength uint32) (status uint32) {
	r0, _, _ := syscall.Syscall12(procNtCreateFile.Addr(), 11, uintptr(unsafe.Pointer(handle)), uintptr(accessMask), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(allocationSize)), uintptr(fileAttributes), uintptr(shareAccess), uintptr(createDisposition), uintptr(createOptions), uintptr(unsafe.Pointer(eaBuffer)), uintptr(eaLength), 0)
	status = uint32(r0)
	return
}
| 
 | ||||
// ntSetInformationFile calls ntdll!NtSetInformationFile and returns the
// raw NTSTATUS (0 on success). Generated wrapper for the //sys
// declaration in safeopen.go — do not edit by hand.
func ntSetInformationFile(handle uintptr, iosb *ioStatusBlock, information uintptr, length uint32, class uint32) (status uint32) {
	r0, _, _ := syscall.Syscall6(procNtSetInformationFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(information), uintptr(length), uintptr(class), 0)
	status = uint32(r0)
	return
}
| 
 | ||||
// rtlNtStatusToDosError converts an NTSTATUS to a Win32 error via
// ntdll!RtlNtStatusToDosErrorNoTeb, returned as a syscall.Errno.
// A zero result maps to nil. Generated wrapper — do not edit by hand.
func rtlNtStatusToDosError(status uint32) (winerr error) {
	r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(status), 0, 0)
	if r0 != 0 {
		winerr = syscall.Errno(r0)
	}
	return
}
| 
 | ||||
// localAlloc calls kernel32!LocalAlloc and returns the raw pointer.
// NOTE(review): a zero return (allocation failure) is not checked here
// or at call sites — confirm whether that is acceptable for the sizes
// used. Generated wrapper — do not edit by hand.
func localAlloc(flags uint32, size int) (ptr uintptr) {
	r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(flags), uintptr(size), 0)
	ptr = uintptr(r0)
	return
}
| 
 | ||||
// localFree calls kernel32!LocalFree to release memory obtained from
// localAlloc. Generated wrapper — do not edit by hand.
func localFree(ptr uintptr) {
	syscall.Syscall(procLocalFree.Addr(), 1, uintptr(ptr), 0, 0)
	return
}
|  |  | |||
|  | @ -1,5 +0,0 @@ | |||
| *~ | ||||
| *.a | ||||
| *.6 | ||||
| *.out | ||||
| _testmain.go | ||||
|  | @ -1,178 +0,0 @@ | |||
| 
 | ||||
|                                  Apache License | ||||
|                            Version 2.0, January 2004 | ||||
|                         http://www.apache.org/licenses/ | ||||
| 
 | ||||
|    TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION | ||||
| 
 | ||||
|    1. Definitions. | ||||
| 
 | ||||
|       "License" shall mean the terms and conditions for use, reproduction, | ||||
|       and distribution as defined by Sections 1 through 9 of this document. | ||||
| 
 | ||||
|       "Licensor" shall mean the copyright owner or entity authorized by | ||||
|       the copyright owner that is granting the License. | ||||
| 
 | ||||
|       "Legal Entity" shall mean the union of the acting entity and all | ||||
|       other entities that control, are controlled by, or are under common | ||||
|       control with that entity. For the purposes of this definition, | ||||
|       "control" means (i) the power, direct or indirect, to cause the | ||||
|       direction or management of such entity, whether by contract or | ||||
|       otherwise, or (ii) ownership of fifty percent (50%) or more of the | ||||
|       outstanding shares, or (iii) beneficial ownership of such entity. | ||||
| 
 | ||||
|       "You" (or "Your") shall mean an individual or Legal Entity | ||||
|       exercising permissions granted by this License. | ||||
| 
 | ||||
|       "Source" form shall mean the preferred form for making modifications, | ||||
|       including but not limited to software source code, documentation | ||||
|       source, and configuration files. | ||||
| 
 | ||||
|       "Object" form shall mean any form resulting from mechanical | ||||
|       transformation or translation of a Source form, including but | ||||
|       not limited to compiled object code, generated documentation, | ||||
|       and conversions to other media types. | ||||
| 
 | ||||
|       "Work" shall mean the work of authorship, whether in Source or | ||||
|       Object form, made available under the License, as indicated by a | ||||
|       copyright notice that is included in or attached to the work | ||||
|       (an example is provided in the Appendix below). | ||||
| 
 | ||||
|       "Derivative Works" shall mean any work, whether in Source or Object | ||||
|       form, that is based on (or derived from) the Work and for which the | ||||
|       editorial revisions, annotations, elaborations, or other modifications | ||||
|       represent, as a whole, an original work of authorship. For the purposes | ||||
|       of this License, Derivative Works shall not include works that remain | ||||
|       separable from, or merely link (or bind by name) to the interfaces of, | ||||
|       the Work and Derivative Works thereof. | ||||
| 
 | ||||
|       "Contribution" shall mean any work of authorship, including | ||||
|       the original version of the Work and any modifications or additions | ||||
|       to that Work or Derivative Works thereof, that is intentionally | ||||
|       submitted to Licensor for inclusion in the Work by the copyright owner | ||||
|       or by an individual or Legal Entity authorized to submit on behalf of | ||||
|       the copyright owner. For the purposes of this definition, "submitted" | ||||
|       means any form of electronic, verbal, or written communication sent | ||||
|       to the Licensor or its representatives, including but not limited to | ||||
|       communication on electronic mailing lists, source code control systems, | ||||
|       and issue tracking systems that are managed by, or on behalf of, the | ||||
|       Licensor for the purpose of discussing and improving the Work, but | ||||
|       excluding communication that is conspicuously marked or otherwise | ||||
|       designated in writing by the copyright owner as "Not a Contribution." | ||||
| 
 | ||||
|       "Contributor" shall mean Licensor and any individual or Legal Entity | ||||
|       on behalf of whom a Contribution has been received by Licensor and | ||||
|       subsequently incorporated within the Work. | ||||
| 
 | ||||
|    2. Grant of Copyright License. Subject to the terms and conditions of | ||||
|       this License, each Contributor hereby grants to You a perpetual, | ||||
|       worldwide, non-exclusive, no-charge, royalty-free, irrevocable | ||||
|       copyright license to reproduce, prepare Derivative Works of, | ||||
|       publicly display, publicly perform, sublicense, and distribute the | ||||
|       Work and such Derivative Works in Source or Object form. | ||||
| 
 | ||||
|    3. Grant of Patent License. Subject to the terms and conditions of | ||||
|       this License, each Contributor hereby grants to You a perpetual, | ||||
|       worldwide, non-exclusive, no-charge, royalty-free, irrevocable | ||||
|       (except as stated in this section) patent license to make, have made, | ||||
|       use, offer to sell, sell, import, and otherwise transfer the Work, | ||||
|       where such license applies only to those patent claims licensable | ||||
|       by such Contributor that are necessarily infringed by their | ||||
|       Contribution(s) alone or by combination of their Contribution(s) | ||||
|       with the Work to which such Contribution(s) was submitted. If You | ||||
|       institute patent litigation against any entity (including a | ||||
|       cross-claim or counterclaim in a lawsuit) alleging that the Work | ||||
|       or a Contribution incorporated within the Work constitutes direct | ||||
|       or contributory patent infringement, then any patent licenses | ||||
|       granted to You under this License for that Work shall terminate | ||||
|       as of the date such litigation is filed. | ||||
| 
 | ||||
|    4. Redistribution. You may reproduce and distribute copies of the | ||||
|       Work or Derivative Works thereof in any medium, with or without | ||||
|       modifications, and in Source or Object form, provided that You | ||||
|       meet the following conditions: | ||||
| 
 | ||||
|       (a) You must give any other recipients of the Work or | ||||
|           Derivative Works a copy of this License; and | ||||
| 
 | ||||
|       (b) You must cause any modified files to carry prominent notices | ||||
|           stating that You changed the files; and | ||||
| 
 | ||||
|       (c) You must retain, in the Source form of any Derivative Works | ||||
|           that You distribute, all copyright, patent, trademark, and | ||||
|           attribution notices from the Source form of the Work, | ||||
|           excluding those notices that do not pertain to any part of | ||||
|           the Derivative Works; and | ||||
| 
 | ||||
|       (d) If the Work includes a "NOTICE" text file as part of its | ||||
|           distribution, then any Derivative Works that You distribute must | ||||
|           include a readable copy of the attribution notices contained | ||||
|           within such NOTICE file, excluding those notices that do not | ||||
|           pertain to any part of the Derivative Works, in at least one | ||||
|           of the following places: within a NOTICE text file distributed | ||||
|           as part of the Derivative Works; within the Source form or | ||||
|           documentation, if provided along with the Derivative Works; or, | ||||
|           within a display generated by the Derivative Works, if and | ||||
|           wherever such third-party notices normally appear. The contents | ||||
|           of the NOTICE file are for informational purposes only and | ||||
|           do not modify the License. You may add Your own attribution | ||||
|           notices within Derivative Works that You distribute, alongside | ||||
|           or as an addendum to the NOTICE text from the Work, provided | ||||
|           that such additional attribution notices cannot be construed | ||||
|           as modifying the License. | ||||
| 
 | ||||
|       You may add Your own copyright statement to Your modifications and | ||||
|       may provide additional or different license terms and conditions | ||||
|       for use, reproduction, or distribution of Your modifications, or | ||||
|       for any such Derivative Works as a whole, provided Your use, | ||||
|       reproduction, and distribution of the Work otherwise complies with | ||||
|       the conditions stated in this License. | ||||
| 
 | ||||
|    5. Submission of Contributions. Unless You explicitly state otherwise, | ||||
|       any Contribution intentionally submitted for inclusion in the Work | ||||
|       by You to the Licensor shall be under the terms and conditions of | ||||
|       this License, without any additional terms or conditions. | ||||
|       Notwithstanding the above, nothing herein shall supersede or modify | ||||
|       the terms of any separate license agreement you may have executed | ||||
|       with Licensor regarding such Contributions. | ||||
| 
 | ||||
|    6. Trademarks. This License does not grant permission to use the trade | ||||
|       names, trademarks, service marks, or product names of the Licensor, | ||||
|       except as required for reasonable and customary use in describing the | ||||
|       origin of the Work and reproducing the content of the NOTICE file. | ||||
| 
 | ||||
|    7. Disclaimer of Warranty. Unless required by applicable law or | ||||
|       agreed to in writing, Licensor provides the Work (and each | ||||
|       Contributor provides its Contributions) on an "AS IS" BASIS, | ||||
|       WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or | ||||
|       implied, including, without limitation, any warranties or conditions | ||||
|       of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A | ||||
|       PARTICULAR PURPOSE. You are solely responsible for determining the | ||||
|       appropriateness of using or redistributing the Work and assume any | ||||
|       risks associated with Your exercise of permissions under this License. | ||||
| 
 | ||||
|    8. Limitation of Liability. In no event and under no legal theory, | ||||
|       whether in tort (including negligence), contract, or otherwise, | ||||
|       unless required by applicable law (such as deliberate and grossly | ||||
|       negligent acts) or agreed to in writing, shall any Contributor be | ||||
|       liable to You for damages, including any direct, indirect, special, | ||||
|       incidental, or consequential damages of any character arising as a | ||||
|       result of this License or out of the use or inability to use the | ||||
|       Work (including but not limited to damages for loss of goodwill, | ||||
|       work stoppage, computer failure or malfunction, or any and all | ||||
|       other commercial damages or losses), even if such Contributor | ||||
|       has been advised of the possibility of such damages. | ||||
| 
 | ||||
|    9. Accepting Warranty or Additional Liability. While redistributing | ||||
|       the Work or Derivative Works thereof, You may choose to offer, | ||||
|       and charge a fee for, acceptance of support, warranty, indemnity, | ||||
|       or other liability obligations and/or rights consistent with this | ||||
|       License. However, in accepting such obligations, You may act only | ||||
|       on Your own behalf and on Your sole responsibility, not on behalf | ||||
|       of any other Contributor, and only if You agree to indemnify, | ||||
|       defend, and hold each Contributor harmless for any liability | ||||
|       incurred by, or claims asserted against, such Contributor by reason | ||||
|       of your accepting any such warranty or additional liability. | ||||
| 
 | ||||
|    END OF TERMS AND CONDITIONS | ||||
| 
 | ||||
|  | @ -1,12 +0,0 @@ | |||
| include $(GOROOT)/src/Make.inc | ||||
| 
 | ||||
| TARG=auth_digest | ||||
| GOFILES=\
 | ||||
| 	auth.go\
 | ||||
| 	digest.go\
 | ||||
| 	basic.go\
 | ||||
| 	misc.go\
 | ||||
| 	md5crypt.go\
 | ||||
| 	users.go\
 | ||||
| 
 | ||||
| include $(GOROOT)/src/Make.pkg | ||||
|  | @ -1,70 +0,0 @@ | |||
| HTTP Authentication implementation in Go | ||||
| ======================================== | ||||
| 
 | ||||
| This is an implementation of HTTP Basic and HTTP Digest authentication | ||||
| in Go language. It is designed as a simple wrapper for | ||||
| http.RequestHandler functions. | ||||
| 
 | ||||
| Features | ||||
| -------- | ||||
|   | ||||
|  * Supports HTTP Basic and HTTP Digest authentication. | ||||
|  * Supports htpasswd and htdigest formatted files. | ||||
|  * Automatic reloading of password files. | ||||
|  * Pluggable interface for user/password storage. | ||||
|  * Supports MD5 and SHA1 for Basic authentication password storage. | ||||
|  * Configurable Digest nonce cache size with expiration. | ||||
|  * Wrapper for legacy http handlers (http.HandlerFunc interface) | ||||
|   | ||||
| Example usage | ||||
| ------------- | ||||
| 
 | ||||
| This is a complete working example for Basic auth: | ||||
| 
 | ||||
|     package main | ||||
| 
 | ||||
|     import ( | ||||
|             auth "github.com/abbot/go-http-auth" | ||||
|             "fmt" | ||||
|             "net/http" | ||||
|     ) | ||||
| 
 | ||||
|     func Secret(user, realm string) string { | ||||
|             if user == "john" { | ||||
|                     // password is "hello" | ||||
|                     return "$1$dlPL2MqE$oQmn16q49SqdmhenQuNgs1" | ||||
|             } | ||||
|             return "" | ||||
|     } | ||||
| 
 | ||||
|     func handle(w http.ResponseWriter, r *auth.AuthenticatedRequest) { | ||||
|             fmt.Fprintf(w, "<html><body><h1>Hello, %s!</h1></body></html>", r.Username) | ||||
|     } | ||||
| 
 | ||||
|     func main() { | ||||
|             authenticator := auth.NewBasicAuthenticator("example.com", Secret) | ||||
|             http.HandleFunc("/", authenticator.Wrap(handle)) | ||||
|             http.ListenAndServe(":8080", nil) | ||||
|     } | ||||
| 
 | ||||
| See more examples in the "examples" directory. | ||||
| 
 | ||||
| Legal | ||||
| ----- | ||||
| 
 | ||||
| This module is developed under Apache 2.0 license, and can be used for | ||||
| open and proprietary projects. | ||||
| 
 | ||||
| Copyright 2012-2013 Lev Shamardin | ||||
| 
 | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); you | ||||
| may not use this file or any other part of this project except in | ||||
| compliance with the License. You may obtain a copy of the License at | ||||
| 
 | ||||
| http://www.apache.org/licenses/LICENSE-2.0 | ||||
| 
 | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or | ||||
| implied. See the License for the specific language governing | ||||
| permissions and limitations under the License. | ||||
|  | @ -1,48 +0,0 @@ | |||
| package auth | ||||
| 
 | ||||
| import "net/http" | ||||
| 
 | ||||
| /*  | ||||
|  Request handlers must take AuthenticatedRequest instead of http.Request | ||||
| */ | ||||
| type AuthenticatedRequest struct { | ||||
| 	http.Request | ||||
| 	/*  | ||||
| 	 Authenticated user name. Current API implies that Username is | ||||
| 	 never empty, which means that authentication is always done | ||||
| 	 before calling the request handler. | ||||
| 	*/ | ||||
| 	Username string | ||||
| } | ||||
| 
 | ||||
| /* | ||||
|  AuthenticatedHandlerFunc is like http.HandlerFunc, but takes | ||||
|  AuthenticatedRequest instead of http.Request | ||||
| */ | ||||
| type AuthenticatedHandlerFunc func(http.ResponseWriter, *AuthenticatedRequest) | ||||
| 
 | ||||
| /* | ||||
|  Authenticator wraps an AuthenticatedHandlerFunc with | ||||
|  authentication-checking code. | ||||
| 
 | ||||
|  Typical Authenticator usage is something like: | ||||
| 
 | ||||
|    authenticator := SomeAuthenticator(...) | ||||
|    http.HandleFunc("/", authenticator(my_handler)) | ||||
| 
 | ||||
|  Authenticator wrapper checks the user authentication and calls the | ||||
|  wrapped function only after authentication has succeeded. Otherwise, | ||||
|  it returns a handler which initiates the authentication procedure. | ||||
| */ | ||||
| type Authenticator func(AuthenticatedHandlerFunc) http.HandlerFunc | ||||
| 
 | ||||
| type AuthenticatorInterface interface { | ||||
| 	Wrap(AuthenticatedHandlerFunc) http.HandlerFunc | ||||
| } | ||||
| 
 | ||||
| func JustCheck(auth AuthenticatorInterface, wrapped http.HandlerFunc) http.HandlerFunc { | ||||
| 	return auth.Wrap(func(w http.ResponseWriter, ar *AuthenticatedRequest) { | ||||
| 		ar.Header.Set("X-Authenticated-Username", ar.Username) | ||||
| 		wrapped(w, &ar.Request) | ||||
| 	}) | ||||
| } | ||||
|  | @ -1,88 +0,0 @@ | |||
| package auth | ||||
| 
 | ||||
| import ( | ||||
| 	"crypto/sha1" | ||||
| 	"encoding/base64" | ||||
| 	"net/http" | ||||
| 	"strings" | ||||
| ) | ||||
| 
 | ||||
| type BasicAuth struct { | ||||
| 	Realm   string | ||||
| 	Secrets SecretProvider | ||||
| } | ||||
| 
 | ||||
| /* | ||||
|  Checks the username/password combination from the request. Returns | ||||
|  either an empty string (authentication failed) or the name of the | ||||
|  authenticated user. | ||||
| 
 | ||||
|  Supports MD5 and SHA1 password entries | ||||
| */ | ||||
| func (a *BasicAuth) CheckAuth(r *http.Request) string { | ||||
| 	s := strings.SplitN(r.Header.Get("Authorization"), " ", 2) | ||||
| 	if len(s) != 2 || s[0] != "Basic" { | ||||
| 		return "" | ||||
| 	} | ||||
| 
 | ||||
| 	b, err := base64.StdEncoding.DecodeString(s[1]) | ||||
| 	if err != nil { | ||||
| 		return "" | ||||
| 	} | ||||
| 	pair := strings.SplitN(string(b), ":", 2) | ||||
| 	if len(pair) != 2 { | ||||
| 		return "" | ||||
| 	} | ||||
| 	passwd := a.Secrets(pair[0], a.Realm) | ||||
| 	if passwd == "" { | ||||
| 		return "" | ||||
| 	} | ||||
| 	if strings.HasPrefix(passwd, "{SHA}") { | ||||
| 		d := sha1.New() | ||||
| 		d.Write([]byte(pair[1])) | ||||
| 		if passwd[5:] != base64.StdEncoding.EncodeToString(d.Sum(nil)) { | ||||
| 			return "" | ||||
| 		} | ||||
| 	} else { | ||||
| 		e := NewMD5Entry(passwd) | ||||
| 		if e == nil { | ||||
| 			return "" | ||||
| 		} | ||||
| 		if passwd != string(MD5Crypt([]byte(pair[1]), e.Salt, e.Magic)) { | ||||
| 			return "" | ||||
| 		} | ||||
| 	} | ||||
| 	return pair[0] | ||||
| } | ||||
| 
 | ||||
| /* | ||||
|  http.Handler for BasicAuth which initiates the authentication process | ||||
|  (or requires reauthentication). | ||||
| */ | ||||
| func (a *BasicAuth) RequireAuth(w http.ResponseWriter, r *http.Request) { | ||||
| 	w.Header().Set("WWW-Authenticate", `Basic realm="`+a.Realm+`"`) | ||||
| 	w.WriteHeader(401) | ||||
| 	w.Write([]byte("401 Unauthorized\n")) | ||||
| } | ||||
| 
 | ||||
| /* | ||||
|  BasicAuthenticator returns a function, which wraps an | ||||
|  AuthenticatedHandlerFunc converting it to http.HandlerFunc. This | ||||
|  wrapper function checks the authentication and either sends back | ||||
|  required authentication headers, or calls the wrapped function with | ||||
|  authenticated username in the AuthenticatedRequest. | ||||
| */ | ||||
| func (a *BasicAuth) Wrap(wrapped AuthenticatedHandlerFunc) http.HandlerFunc { | ||||
| 	return func(w http.ResponseWriter, r *http.Request) { | ||||
| 		if username := a.CheckAuth(r); username == "" { | ||||
| 			a.RequireAuth(w, r) | ||||
| 		} else { | ||||
| 			ar := &AuthenticatedRequest{Request: *r, Username: username} | ||||
| 			wrapped(w, ar) | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func NewBasicAuthenticator(realm string, secrets SecretProvider) *BasicAuth { | ||||
| 	return &BasicAuth{Realm: realm, Secrets: secrets} | ||||
| } | ||||
|  | @ -1,226 +0,0 @@ | |||
| package auth | ||||
| 
 | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"net/http" | ||||
| 	"net/url" | ||||
| 	"sort" | ||||
| 	"strconv" | ||||
| 	"strings" | ||||
| 	"sync" | ||||
| 	"time" | ||||
| ) | ||||
| 
 | ||||
| type digest_client struct { | ||||
| 	nc        uint64 | ||||
| 	last_seen int64 | ||||
| } | ||||
| 
 | ||||
| type DigestAuth struct { | ||||
| 	Realm            string | ||||
| 	Opaque           string | ||||
| 	Secrets          SecretProvider | ||||
| 	PlainTextSecrets bool | ||||
| 
 | ||||
| 	/*  | ||||
| 	 Approximate size of Client's Cache. When actual number of | ||||
| 	 tracked client nonces exceeds | ||||
| 	 ClientCacheSize+ClientCacheTolerance, ClientCacheTolerance*2 | ||||
| 	 older entries are purged. | ||||
| 	*/ | ||||
| 	ClientCacheSize      int | ||||
| 	ClientCacheTolerance int | ||||
| 
 | ||||
| 	clients map[string]*digest_client | ||||
| 	mutex   sync.Mutex | ||||
| } | ||||
| 
 | ||||
| type digest_cache_entry struct { | ||||
| 	nonce     string | ||||
| 	last_seen int64 | ||||
| } | ||||
| 
 | ||||
| type digest_cache []digest_cache_entry | ||||
| 
 | ||||
| func (c digest_cache) Less(i, j int) bool { | ||||
| 	return c[i].last_seen < c[j].last_seen | ||||
| } | ||||
| 
 | ||||
| func (c digest_cache) Len() int { | ||||
| 	return len(c) | ||||
| } | ||||
| 
 | ||||
| func (c digest_cache) Swap(i, j int) { | ||||
| 	c[i], c[j] = c[j], c[i] | ||||
| } | ||||
| 
 | ||||
| /* | ||||
|  Remove count oldest entries from DigestAuth.clients | ||||
| */ | ||||
| func (a *DigestAuth) Purge(count int) { | ||||
| 	entries := make([]digest_cache_entry, 0, len(a.clients)) | ||||
| 	for nonce, client := range a.clients { | ||||
| 		entries = append(entries, digest_cache_entry{nonce, client.last_seen}) | ||||
| 	} | ||||
| 	cache := digest_cache(entries) | ||||
| 	sort.Sort(cache) | ||||
| 	for _, client := range cache[:count] { | ||||
| 		delete(a.clients, client.nonce) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| /* | ||||
|  http.Handler for DigestAuth which initiates the authentication process | ||||
|  (or requires reauthentication). | ||||
| */ | ||||
| func (a *DigestAuth) RequireAuth(w http.ResponseWriter, r *http.Request) { | ||||
| 	if len(a.clients) > a.ClientCacheSize+a.ClientCacheTolerance { | ||||
| 		a.Purge(a.ClientCacheTolerance * 2) | ||||
| 	} | ||||
| 	nonce := RandomKey() | ||||
| 	a.clients[nonce] = &digest_client{nc: 0, last_seen: time.Now().UnixNano()} | ||||
| 	w.Header().Set("WWW-Authenticate", | ||||
| 		fmt.Sprintf(`Digest realm="%s", nonce="%s", opaque="%s", algorithm="MD5", qop="auth"`, | ||||
| 			a.Realm, nonce, a.Opaque)) | ||||
| 	w.WriteHeader(401) | ||||
| 	w.Write([]byte("401 Unauthorized\n")) | ||||
| } | ||||
| 
 | ||||
| /* | ||||
|  Parse Authorization header from the http.Request. Returns a map of | ||||
|  auth parameters or nil if the header is not a valid parsable Digest | ||||
|  auth header. | ||||
| */ | ||||
| func DigestAuthParams(r *http.Request) map[string]string { | ||||
| 	s := strings.SplitN(r.Header.Get("Authorization"), " ", 2) | ||||
| 	if len(s) != 2 || s[0] != "Digest" { | ||||
| 		return nil | ||||
| 	} | ||||
| 
 | ||||
| 	result := map[string]string{} | ||||
| 	for _, kv := range strings.Split(s[1], ",") { | ||||
| 		parts := strings.SplitN(kv, "=", 2) | ||||
| 		if len(parts) != 2 { | ||||
| 			continue | ||||
| 		} | ||||
| 		result[strings.Trim(parts[0], "\" ")] = strings.Trim(parts[1], "\" ") | ||||
| 	} | ||||
| 	return result | ||||
| } | ||||
| 
 | ||||
| /*  | ||||
|  Check if request contains valid authentication data. Returns a pair | ||||
|  of username, authinfo where username is the name of the authenticated | ||||
|  user or an empty string and authinfo is the contents for the optional | ||||
|  Authentication-Info response header. | ||||
| */ | ||||
| func (da *DigestAuth) CheckAuth(r *http.Request) (username string, authinfo *string) { | ||||
| 	da.mutex.Lock() | ||||
| 	defer da.mutex.Unlock() | ||||
| 	username = "" | ||||
| 	authinfo = nil | ||||
| 	auth := DigestAuthParams(r) | ||||
| 	if auth == nil || da.Opaque != auth["opaque"] || auth["algorithm"] != "MD5" || auth["qop"] != "auth" { | ||||
| 		return | ||||
| 	} | ||||
| 
 | ||||
| 	// Check if the requested URI matches auth header
 | ||||
| 	switch u, err := url.Parse(auth["uri"]); { | ||||
| 	case err != nil: | ||||
| 		return | ||||
| 	case r.URL == nil: | ||||
| 		return | ||||
| 	case len(u.Path) > len(r.URL.Path): | ||||
| 		return | ||||
| 	case !strings.HasPrefix(r.URL.Path, u.Path): | ||||
| 		return | ||||
| 	} | ||||
| 
 | ||||
| 	HA1 := da.Secrets(auth["username"], da.Realm) | ||||
| 	if da.PlainTextSecrets { | ||||
| 		HA1 = H(auth["username"] + ":" + da.Realm + ":" + HA1) | ||||
| 	} | ||||
| 	HA2 := H(r.Method + ":" + auth["uri"]) | ||||
| 	KD := H(strings.Join([]string{HA1, auth["nonce"], auth["nc"], auth["cnonce"], auth["qop"], HA2}, ":")) | ||||
| 
 | ||||
| 	if KD != auth["response"] { | ||||
| 		return | ||||
| 	} | ||||
| 
 | ||||
| 	// At this point crypto checks are completed and validated.
 | ||||
| 	// Now check if the session is valid.
 | ||||
| 
 | ||||
| 	nc, err := strconv.ParseUint(auth["nc"], 16, 64) | ||||
| 	if err != nil { | ||||
| 		return | ||||
| 	} | ||||
| 
 | ||||
| 	if client, ok := da.clients[auth["nonce"]]; !ok { | ||||
| 		return | ||||
| 	} else { | ||||
| 		if client.nc != 0 && client.nc >= nc { | ||||
| 			return | ||||
| 		} | ||||
| 		client.nc = nc | ||||
| 		client.last_seen = time.Now().UnixNano() | ||||
| 	} | ||||
| 
 | ||||
| 	resp_HA2 := H(":" + auth["uri"]) | ||||
| 	rspauth := H(strings.Join([]string{HA1, auth["nonce"], auth["nc"], auth["cnonce"], auth["qop"], resp_HA2}, ":")) | ||||
| 
 | ||||
| 	info := fmt.Sprintf(`qop="auth", rspauth="%s", cnonce="%s", nc="%s"`, rspauth, auth["cnonce"], auth["nc"]) | ||||
| 	return auth["username"], &info | ||||
| } | ||||
| 
 | ||||
| /* | ||||
|  Default values for ClientCacheSize and ClientCacheTolerance for DigestAuth | ||||
| */ | ||||
| const DefaultClientCacheSize = 1000 | ||||
| const DefaultClientCacheTolerance = 100 | ||||
| 
 | ||||
| /*  | ||||
|  Wrap returns an Authenticator which uses HTTP Digest | ||||
|  authentication. Arguments: | ||||
| 
 | ||||
|  realm: The authentication realm. | ||||
| 
 | ||||
|  secrets: SecretProvider which must return HA1 digests for the same | ||||
|  realm as above. | ||||
| */ | ||||
| func (a *DigestAuth) Wrap(wrapped AuthenticatedHandlerFunc) http.HandlerFunc { | ||||
| 	return func(w http.ResponseWriter, r *http.Request) { | ||||
| 		if username, authinfo := a.CheckAuth(r); username == "" { | ||||
| 			a.RequireAuth(w, r) | ||||
| 		} else { | ||||
| 			ar := &AuthenticatedRequest{Request: *r, Username: username} | ||||
| 			if authinfo != nil { | ||||
| 				w.Header().Set("Authentication-Info", *authinfo) | ||||
| 			} | ||||
| 			wrapped(w, ar) | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| /*  | ||||
|  JustCheck returns function which converts an http.HandlerFunc into a | ||||
|  http.HandlerFunc which requires authentication. Username is passed as | ||||
|  an extra X-Authenticated-Username header. | ||||
| */ | ||||
| func (a *DigestAuth) JustCheck(wrapped http.HandlerFunc) http.HandlerFunc { | ||||
| 	return a.Wrap(func(w http.ResponseWriter, ar *AuthenticatedRequest) { | ||||
| 		ar.Header.Set("X-Authenticated-Username", ar.Username) | ||||
| 		wrapped(w, &ar.Request) | ||||
| 	}) | ||||
| } | ||||
| 
 | ||||
| func NewDigestAuthenticator(realm string, secrets SecretProvider) *DigestAuth { | ||||
| 	da := &DigestAuth{ | ||||
| 		Opaque:               RandomKey(), | ||||
| 		Realm:                realm, | ||||
| 		Secrets:              secrets, | ||||
| 		PlainTextSecrets:     false, | ||||
| 		ClientCacheSize:      DefaultClientCacheSize, | ||||
| 		ClientCacheTolerance: DefaultClientCacheTolerance, | ||||
| 		clients:              map[string]*digest_client{}} | ||||
| 	return da | ||||
| } | ||||
|  | @ -1,92 +0,0 @@ | |||
| package auth | ||||
| 
 | ||||
| import "crypto/md5" | ||||
| import "strings" | ||||
| 
 | ||||
| const itoa64 = "./0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" | ||||
| 
 | ||||
| var md5_crypt_swaps = [16]int{12, 6, 0, 13, 7, 1, 14, 8, 2, 15, 9, 3, 5, 10, 4, 11} | ||||
| 
 | ||||
| type MD5Entry struct { | ||||
| 	Magic, Salt, Hash []byte | ||||
| } | ||||
| 
 | ||||
| func NewMD5Entry(e string) *MD5Entry { | ||||
| 	parts := strings.SplitN(e, "$", 4) | ||||
| 	if len(parts) != 4 { | ||||
| 		return nil | ||||
| 	} | ||||
| 	return &MD5Entry{ | ||||
| 		Magic: []byte("$" + parts[1] + "$"), | ||||
| 		Salt:  []byte(parts[2]), | ||||
| 		Hash:  []byte(parts[3]), | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| /* | ||||
|  MD5 password crypt implementation | ||||
| */ | ||||
| func MD5Crypt(password, salt, magic []byte) []byte { | ||||
| 	d := md5.New() | ||||
| 
 | ||||
| 	d.Write(password) | ||||
| 	d.Write(magic) | ||||
| 	d.Write(salt) | ||||
| 
 | ||||
| 	d2 := md5.New() | ||||
| 	d2.Write(password) | ||||
| 	d2.Write(salt) | ||||
| 	d2.Write(password) | ||||
| 
 | ||||
| 	for i, mixin := 0, d2.Sum(nil); i < len(password); i++ { | ||||
| 		d.Write([]byte{mixin[i%16]}) | ||||
| 	} | ||||
| 
 | ||||
| 	for i := len(password); i != 0; i >>= 1 { | ||||
| 		if i&1 == 0 { | ||||
| 			d.Write([]byte{password[0]}) | ||||
| 		} else { | ||||
| 			d.Write([]byte{0}) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	final := d.Sum(nil) | ||||
| 
 | ||||
| 	for i := 0; i < 1000; i++ { | ||||
| 		d2 := md5.New() | ||||
| 		if i&1 == 0 { | ||||
| 			d2.Write(final) | ||||
| 		} else { | ||||
| 			d2.Write(password) | ||||
| 		} | ||||
| 
 | ||||
| 		if i%3 != 0 { | ||||
| 			d2.Write(salt) | ||||
| 		} | ||||
| 
 | ||||
| 		if i%7 != 0 { | ||||
| 			d2.Write(password) | ||||
| 		} | ||||
| 
 | ||||
| 		if i&1 == 0 { | ||||
| 			d2.Write(password) | ||||
| 		} else { | ||||
| 			d2.Write(final) | ||||
| 		} | ||||
| 		final = d2.Sum(nil) | ||||
| 	} | ||||
| 
 | ||||
| 	result := make([]byte, 0, 22) | ||||
| 	v := uint(0) | ||||
| 	bits := uint(0) | ||||
| 	for _, i := range md5_crypt_swaps { | ||||
| 		v |= (uint(final[i]) << bits) | ||||
| 		for bits = bits + 8; bits > 6; bits -= 6 { | ||||
| 			result = append(result, itoa64[v&0x3f]) | ||||
| 			v >>= 6 | ||||
| 		} | ||||
| 	} | ||||
| 	result = append(result, itoa64[v&0x3f]) | ||||
| 
 | ||||
| 	return append(append(append(magic, salt...), '$'), result...) | ||||
| } | ||||
|  | @ -1,30 +0,0 @@ | |||
| package auth | ||||
| 
 | ||||
| import "encoding/base64" | ||||
| import "crypto/md5" | ||||
| import "crypto/rand" | ||||
| import "fmt" | ||||
| 
 | ||||
| /* | ||||
|  Return a random 16-byte base64 alphabet string | ||||
| */ | ||||
| func RandomKey() string { | ||||
| 	k := make([]byte, 12) | ||||
| 	for bytes := 0; bytes < len(k); { | ||||
| 		n, err := rand.Read(k[bytes:]) | ||||
| 		if err != nil { | ||||
| 			panic("rand.Read() failed") | ||||
| 		} | ||||
| 		bytes += n | ||||
| 	} | ||||
| 	return base64.StdEncoding.EncodeToString(k) | ||||
| } | ||||
| 
 | ||||
| /* | ||||
|  H function for MD5 algorithm (returns a lower-case hex MD5 digest) | ||||
| */ | ||||
| func H(data string) string { | ||||
| 	digest := md5.New() | ||||
| 	digest.Write([]byte(data)) | ||||
| 	return fmt.Sprintf("%x", digest.Sum(nil)) | ||||
| } | ||||
|  | @ -1 +0,0 @@ | |||
| test:example.com:aa78524fceb0e50fd8ca96dd818b8cf9 | ||||
|  | @ -1,2 +0,0 @@ | |||
| test:{SHA}qvTGHdzF6KLavt4PO0gs2a6pQ00= | ||||
| test2:$apr1$a0j62R97$mYqFkloXH0/UOaUnAiV2b0 | ||||
|  | @ -1,136 +0,0 @@ | |||
| package auth | ||||
| 
 | ||||
| import "encoding/csv" | ||||
| import "os" | ||||
| 
 | ||||
| /*  | ||||
|  SecretProvider is used by authenticators. Takes user name and realm | ||||
|  as an argument, returns secret required for authentication (HA1 for | ||||
|  digest authentication, properly encrypted password for basic). | ||||
| */ | ||||
| type SecretProvider func(user, realm string) string | ||||
| 
 | ||||
| /* | ||||
|  Common functions for file auto-reloading | ||||
| */ | ||||
| type File struct { | ||||
| 	Path string | ||||
| 	Info os.FileInfo | ||||
| 	/* must be set in inherited types during initialization */ | ||||
| 	Reload func() | ||||
| } | ||||
| 
 | ||||
| func (f *File) ReloadIfNeeded() { | ||||
| 	info, err := os.Stat(f.Path) | ||||
| 	if err != nil { | ||||
| 		panic(err) | ||||
| 	} | ||||
| 	if f.Info == nil || f.Info.ModTime() != info.ModTime() { | ||||
| 		f.Info = info | ||||
| 		f.Reload() | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| /* | ||||
|  Structure used for htdigest file authentication. Users map realms to | ||||
|  maps of users to their HA1 digests. | ||||
| */ | ||||
| type HtdigestFile struct { | ||||
| 	File | ||||
| 	Users map[string]map[string]string | ||||
| } | ||||
| 
 | ||||
| func reload_htdigest(hf *HtdigestFile) { | ||||
| 	r, err := os.Open(hf.Path) | ||||
| 	if err != nil { | ||||
| 		panic(err) | ||||
| 	} | ||||
| 	csv_reader := csv.NewReader(r) | ||||
| 	csv_reader.Comma = ':' | ||||
| 	csv_reader.Comment = '#' | ||||
| 	csv_reader.TrimLeadingSpace = true | ||||
| 
 | ||||
| 	records, err := csv_reader.ReadAll() | ||||
| 	if err != nil { | ||||
| 		panic(err) | ||||
| 	} | ||||
| 
 | ||||
| 	hf.Users = make(map[string]map[string]string) | ||||
| 	for _, record := range records { | ||||
| 		_, exists := hf.Users[record[1]] | ||||
| 		if !exists { | ||||
| 			hf.Users[record[1]] = make(map[string]string) | ||||
| 		} | ||||
| 		hf.Users[record[1]][record[0]] = record[2] | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| /* | ||||
|  SecretProvider implementation based on htdigest-formated files. Will | ||||
|  reload htdigest file on changes. Will panic on syntax errors in | ||||
|  htdigest files. | ||||
| */ | ||||
| func HtdigestFileProvider(filename string) SecretProvider { | ||||
| 	hf := &HtdigestFile{File: File{Path: filename}} | ||||
| 	hf.Reload = func() { reload_htdigest(hf) } | ||||
| 	return func(user, realm string) string { | ||||
| 		hf.ReloadIfNeeded() | ||||
| 		_, exists := hf.Users[realm] | ||||
| 		if !exists { | ||||
| 			return "" | ||||
| 		} | ||||
| 		digest, exists := hf.Users[realm][user] | ||||
| 		if !exists { | ||||
| 			return "" | ||||
| 		} | ||||
| 		return digest | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| /* | ||||
|  Structure used for htdigest file authentication. Users map users to | ||||
|  their salted encrypted password | ||||
| */ | ||||
| type HtpasswdFile struct { | ||||
| 	File | ||||
| 	Users map[string]string | ||||
| } | ||||
| 
 | ||||
| func reload_htpasswd(h *HtpasswdFile) { | ||||
| 	r, err := os.Open(h.Path) | ||||
| 	if err != nil { | ||||
| 		panic(err) | ||||
| 	} | ||||
| 	csv_reader := csv.NewReader(r) | ||||
| 	csv_reader.Comma = ':' | ||||
| 	csv_reader.Comment = '#' | ||||
| 	csv_reader.TrimLeadingSpace = true | ||||
| 
 | ||||
| 	records, err := csv_reader.ReadAll() | ||||
| 	if err != nil { | ||||
| 		panic(err) | ||||
| 	} | ||||
| 
 | ||||
| 	h.Users = make(map[string]string) | ||||
| 	for _, record := range records { | ||||
| 		h.Users[record[0]] = record[1] | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| /* | ||||
|  SecretProvider implementation based on htpasswd-formated files. Will | ||||
|  reload htpasswd file on changes. Will panic on syntax errors in | ||||
|  htpasswd files. Realm argument of the SecretProvider is ignored. | ||||
| */ | ||||
| func HtpasswdFileProvider(filename string) SecretProvider { | ||||
| 	h := &HtpasswdFile{File: File{Path: filename}} | ||||
| 	h.Reload = func() { reload_htpasswd(h) } | ||||
| 	return func(user, realm string) string { | ||||
| 		h.ReloadIfNeeded() | ||||
| 		password, exists := h.Users[user] | ||||
| 		if !exists { | ||||
| 			return "" | ||||
| 		} | ||||
| 		return password | ||||
| 	} | ||||
| } | ||||
|  | @ -15,6 +15,12 @@ type Config struct { | |||
| 	Endpoint      string | ||||
| 	SigningRegion string | ||||
| 	SigningName   string | ||||
| 
 | ||||
| 	// States that the signing name did not come from a modeled source but
 | ||||
| 	// was derived based on other data. Used by service client constructors
 | ||||
| 	// to determine if the signin name can be overriden based on metadata the
 | ||||
| 	// service has.
 | ||||
| 	SigningNameDerived bool | ||||
| } | ||||
| 
 | ||||
| // ConfigProvider provides a generic way for a service client to receive
 | ||||
|  | @ -85,6 +91,6 @@ func (c *Client) AddDebugHandlers() { | |||
| 		return | ||||
| 	} | ||||
| 
 | ||||
| 	c.Handlers.Send.PushFrontNamed(request.NamedHandler{Name: "awssdk.client.LogRequest", Fn: logRequest}) | ||||
| 	c.Handlers.Send.PushBackNamed(request.NamedHandler{Name: "awssdk.client.LogResponse", Fn: logResponse}) | ||||
| 	c.Handlers.Send.PushFrontNamed(LogHTTPRequestHandler) | ||||
| 	c.Handlers.Send.PushBackNamed(LogHTTPResponseHandler) | ||||
| } | ||||
|  |  | |||
							
								
								
									
										74
									
								
								cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
								
								
									generated
								
								
									vendored
								
								
							
							
						
						
									
										74
									
								
								cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
								
								
									generated
								
								
									vendored
								
								
							|  | @ -1,11 +1,11 @@ | |||
| package client | ||||
| 
 | ||||
| import ( | ||||
| 	"math/rand" | ||||
| 	"sync" | ||||
| 	"strconv" | ||||
| 	"time" | ||||
| 
 | ||||
| 	"github.com/aws/aws-sdk-go/aws/request" | ||||
| 	"github.com/aws/aws-sdk-go/internal/sdkrand" | ||||
| ) | ||||
| 
 | ||||
| // DefaultRetryer implements basic retry logic using exponential backoff for
 | ||||
|  | @ -30,25 +30,27 @@ func (d DefaultRetryer) MaxRetries() int { | |||
| 	return d.NumMaxRetries | ||||
| } | ||||
| 
 | ||||
| var seededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())}) | ||||
| 
 | ||||
| // RetryRules returns the delay duration before retrying this request again
 | ||||
| func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration { | ||||
| 	// Set the upper limit of delay in retrying at ~five minutes
 | ||||
| 	minTime := 30 | ||||
| 	throttle := d.shouldThrottle(r) | ||||
| 	if throttle { | ||||
| 		if delay, ok := getRetryDelay(r); ok { | ||||
| 			return delay | ||||
| 		} | ||||
| 
 | ||||
| 		minTime = 500 | ||||
| 	} | ||||
| 
 | ||||
| 	retryCount := r.RetryCount | ||||
| 	if retryCount > 13 { | ||||
| 		retryCount = 13 | ||||
| 	} else if throttle && retryCount > 8 { | ||||
| 	if throttle && retryCount > 8 { | ||||
| 		retryCount = 8 | ||||
| 	} else if retryCount > 13 { | ||||
| 		retryCount = 13 | ||||
| 	} | ||||
| 
 | ||||
| 	delay := (1 << uint(retryCount)) * (seededRand.Intn(minTime) + minTime) | ||||
| 	delay := (1 << uint(retryCount)) * (sdkrand.SeededRand.Intn(minTime) + minTime) | ||||
| 	return time.Duration(delay) * time.Millisecond | ||||
| } | ||||
| 
 | ||||
|  | @ -60,7 +62,7 @@ func (d DefaultRetryer) ShouldRetry(r *request.Request) bool { | |||
| 		return *r.Retryable | ||||
| 	} | ||||
| 
 | ||||
| 	if r.HTTPResponse.StatusCode >= 500 { | ||||
| 	if r.HTTPResponse.StatusCode >= 500 && r.HTTPResponse.StatusCode != 501 { | ||||
| 		return true | ||||
| 	} | ||||
| 	return r.IsErrorRetryable() || d.shouldThrottle(r) | ||||
|  | @ -68,29 +70,47 @@ func (d DefaultRetryer) ShouldRetry(r *request.Request) bool { | |||
| 
 | ||||
| // ShouldThrottle returns true if the request should be throttled.
 | ||||
| func (d DefaultRetryer) shouldThrottle(r *request.Request) bool { | ||||
| 	if r.HTTPResponse.StatusCode == 502 || | ||||
| 		r.HTTPResponse.StatusCode == 503 || | ||||
| 		r.HTTPResponse.StatusCode == 504 { | ||||
| 		return true | ||||
| 	} | ||||
| 	switch r.HTTPResponse.StatusCode { | ||||
| 	case 429: | ||||
| 	case 502: | ||||
| 	case 503: | ||||
| 	case 504: | ||||
| 	default: | ||||
| 		return r.IsErrorThrottle() | ||||
| 	} | ||||
| 
 | ||||
| // lockedSource is a thread-safe implementation of rand.Source
 | ||||
| type lockedSource struct { | ||||
| 	lk  sync.Mutex | ||||
| 	src rand.Source | ||||
| 	return true | ||||
| } | ||||
| 
 | ||||
| func (r *lockedSource) Int63() (n int64) { | ||||
| 	r.lk.Lock() | ||||
| 	n = r.src.Int63() | ||||
| 	r.lk.Unlock() | ||||
| 	return | ||||
| // This will look in the Retry-After header, RFC 7231, for how long
 | ||||
| // it will wait before attempting another request
 | ||||
| func getRetryDelay(r *request.Request) (time.Duration, bool) { | ||||
| 	if !canUseRetryAfterHeader(r) { | ||||
| 		return 0, false | ||||
| 	} | ||||
| 
 | ||||
| func (r *lockedSource) Seed(seed int64) { | ||||
| 	r.lk.Lock() | ||||
| 	r.src.Seed(seed) | ||||
| 	r.lk.Unlock() | ||||
| 	delayStr := r.HTTPResponse.Header.Get("Retry-After") | ||||
| 	if len(delayStr) == 0 { | ||||
| 		return 0, false | ||||
| 	} | ||||
| 
 | ||||
| 	delay, err := strconv.Atoi(delayStr) | ||||
| 	if err != nil { | ||||
| 		return 0, false | ||||
| 	} | ||||
| 
 | ||||
| 	return time.Duration(delay) * time.Second, true | ||||
| } | ||||
| 
 | ||||
| // Will look at the status code to see if the retry header pertains to
 | ||||
| // the status code.
 | ||||
| func canUseRetryAfterHeader(r *request.Request) bool { | ||||
| 	switch r.HTTPResponse.StatusCode { | ||||
| 	case 429: | ||||
| 	case 503: | ||||
| 	default: | ||||
| 		return false | ||||
| 	} | ||||
| 
 | ||||
| 	return true | ||||
| } | ||||
|  |  | |||
|  | @ -44,22 +44,57 @@ func (reader *teeReaderCloser) Close() error { | |||
| 	return reader.Source.Close() | ||||
| } | ||||
| 
 | ||||
| // LogHTTPRequestHandler is a SDK request handler to log the HTTP request sent
 | ||||
| // to a service. Will include the HTTP request body if the LogLevel of the
 | ||||
| // request matches LogDebugWithHTTPBody.
 | ||||
| var LogHTTPRequestHandler = request.NamedHandler{ | ||||
| 	Name: "awssdk.client.LogRequest", | ||||
| 	Fn:   logRequest, | ||||
| } | ||||
| 
 | ||||
| func logRequest(r *request.Request) { | ||||
| 	logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) | ||||
| 	dumpedBody, err := httputil.DumpRequestOut(r.HTTPRequest, logBody) | ||||
| 	bodySeekable := aws.IsReaderSeekable(r.Body) | ||||
| 
 | ||||
| 	b, err := httputil.DumpRequestOut(r.HTTPRequest, logBody) | ||||
| 	if err != nil { | ||||
| 		r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, r.ClientInfo.ServiceName, r.Operation.Name, err)) | ||||
| 		r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, | ||||
| 			r.ClientInfo.ServiceName, r.Operation.Name, err)) | ||||
| 		return | ||||
| 	} | ||||
| 
 | ||||
| 	if logBody { | ||||
| 		if !bodySeekable { | ||||
| 			r.SetReaderBody(aws.ReadSeekCloser(r.HTTPRequest.Body)) | ||||
| 		} | ||||
| 		// Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's
 | ||||
| 		// Body as a NoOpCloser and will not be reset after read by the HTTP
 | ||||
| 		// client reader.
 | ||||
| 		r.ResetBody() | ||||
| 	} | ||||
| 
 | ||||
| 	r.Config.Logger.Log(fmt.Sprintf(logReqMsg, r.ClientInfo.ServiceName, r.Operation.Name, string(dumpedBody))) | ||||
| 	r.Config.Logger.Log(fmt.Sprintf(logReqMsg, | ||||
| 		r.ClientInfo.ServiceName, r.Operation.Name, string(b))) | ||||
| } | ||||
| 
 | ||||
| // LogHTTPRequestHeaderHandler is a SDK request handler to log the HTTP request sent
 | ||||
| // to a service. Will only log the HTTP request's headers. The request payload
 | ||||
| // will not be read.
 | ||||
| var LogHTTPRequestHeaderHandler = request.NamedHandler{ | ||||
| 	Name: "awssdk.client.LogRequestHeader", | ||||
| 	Fn:   logRequestHeader, | ||||
| } | ||||
| 
 | ||||
| func logRequestHeader(r *request.Request) { | ||||
| 	b, err := httputil.DumpRequestOut(r.HTTPRequest, false) | ||||
| 	if err != nil { | ||||
| 		r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, | ||||
| 			r.ClientInfo.ServiceName, r.Operation.Name, err)) | ||||
| 		return | ||||
| 	} | ||||
| 
 | ||||
| 	r.Config.Logger.Log(fmt.Sprintf(logReqMsg, | ||||
| 		r.ClientInfo.ServiceName, r.Operation.Name, string(b))) | ||||
| } | ||||
| 
 | ||||
| const logRespMsg = `DEBUG: Response %s/%s Details: | ||||
|  | @ -72,27 +107,44 @@ const logRespErrMsg = `DEBUG ERROR: Response %s/%s: | |||
| %s | ||||
| -----------------------------------------------------` | ||||
| 
 | ||||
| // LogHTTPResponseHandler is a SDK request handler to log the HTTP response
 | ||||
| // received from a service. Will include the HTTP response body if the LogLevel
 | ||||
| // of the request matches LogDebugWithHTTPBody.
 | ||||
| var LogHTTPResponseHandler = request.NamedHandler{ | ||||
| 	Name: "awssdk.client.LogResponse", | ||||
| 	Fn:   logResponse, | ||||
| } | ||||
| 
 | ||||
| func logResponse(r *request.Request) { | ||||
| 	lw := &logWriter{r.Config.Logger, bytes.NewBuffer(nil)} | ||||
| 
 | ||||
| 	logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) | ||||
| 	if logBody { | ||||
| 		r.HTTPResponse.Body = &teeReaderCloser{ | ||||
| 			Reader: io.TeeReader(r.HTTPResponse.Body, lw), | ||||
| 			Source: r.HTTPResponse.Body, | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	handlerFn := func(req *request.Request) { | ||||
| 		body, err := httputil.DumpResponse(req.HTTPResponse, false) | ||||
| 		b, err := httputil.DumpResponse(req.HTTPResponse, false) | ||||
| 		if err != nil { | ||||
| 			lw.Logger.Log(fmt.Sprintf(logRespErrMsg, req.ClientInfo.ServiceName, req.Operation.Name, err)) | ||||
| 			lw.Logger.Log(fmt.Sprintf(logRespErrMsg, | ||||
| 				req.ClientInfo.ServiceName, req.Operation.Name, err)) | ||||
| 			return | ||||
| 		} | ||||
| 
 | ||||
| 		lw.Logger.Log(fmt.Sprintf(logRespMsg, | ||||
| 			req.ClientInfo.ServiceName, req.Operation.Name, string(b))) | ||||
| 
 | ||||
| 		if logBody { | ||||
| 			b, err := ioutil.ReadAll(lw.buf) | ||||
| 			if err != nil { | ||||
| 			lw.Logger.Log(fmt.Sprintf(logRespErrMsg, req.ClientInfo.ServiceName, req.Operation.Name, err)) | ||||
| 				lw.Logger.Log(fmt.Sprintf(logRespErrMsg, | ||||
| 					req.ClientInfo.ServiceName, req.Operation.Name, err)) | ||||
| 				return | ||||
| 			} | ||||
| 		lw.Logger.Log(fmt.Sprintf(logRespMsg, req.ClientInfo.ServiceName, req.Operation.Name, string(body))) | ||||
| 		if req.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) { | ||||
| 
 | ||||
| 			lw.Logger.Log(string(b)) | ||||
| 		} | ||||
| 	} | ||||
|  | @ -106,3 +158,27 @@ func logResponse(r *request.Request) { | |||
| 		Name: handlerName, Fn: handlerFn, | ||||
| 	}) | ||||
| } | ||||
| 
 | ||||
| // LogHTTPResponseHeaderHandler is a SDK request handler to log the HTTP
 | ||||
| // response received from a service. Will only log the HTTP response's headers.
 | ||||
| // The response payload will not be read.
 | ||||
| var LogHTTPResponseHeaderHandler = request.NamedHandler{ | ||||
| 	Name: "awssdk.client.LogResponseHeader", | ||||
| 	Fn:   logResponseHeader, | ||||
| } | ||||
| 
 | ||||
| func logResponseHeader(r *request.Request) { | ||||
| 	if r.Config.Logger == nil { | ||||
| 		return | ||||
| 	} | ||||
| 
 | ||||
| 	b, err := httputil.DumpResponse(r.HTTPResponse, false) | ||||
| 	if err != nil { | ||||
| 		r.Config.Logger.Log(fmt.Sprintf(logRespErrMsg, | ||||
| 			r.ClientInfo.ServiceName, r.Operation.Name, err)) | ||||
| 		return | ||||
| 	} | ||||
| 
 | ||||
| 	r.Config.Logger.Log(fmt.Sprintf(logRespMsg, | ||||
| 		r.ClientInfo.ServiceName, r.Operation.Name, string(b))) | ||||
| } | ||||
|  |  | |||
|  | @ -3,6 +3,7 @@ package metadata | |||
| // ClientInfo wraps immutable data from the client.Client structure.
 | ||||
| type ClientInfo struct { | ||||
| 	ServiceName   string | ||||
| 	ServiceID     string | ||||
| 	APIVersion    string | ||||
| 	Endpoint      string | ||||
| 	SigningName   string | ||||
|  |  | |||
|  | @ -151,6 +151,15 @@ type Config struct { | |||
| 	// with accelerate.
 | ||||
| 	S3UseAccelerate *bool | ||||
| 
 | ||||
| 	// S3DisableContentMD5Validation config option is temporarily disabled,
 | ||||
| 	// For S3 GetObject API calls, #1837.
 | ||||
| 	//
 | ||||
| 	// Set this to `true` to disable the S3 service client from automatically
 | ||||
| 	// adding the ContentMD5 to S3 Object Put and Upload API calls. This option
 | ||||
| 	// will also disable the SDK from performing object ContentMD5 validation
 | ||||
| 	// on GetObject API calls.
 | ||||
| 	S3DisableContentMD5Validation *bool | ||||
| 
 | ||||
| 	// Set this to `true` to disable the EC2Metadata client from overriding the
 | ||||
| 	// default http.Client's Timeout. This is helpful if you do not want the
 | ||||
| 	// EC2Metadata client to create a new http.Client. This options is only
 | ||||
|  | @ -168,7 +177,7 @@ type Config struct { | |||
| 	//
 | ||||
| 	EC2MetadataDisableTimeoutOverride *bool | ||||
| 
 | ||||
| 	// Instructs the endpiont to be generated for a service client to
 | ||||
| 	// Instructs the endpoint to be generated for a service client to
 | ||||
| 	// be the dual stack endpoint. The dual stack endpoint will support
 | ||||
| 	// both IPv4 and IPv6 addressing.
 | ||||
| 	//
 | ||||
|  | @ -336,6 +345,15 @@ func (c *Config) WithS3Disable100Continue(disable bool) *Config { | |||
| func (c *Config) WithS3UseAccelerate(enable bool) *Config { | ||||
| 	c.S3UseAccelerate = &enable | ||||
| 	return c | ||||
| 
 | ||||
| } | ||||
| 
 | ||||
| // WithS3DisableContentMD5Validation sets a config
 | ||||
| // S3DisableContentMD5Validation value returning a Config pointer for chaining.
 | ||||
| func (c *Config) WithS3DisableContentMD5Validation(enable bool) *Config { | ||||
| 	c.S3DisableContentMD5Validation = &enable | ||||
| 	return c | ||||
| 
 | ||||
| } | ||||
| 
 | ||||
| // WithUseDualStack sets a config UseDualStack value returning a Config
 | ||||
|  | @ -435,6 +453,10 @@ func mergeInConfig(dst *Config, other *Config) { | |||
| 		dst.S3UseAccelerate = other.S3UseAccelerate | ||||
| 	} | ||||
| 
 | ||||
| 	if other.S3DisableContentMD5Validation != nil { | ||||
| 		dst.S3DisableContentMD5Validation = other.S3DisableContentMD5Validation | ||||
| 	} | ||||
| 
 | ||||
| 	if other.UseDualStack != nil { | ||||
| 		dst.UseDualStack = other.UseDualStack | ||||
| 	} | ||||
|  |  | |||
							
								
								
									
										28
									
								
								cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
								
								
									generated
								
								
									vendored
								
								
							
							
						
						
									
										28
									
								
								cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
								
								
									generated
								
								
									vendored
								
								
							|  | @ -3,12 +3,10 @@ package corehandlers | |||
| import ( | ||||
| 	"bytes" | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 	"io/ioutil" | ||||
| 	"net/http" | ||||
| 	"net/url" | ||||
| 	"regexp" | ||||
| 	"runtime" | ||||
| 	"strconv" | ||||
| 	"time" | ||||
| 
 | ||||
|  | @ -36,18 +34,13 @@ var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLen | |||
| 	if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" { | ||||
| 		length, _ = strconv.ParseInt(slength, 10, 64) | ||||
| 	} else { | ||||
| 		switch body := r.Body.(type) { | ||||
| 		case nil: | ||||
| 			length = 0 | ||||
| 		case lener: | ||||
| 			length = int64(body.Len()) | ||||
| 		case io.Seeker: | ||||
| 			r.BodyStart, _ = body.Seek(0, 1) | ||||
| 			end, _ := body.Seek(0, 2) | ||||
| 			body.Seek(r.BodyStart, 0) // make sure to seek back to original location
 | ||||
| 			length = end - r.BodyStart | ||||
| 		default: | ||||
| 			panic("Cannot get length of body, must provide `ContentLength`") | ||||
| 		if r.Body != nil { | ||||
| 			var err error | ||||
| 			length, err = aws.SeekerLen(r.Body) | ||||
| 			if err != nil { | ||||
| 				r.Error = awserr.New(request.ErrCodeSerialization, "failed to get request body's length", err) | ||||
| 				return | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
|  | @ -60,13 +53,6 @@ var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLen | |||
| 	} | ||||
| }} | ||||
| 
 | ||||
| // SDKVersionUserAgentHandler is a request handler for adding the SDK Version to the user agent.
 | ||||
| var SDKVersionUserAgentHandler = request.NamedHandler{ | ||||
| 	Name: "core.SDKVersionUserAgentHandler", | ||||
| 	Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion, | ||||
| 		runtime.Version(), runtime.GOOS, runtime.GOARCH), | ||||
| } | ||||
| 
 | ||||
| var reStatusCode = regexp.MustCompile(`^(\d{3})`) | ||||
| 
 | ||||
| // ValidateReqSigHandler is a request handler to ensure that the request's
 | ||||
|  |  | |||
							
								
								
									
										37
									
								
								cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go
								
								
									generated
								
								
									vendored
								
								
									Normal file
								
							
							
						
						
									
										37
									
								
								cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go
								
								
									generated
								
								
									vendored
								
								
									Normal file
								
							|  | @ -0,0 +1,37 @@ | |||
| package corehandlers | ||||
| 
 | ||||
| import ( | ||||
| 	"os" | ||||
| 	"runtime" | ||||
| 
 | ||||
| 	"github.com/aws/aws-sdk-go/aws" | ||||
| 	"github.com/aws/aws-sdk-go/aws/request" | ||||
| ) | ||||
| 
 | ||||
| // SDKVersionUserAgentHandler is a request handler for adding the SDK Version
 | ||||
| // to the user agent.
 | ||||
| var SDKVersionUserAgentHandler = request.NamedHandler{ | ||||
| 	Name: "core.SDKVersionUserAgentHandler", | ||||
| 	Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion, | ||||
| 		runtime.Version(), runtime.GOOS, runtime.GOARCH), | ||||
| } | ||||
| 
 | ||||
| const execEnvVar = `AWS_EXECUTION_ENV` | ||||
| const execEnvUAKey = `exec_env` | ||||
| 
 | ||||
| // AddHostExecEnvUserAgentHander is a request handler appending the SDK's
 | ||||
| // execution environment to the user agent.
 | ||||
| //
 | ||||
| // If the environment variable AWS_EXECUTION_ENV is set, its value will be
 | ||||
| // appended to the user agent string.
 | ||||
| var AddHostExecEnvUserAgentHander = request.NamedHandler{ | ||||
| 	Name: "core.AddHostExecEnvUserAgentHander", | ||||
| 	Fn: func(r *request.Request) { | ||||
| 		v := os.Getenv(execEnvVar) | ||||
| 		if len(v) == 0 { | ||||
| 			return | ||||
| 		} | ||||
| 
 | ||||
| 		request.AddToUserAgent(r, execEnvUAKey+"/"+v) | ||||
| 	}, | ||||
| } | ||||
							
								
								
									
										18
									
								
								cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
								
								
									generated
								
								
									vendored
								
								
							
							
						
						
									
										18
									
								
								cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
								
								
									generated
								
								
									vendored
								
								
							|  | @ -178,7 +178,8 @@ func (e *Expiry) IsExpired() bool { | |||
| type Credentials struct { | ||||
| 	creds        Value | ||||
| 	forceRefresh bool | ||||
| 	m            sync.Mutex | ||||
| 
 | ||||
| 	m sync.RWMutex | ||||
| 
 | ||||
| 	provider Provider | ||||
| } | ||||
|  | @ -201,6 +202,17 @@ func NewCredentials(provider Provider) *Credentials { | |||
| // If Credentials.Expire() was called the credentials Value will be force
 | ||||
| // expired, and the next call to Get() will cause them to be refreshed.
 | ||||
| func (c *Credentials) Get() (Value, error) { | ||||
| 	// Check the cached credentials first with just the read lock.
 | ||||
| 	c.m.RLock() | ||||
| 	if !c.isExpired() { | ||||
| 		creds := c.creds | ||||
| 		c.m.RUnlock() | ||||
| 		return creds, nil | ||||
| 	} | ||||
| 	c.m.RUnlock() | ||||
| 
 | ||||
| 	// Credentials are expired need to retrieve the credentials taking the full
 | ||||
| 	// lock.
 | ||||
| 	c.m.Lock() | ||||
| 	defer c.m.Unlock() | ||||
| 
 | ||||
|  | @ -234,8 +246,8 @@ func (c *Credentials) Expire() { | |||
| // If the Credentials were forced to be expired with Expire() this will
 | ||||
| // reflect that override.
 | ||||
| func (c *Credentials) IsExpired() bool { | ||||
| 	c.m.Lock() | ||||
| 	defer c.m.Unlock() | ||||
| 	c.m.RLock() | ||||
| 	defer c.m.RUnlock() | ||||
| 
 | ||||
| 	return c.isExpired() | ||||
| } | ||||
|  |  | |||
							
								
								
									
										46
									
								
								cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go
								
								
									generated
								
								
									vendored
								
								
									Normal file
								
							
							
						
						
									
										46
									
								
								cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go
								
								
									generated
								
								
									vendored
								
								
									Normal file
								
							|  | @ -0,0 +1,46 @@ | |||
| // Package csm provides Client Side Monitoring (CSM) which enables sending metrics
 | ||||
| // via UDP connection. Using the Start function will enable the reporting of
 | ||||
| // metrics on a given port. If Start is called, with different parameters, again,
 | ||||
| // a panic will occur.
 | ||||
| //
 | ||||
| // Pause can be called to pause any metrics publishing on a given port. Sessions
 | ||||
| // that have had their handlers modified via InjectHandlers may still be used.
 | ||||
| // However, the handlers will act as a no-op meaning no metrics will be published.
 | ||||
| //
 | ||||
| //	Example:
 | ||||
| //		r, err := csm.Start("clientID", ":31000")
 | ||||
| //		if err != nil {
 | ||||
| //			panic(fmt.Errorf("failed starting CSM:  %v", err))
 | ||||
| //		}
 | ||||
| //
 | ||||
| //		sess, err := session.NewSession(&aws.Config{})
 | ||||
| //		if err != nil {
 | ||||
| //			panic(fmt.Errorf("failed loading session: %v", err))
 | ||||
| //		}
 | ||||
| //
 | ||||
| //		r.InjectHandlers(&sess.Handlers)
 | ||||
| //
 | ||||
| //		client := s3.New(sess)
 | ||||
| //		resp, err := client.GetObject(&s3.GetObjectInput{
 | ||||
| //			Bucket: aws.String("bucket"),
 | ||||
| //			Key: aws.String("key"),
 | ||||
| //		})
 | ||||
| //
 | ||||
| //		// Will pause monitoring
 | ||||
| //		r.Pause()
 | ||||
| //		resp, err = client.GetObject(&s3.GetObjectInput{
 | ||||
| //			Bucket: aws.String("bucket"),
 | ||||
| //			Key: aws.String("key"),
 | ||||
| //		})
 | ||||
| //
 | ||||
| //		// Resume monitoring
 | ||||
| //		r.Continue()
 | ||||
| //
 | ||||
| // Start returns a Reporter that is used to enable or disable monitoring. If
 | ||||
| // access to the Reporter is required later, calling Get will return the Reporter
 | ||||
| // singleton.
 | ||||
| //
 | ||||
| //	Example:
 | ||||
| //		r := csm.Get()
 | ||||
| //		r.Continue()
 | ||||
| package csm | ||||
							
								
								
									
										67
									
								
								cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go
								
								
									generated
								
								
									vendored
								
								
									Normal file
								
							
							
						
						
									
										67
									
								
								cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go
								
								
									generated
								
								
									vendored
								
								
									Normal file
								
							|  | @ -0,0 +1,67 @@ | |||
| package csm | ||||
| 
 | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"sync" | ||||
| ) | ||||
| 
 | ||||
| var ( | ||||
| 	lock sync.Mutex | ||||
| ) | ||||
| 
 | ||||
| // Client side metric handler names
 | ||||
| const ( | ||||
| 	APICallMetricHandlerName        = "awscsm.SendAPICallMetric" | ||||
| 	APICallAttemptMetricHandlerName = "awscsm.SendAPICallAttemptMetric" | ||||
| ) | ||||
| 
 | ||||
| // Start will start the a long running go routine to capture
 | ||||
| // client side metrics. Calling start multiple time will only
 | ||||
| // start the metric listener once and will panic if a different
 | ||||
| // client ID or port is passed in.
 | ||||
| //
 | ||||
| //	Example:
 | ||||
| //		r, err := csm.Start("clientID", "127.0.0.1:8094")
 | ||||
| //		if err != nil {
 | ||||
| //			panic(fmt.Errorf("expected no error, but received %v", err))
 | ||||
| //		}
 | ||||
| //		sess := session.NewSession()
 | ||||
| //		r.InjectHandlers(sess.Handlers)
 | ||||
| //
 | ||||
| //		svc := s3.New(sess)
 | ||||
| //		out, err := svc.GetObject(&s3.GetObjectInput{
 | ||||
| //			Bucket: aws.String("bucket"),
 | ||||
| //			Key: aws.String("key"),
 | ||||
| //		})
 | ||||
| func Start(clientID string, url string) (*Reporter, error) { | ||||
| 	lock.Lock() | ||||
| 	defer lock.Unlock() | ||||
| 
 | ||||
| 	if sender == nil { | ||||
| 		sender = newReporter(clientID, url) | ||||
| 	} else { | ||||
| 		if sender.clientID != clientID { | ||||
| 			panic(fmt.Errorf("inconsistent client IDs. %q was expected, but received %q", sender.clientID, clientID)) | ||||
| 		} | ||||
| 
 | ||||
| 		if sender.url != url { | ||||
| 			panic(fmt.Errorf("inconsistent URLs. %q was expected, but received %q", sender.url, url)) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	if err := connect(url); err != nil { | ||||
| 		sender = nil | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	return sender, nil | ||||
| } | ||||
| 
 | ||||
| // Get will return a reporter if one exists, if one does not exist, nil will
 | ||||
| // be returned.
 | ||||
| func Get() *Reporter { | ||||
| 	lock.Lock() | ||||
| 	defer lock.Unlock() | ||||
| 
 | ||||
| 	return sender | ||||
| } | ||||
							
								
								
									
										51
									
								
								cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go
								
								
									generated
								
								
									vendored
								
								
									Normal file
								
							
							
						
						
									
										51
									
								
								cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go
								
								
									generated
								
								
									vendored
								
								
									Normal file
								
							|  | @ -0,0 +1,51 @@ | |||
| package csm | ||||
| 
 | ||||
| import ( | ||||
| 	"strconv" | ||||
| 	"time" | ||||
| ) | ||||
| 
 | ||||
| type metricTime time.Time | ||||
| 
 | ||||
| func (t metricTime) MarshalJSON() ([]byte, error) { | ||||
| 	ns := time.Duration(time.Time(t).UnixNano()) | ||||
| 	return []byte(strconv.FormatInt(int64(ns/time.Millisecond), 10)), nil | ||||
| } | ||||
| 
 | ||||
| type metric struct { | ||||
| 	ClientID  *string     `json:"ClientId,omitempty"` | ||||
| 	API       *string     `json:"Api,omitempty"` | ||||
| 	Service   *string     `json:"Service,omitempty"` | ||||
| 	Timestamp *metricTime `json:"Timestamp,omitempty"` | ||||
| 	Type      *string     `json:"Type,omitempty"` | ||||
| 	Version   *int        `json:"Version,omitempty"` | ||||
| 
 | ||||
| 	AttemptCount *int `json:"AttemptCount,omitempty"` | ||||
| 	Latency      *int `json:"Latency,omitempty"` | ||||
| 
 | ||||
| 	Fqdn           *string `json:"Fqdn,omitempty"` | ||||
| 	UserAgent      *string `json:"UserAgent,omitempty"` | ||||
| 	AttemptLatency *int    `json:"AttemptLatency,omitempty"` | ||||
| 
 | ||||
| 	SessionToken   *string `json:"SessionToken,omitempty"` | ||||
| 	Region         *string `json:"Region,omitempty"` | ||||
| 	AccessKey      *string `json:"AccessKey,omitempty"` | ||||
| 	HTTPStatusCode *int    `json:"HttpStatusCode,omitempty"` | ||||
| 	XAmzID2        *string `json:"XAmzId2,omitempty"` | ||||
| 	XAmzRequestID  *string `json:"XAmznRequestId,omitempty"` | ||||
| 
 | ||||
| 	AWSException        *string `json:"AwsException,omitempty"` | ||||
| 	AWSExceptionMessage *string `json:"AwsExceptionMessage,omitempty"` | ||||
| 	SDKException        *string `json:"SdkException,omitempty"` | ||||
| 	SDKExceptionMessage *string `json:"SdkExceptionMessage,omitempty"` | ||||
| 
 | ||||
| 	DestinationIP    *string `json:"DestinationIp,omitempty"` | ||||
| 	ConnectionReused *int    `json:"ConnectionReused,omitempty"` | ||||
| 
 | ||||
| 	AcquireConnectionLatency *int `json:"AcquireConnectionLatency,omitempty"` | ||||
| 	ConnectLatency           *int `json:"ConnectLatency,omitempty"` | ||||
| 	RequestLatency           *int `json:"RequestLatency,omitempty"` | ||||
| 	DNSLatency               *int `json:"DnsLatency,omitempty"` | ||||
| 	TCPLatency               *int `json:"TcpLatency,omitempty"` | ||||
| 	SSLLatency               *int `json:"SslLatency,omitempty"` | ||||
| } | ||||
							
								
								
									
										54
									
								
								cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/aws/csm/metricChan.go
								
								
									generated
								
								
									vendored
								
								
									Normal file
								
							
							
						
						
									
										54
									
								
								cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/aws/csm/metricChan.go
								
								
									generated
								
								
									vendored
								
								
									Normal file
								
							|  | @ -0,0 +1,54 @@ | |||
| package csm | ||||
| 
 | ||||
| import ( | ||||
| 	"sync/atomic" | ||||
| ) | ||||
| 
 | ||||
| const ( | ||||
| 	runningEnum = iota | ||||
| 	pausedEnum | ||||
| ) | ||||
| 
 | ||||
| var ( | ||||
| 	// MetricsChannelSize of metrics to hold in the channel
 | ||||
| 	MetricsChannelSize = 100 | ||||
| ) | ||||
| 
 | ||||
| type metricChan struct { | ||||
| 	ch     chan metric | ||||
| 	paused int64 | ||||
| } | ||||
| 
 | ||||
| func newMetricChan(size int) metricChan { | ||||
| 	return metricChan{ | ||||
| 		ch: make(chan metric, size), | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func (ch *metricChan) Pause() { | ||||
| 	atomic.StoreInt64(&ch.paused, pausedEnum) | ||||
| } | ||||
| 
 | ||||
| func (ch *metricChan) Continue() { | ||||
| 	atomic.StoreInt64(&ch.paused, runningEnum) | ||||
| } | ||||
| 
 | ||||
| func (ch *metricChan) IsPaused() bool { | ||||
| 	v := atomic.LoadInt64(&ch.paused) | ||||
| 	return v == pausedEnum | ||||
| } | ||||
| 
 | ||||
| // Push will push metrics to the metric channel if the channel
 | ||||
| // is not paused
 | ||||
| func (ch *metricChan) Push(m metric) bool { | ||||
| 	if ch.IsPaused() { | ||||
| 		return false | ||||
| 	} | ||||
| 
 | ||||
| 	select { | ||||
| 	case ch.ch <- m: | ||||
| 		return true | ||||
| 	default: | ||||
| 		return false | ||||
| 	} | ||||
| } | ||||
							
								
								
									
										230
									
								
								cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go
								
								
									generated
								
								
									vendored
								
								
									Normal file
								
							
							
						
						
									
										230
									
								
								cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go
								
								
									generated
								
								
									vendored
								
								
									Normal file
								
							|  | @ -0,0 +1,230 @@ | |||
| package csm | ||||
| 
 | ||||
| import ( | ||||
| 	"encoding/json" | ||||
| 	"net" | ||||
| 	"time" | ||||
| 
 | ||||
| 	"github.com/aws/aws-sdk-go/aws" | ||||
| 	"github.com/aws/aws-sdk-go/aws/awserr" | ||||
| 	"github.com/aws/aws-sdk-go/aws/request" | ||||
| ) | ||||
| 
 | ||||
| const ( | ||||
| 	// DefaultPort is used when no port is specified
 | ||||
| 	DefaultPort = "31000" | ||||
| ) | ||||
| 
 | ||||
| // Reporter will gather metrics of API requests made and
 | ||||
| // send those metrics to the CSM endpoint.
 | ||||
| type Reporter struct { | ||||
| 	clientID  string | ||||
| 	url       string | ||||
| 	conn      net.Conn | ||||
| 	metricsCh metricChan | ||||
| 	done      chan struct{} | ||||
| } | ||||
| 
 | ||||
| var ( | ||||
| 	sender *Reporter | ||||
| ) | ||||
| 
 | ||||
| func connect(url string) error { | ||||
| 	const network = "udp" | ||||
| 	if err := sender.connect(network, url); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	if sender.done == nil { | ||||
| 		sender.done = make(chan struct{}) | ||||
| 		go sender.start() | ||||
| 	} | ||||
| 
 | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| func newReporter(clientID, url string) *Reporter { | ||||
| 	return &Reporter{ | ||||
| 		clientID:  clientID, | ||||
| 		url:       url, | ||||
| 		metricsCh: newMetricChan(MetricsChannelSize), | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func (rep *Reporter) sendAPICallAttemptMetric(r *request.Request) { | ||||
| 	if rep == nil { | ||||
| 		return | ||||
| 	} | ||||
| 
 | ||||
| 	now := time.Now() | ||||
| 	creds, _ := r.Config.Credentials.Get() | ||||
| 
 | ||||
| 	m := metric{ | ||||
| 		ClientID:  aws.String(rep.clientID), | ||||
| 		API:       aws.String(r.Operation.Name), | ||||
| 		Service:   aws.String(r.ClientInfo.ServiceID), | ||||
| 		Timestamp: (*metricTime)(&now), | ||||
| 		UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")), | ||||
| 		Region:    r.Config.Region, | ||||
| 		Type:      aws.String("ApiCallAttempt"), | ||||
| 		Version:   aws.Int(1), | ||||
| 
 | ||||
| 		XAmzRequestID: aws.String(r.RequestID), | ||||
| 
 | ||||
| 		AttemptCount:   aws.Int(r.RetryCount + 1), | ||||
| 		AttemptLatency: aws.Int(int(now.Sub(r.AttemptTime).Nanoseconds() / int64(time.Millisecond))), | ||||
| 		AccessKey:      aws.String(creds.AccessKeyID), | ||||
| 	} | ||||
| 
 | ||||
| 	if r.HTTPResponse != nil { | ||||
| 		m.HTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode) | ||||
| 	} | ||||
| 
 | ||||
| 	if r.Error != nil { | ||||
| 		if awserr, ok := r.Error.(awserr.Error); ok { | ||||
| 			setError(&m, awserr) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	rep.metricsCh.Push(m) | ||||
| } | ||||
| 
 | ||||
| func setError(m *metric, err awserr.Error) { | ||||
| 	msg := err.Message() | ||||
| 	code := err.Code() | ||||
| 
 | ||||
| 	switch code { | ||||
| 	case "RequestError", | ||||
| 		"SerializationError", | ||||
| 		request.CanceledErrorCode: | ||||
| 
 | ||||
| 		m.SDKException = &code | ||||
| 		m.SDKExceptionMessage = &msg | ||||
| 	default: | ||||
| 		m.AWSException = &code | ||||
| 		m.AWSExceptionMessage = &msg | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func (rep *Reporter) sendAPICallMetric(r *request.Request) { | ||||
| 	if rep == nil { | ||||
| 		return | ||||
| 	} | ||||
| 
 | ||||
| 	now := time.Now() | ||||
| 	m := metric{ | ||||
| 		ClientID:      aws.String(rep.clientID), | ||||
| 		API:           aws.String(r.Operation.Name), | ||||
| 		Service:       aws.String(r.ClientInfo.ServiceID), | ||||
| 		Timestamp:     (*metricTime)(&now), | ||||
| 		Type:          aws.String("ApiCall"), | ||||
| 		AttemptCount:  aws.Int(r.RetryCount + 1), | ||||
| 		Latency:       aws.Int(int(time.Now().Sub(r.Time) / time.Millisecond)), | ||||
| 		XAmzRequestID: aws.String(r.RequestID), | ||||
| 	} | ||||
| 
 | ||||
| 	// TODO: Probably want to figure something out for logging dropped
 | ||||
| 	// metrics
 | ||||
| 	rep.metricsCh.Push(m) | ||||
| } | ||||
| 
 | ||||
| func (rep *Reporter) connect(network, url string) error { | ||||
| 	if rep.conn != nil { | ||||
| 		rep.conn.Close() | ||||
| 	} | ||||
| 
 | ||||
| 	conn, err := net.Dial(network, url) | ||||
| 	if err != nil { | ||||
| 		return awserr.New("UDPError", "Could not connect", err) | ||||
| 	} | ||||
| 
 | ||||
| 	rep.conn = conn | ||||
| 
 | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| func (rep *Reporter) close() { | ||||
| 	if rep.done != nil { | ||||
| 		close(rep.done) | ||||
| 	} | ||||
| 
 | ||||
| 	rep.metricsCh.Pause() | ||||
| } | ||||
| 
 | ||||
| func (rep *Reporter) start() { | ||||
| 	defer func() { | ||||
| 		rep.metricsCh.Pause() | ||||
| 	}() | ||||
| 
 | ||||
| 	for { | ||||
| 		select { | ||||
| 		case <-rep.done: | ||||
| 			rep.done = nil | ||||
| 			return | ||||
| 		case m := <-rep.metricsCh.ch: | ||||
| 			// TODO: What to do with this error? Probably should just log
 | ||||
| 			b, err := json.Marshal(m) | ||||
| 			if err != nil { | ||||
| 				continue | ||||
| 			} | ||||
| 
 | ||||
| 			rep.conn.Write(b) | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // Pause will pause the metric channel preventing any new metrics from
 | ||||
| // being added.
 | ||||
| func (rep *Reporter) Pause() { | ||||
| 	lock.Lock() | ||||
| 	defer lock.Unlock() | ||||
| 
 | ||||
| 	if rep == nil { | ||||
| 		return | ||||
| 	} | ||||
| 
 | ||||
| 	rep.close() | ||||
| } | ||||
| 
 | ||||
| // Continue will reopen the metric channel and allow for monitoring
 | ||||
| // to be resumed.
 | ||||
| func (rep *Reporter) Continue() { | ||||
| 	lock.Lock() | ||||
| 	defer lock.Unlock() | ||||
| 	if rep == nil { | ||||
| 		return | ||||
| 	} | ||||
| 
 | ||||
| 	if !rep.metricsCh.IsPaused() { | ||||
| 		return | ||||
| 	} | ||||
| 
 | ||||
| 	rep.metricsCh.Continue() | ||||
| } | ||||
| 
 | ||||
| // InjectHandlers will will enable client side metrics and inject the proper
 | ||||
| // handlers to handle how metrics are sent.
 | ||||
| //
 | ||||
| //	Example:
 | ||||
| //		// Start must be called in order to inject the correct handlers
 | ||||
| //		r, err := csm.Start("clientID", "127.0.0.1:8094")
 | ||||
| //		if err != nil {
 | ||||
| //			panic(fmt.Errorf("expected no error, but received %v", err))
 | ||||
| //		}
 | ||||
| //
 | ||||
| //		sess := session.NewSession()
 | ||||
| //		r.InjectHandlers(&sess.Handlers)
 | ||||
| //
 | ||||
| //		// create a new service client with our client side metric session
 | ||||
| //		svc := s3.New(sess)
 | ||||
| func (rep *Reporter) InjectHandlers(handlers *request.Handlers) { | ||||
| 	if rep == nil { | ||||
| 		return | ||||
| 	} | ||||
| 
 | ||||
| 	apiCallHandler := request.NamedHandler{Name: APICallMetricHandlerName, Fn: rep.sendAPICallMetric} | ||||
| 	handlers.Complete.PushFrontNamed(apiCallHandler) | ||||
| 
 | ||||
| 	apiCallAttemptHandler := request.NamedHandler{Name: APICallAttemptMetricHandlerName, Fn: rep.sendAPICallAttemptMetric} | ||||
| 	handlers.AfterRetry.PushFrontNamed(apiCallAttemptHandler) | ||||
| } | ||||
|  | @ -9,6 +9,7 @@ package defaults | |||
| 
 | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"net" | ||||
| 	"net/http" | ||||
| 	"net/url" | ||||
| 	"os" | ||||
|  | @ -72,6 +73,7 @@ func Handlers() request.Handlers { | |||
| 	handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler) | ||||
| 	handlers.Validate.AfterEachFn = request.HandlerListStopOnError | ||||
| 	handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler) | ||||
| 	handlers.Build.PushBackNamed(corehandlers.AddHostExecEnvUserAgentHander) | ||||
| 	handlers.Build.AfterEachFn = request.HandlerListStopOnError | ||||
| 	handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler) | ||||
| 	handlers.Send.PushBackNamed(corehandlers.ValidateReqSigHandler) | ||||
|  | @ -118,14 +120,43 @@ func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.P | |||
| 	return ec2RoleProvider(cfg, handlers) | ||||
| } | ||||
| 
 | ||||
// lookupHostFn resolves a hostname to its addresses. A package
// variable so tests can stub DNS resolution.
var lookupHostFn = net.LookupHost

// isLoopbackHost reports whether host (an IP literal or a hostname)
// refers exclusively to loopback addresses. For hostnames, every
// resolved address must be loopback.
func isLoopbackHost(host string) (bool, error) {
	if ip := net.ParseIP(host); ip != nil {
		return ip.IsLoopback(), nil
	}

	// Host is not an IP literal; resolve and require every address to
	// be loopback.
	addrs, err := lookupHostFn(host)
	if err != nil {
		return false, err
	}
	for _, addr := range addrs {
		// Guard against unparsable resolver output: the original code
		// called IsLoopback on a possibly nil IP, which would panic.
		ip := net.ParseIP(addr)
		if ip == nil || !ip.IsLoopback() {
			return false, nil
		}
	}

	return true, nil
}
| 
 | ||||
| func localHTTPCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider { | ||||
| 	var errMsg string | ||||
| 
 | ||||
| 	parsed, err := url.Parse(u) | ||||
| 	if err != nil { | ||||
| 		errMsg = fmt.Sprintf("invalid URL, %v", err) | ||||
| 	} else if host := aws.URLHostname(parsed); !(host == "localhost" || host == "127.0.0.1") { | ||||
| 		errMsg = fmt.Sprintf("invalid host address, %q, only localhost and 127.0.0.1 are valid.", host) | ||||
| 	} else { | ||||
| 		host := aws.URLHostname(parsed) | ||||
| 		if len(host) == 0 { | ||||
| 			errMsg = "unable to parse host from local HTTP cred provider URL" | ||||
| 		} else if isLoopback, loopbackErr := isLoopbackHost(host); loopbackErr != nil { | ||||
| 			errMsg = fmt.Sprintf("failed to resolve host %q, %v", host, loopbackErr) | ||||
| 		} else if !isLoopback { | ||||
| 			errMsg = fmt.Sprintf("invalid endpoint host, %q, only loopback hosts are allowed.", host) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	if len(errMsg) > 0 { | ||||
|  |  | |||
							
								
								
									
										24
									
								
								cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
								
								
									generated
								
								
									vendored
								
								
							
							
						
						
									
										24
									
								
								cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
								
								
									generated
								
								
									vendored
								
								
							|  | @ -1,5 +1,10 @@ | |||
| // Package ec2metadata provides the client for making API calls to the
 | ||||
| // EC2 Metadata service.
 | ||||
| //
 | ||||
| // This package's client can be disabled completely by setting the environment
 | ||||
| // variable "AWS_EC2_METADATA_DISABLED=true". This environment variable set to
 | ||||
| // true instructs the SDK to disable the EC2 Metadata client. The client cannot
 | ||||
| // be used while the environemnt variable is set to true, (case insensitive).
 | ||||
| package ec2metadata | ||||
| 
 | ||||
| import ( | ||||
|  | @ -7,17 +12,21 @@ import ( | |||
| 	"errors" | ||||
| 	"io" | ||||
| 	"net/http" | ||||
| 	"os" | ||||
| 	"strings" | ||||
| 	"time" | ||||
| 
 | ||||
| 	"github.com/aws/aws-sdk-go/aws" | ||||
| 	"github.com/aws/aws-sdk-go/aws/awserr" | ||||
| 	"github.com/aws/aws-sdk-go/aws/client" | ||||
| 	"github.com/aws/aws-sdk-go/aws/client/metadata" | ||||
| 	"github.com/aws/aws-sdk-go/aws/corehandlers" | ||||
| 	"github.com/aws/aws-sdk-go/aws/request" | ||||
| ) | ||||
| 
 | ||||
| // ServiceName is the name of the service.
 | ||||
| const ServiceName = "ec2metadata" | ||||
| const disableServiceEnvVar = "AWS_EC2_METADATA_DISABLED" | ||||
| 
 | ||||
| // A EC2Metadata is an EC2 Metadata service Client.
 | ||||
| type EC2Metadata struct { | ||||
|  | @ -75,6 +84,21 @@ func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio | |||
| 	svc.Handlers.Validate.Clear() | ||||
| 	svc.Handlers.Validate.PushBack(validateEndpointHandler) | ||||
| 
 | ||||
| 	// Disable the EC2 Metadata service if the environment variable is set.
 | ||||
| 	// This shortcirctes the service's functionality to always fail to send
 | ||||
| 	// requests.
 | ||||
| 	if strings.ToLower(os.Getenv(disableServiceEnvVar)) == "true" { | ||||
| 		svc.Handlers.Send.SwapNamed(request.NamedHandler{ | ||||
| 			Name: corehandlers.SendHandler.Name, | ||||
| 			Fn: func(r *request.Request) { | ||||
| 				r.Error = awserr.New( | ||||
| 					request.CanceledErrorCode, | ||||
| 					"EC2 IMDS access disabled via "+disableServiceEnvVar+" env var", | ||||
| 					nil) | ||||
| 			}, | ||||
| 		}) | ||||
| 	} | ||||
| 
 | ||||
| 	// Add additional options to the service config
 | ||||
| 	for _, option := range opts { | ||||
| 		option(svc.Client) | ||||
|  |  | |||
							
								
								
									
										737
									
								
								cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
								
								
									generated
								
								
									vendored
								
								
							
							
						
						
									
										737
									
								
								cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
								
								
									generated
								
								
									vendored
								
								
							
										
											
												File diff suppressed because it is too large
												Load Diff
											
										
									
								
							
							
								
								
									
										14
									
								
								cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go
								
								
									generated
								
								
									vendored
								
								
							
							
						
						
									
										14
									
								
								cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go
								
								
									generated
								
								
									vendored
								
								
							|  | @ -206,9 +206,10 @@ func (p Partition) EndpointFor(service, region string, opts ...func(*Options)) ( | |||
| // enumerating over the regions in a partition.
 | ||||
| func (p Partition) Regions() map[string]Region { | ||||
| 	rs := map[string]Region{} | ||||
| 	for id := range p.p.Regions { | ||||
| 	for id, r := range p.p.Regions { | ||||
| 		rs[id] = Region{ | ||||
| 			id:   id, | ||||
| 			desc: r.Description, | ||||
| 			p:    p.p, | ||||
| 		} | ||||
| 	} | ||||
|  | @ -240,6 +241,10 @@ type Region struct { | |||
| // ID returns the region's identifier.
 | ||||
| func (r Region) ID() string { return r.id } | ||||
| 
 | ||||
| // Description returns the region's description. The region description
 | ||||
| // is free text, it can be empty, and it may change between SDK releases.
 | ||||
| func (r Region) Description() string { return r.desc } | ||||
| 
 | ||||
| // ResolveEndpoint resolves an endpoint from the context of the region given
 | ||||
| // a service. See Partition.EndpointFor for usage and errors that can be returned.
 | ||||
| func (r Region) ResolveEndpoint(service string, opts ...func(*Options)) (ResolvedEndpoint, error) { | ||||
|  | @ -284,9 +289,10 @@ func (s Service) ResolveEndpoint(region string, opts ...func(*Options)) (Resolve | |||
| func (s Service) Regions() map[string]Region { | ||||
| 	rs := map[string]Region{} | ||||
| 	for id := range s.p.Services[s.id].Endpoints { | ||||
| 		if _, ok := s.p.Regions[id]; ok { | ||||
| 		if r, ok := s.p.Regions[id]; ok { | ||||
| 			rs[id] = Region{ | ||||
| 				id:   id, | ||||
| 				desc: r.Description, | ||||
| 				p:    s.p, | ||||
| 			} | ||||
| 		} | ||||
|  | @ -347,6 +353,10 @@ type ResolvedEndpoint struct { | |||
| 	// The service name that should be used for signing requests.
 | ||||
| 	SigningName string | ||||
| 
 | ||||
| 	// States that the signing name for this endpoint was derived from metadata
 | ||||
| 	// passed in, but was not explicitly modeled.
 | ||||
| 	SigningNameDerived bool | ||||
| 
 | ||||
| 	// The signing method that should be used for signing requests.
 | ||||
| 	SigningMethod string | ||||
| } | ||||
|  |  | |||
|  | @ -226,15 +226,19 @@ func (e endpoint) resolve(service, region, dnsSuffix string, defs []endpoint, op | |||
| 	if len(signingRegion) == 0 { | ||||
| 		signingRegion = region | ||||
| 	} | ||||
| 
 | ||||
| 	signingName := e.CredentialScope.Service | ||||
| 	var signingNameDerived bool | ||||
| 	if len(signingName) == 0 { | ||||
| 		signingName = service | ||||
| 		signingNameDerived = true | ||||
| 	} | ||||
| 
 | ||||
| 	return ResolvedEndpoint{ | ||||
| 		URL:                u, | ||||
| 		SigningRegion:      signingRegion, | ||||
| 		SigningName:        signingName, | ||||
| 		SigningNameDerived: signingNameDerived, | ||||
| 		SigningMethod:      getByPriority(e.SignatureVersions, signerPriority, defaultSigner), | ||||
| 	} | ||||
| } | ||||
|  |  | |||
|  | @ -71,6 +71,12 @@ const ( | |||
| 	// LogDebugWithRequestErrors states the SDK should log when service requests fail
 | ||||
| 	// to build, send, validate, or unmarshal.
 | ||||
| 	LogDebugWithRequestErrors | ||||
| 
 | ||||
| 	// LogDebugWithEventStreamBody states the SDK should log EventStream
 | ||||
| 	// request and response bodys. This should be used to log the EventStream
 | ||||
| 	// wire unmarshaled message content of requests and responses made while
 | ||||
| 	// using the SDK Will also enable LogDebug.
 | ||||
| 	LogDebugWithEventStreamBody | ||||
| ) | ||||
| 
 | ||||
| // A Logger is a minimalistic interface for the SDK to log messages to. Should
 | ||||
|  |  | |||
|  | @ -14,6 +14,7 @@ type Handlers struct { | |||
| 	Send             HandlerList | ||||
| 	ValidateResponse HandlerList | ||||
| 	Unmarshal        HandlerList | ||||
| 	UnmarshalStream  HandlerList | ||||
| 	UnmarshalMeta    HandlerList | ||||
| 	UnmarshalError   HandlerList | ||||
| 	Retry            HandlerList | ||||
|  | @ -30,6 +31,7 @@ func (h *Handlers) Copy() Handlers { | |||
| 		Send:             h.Send.copy(), | ||||
| 		ValidateResponse: h.ValidateResponse.copy(), | ||||
| 		Unmarshal:        h.Unmarshal.copy(), | ||||
| 		UnmarshalStream:  h.UnmarshalStream.copy(), | ||||
| 		UnmarshalError:   h.UnmarshalError.copy(), | ||||
| 		UnmarshalMeta:    h.UnmarshalMeta.copy(), | ||||
| 		Retry:            h.Retry.copy(), | ||||
|  | @ -45,6 +47,7 @@ func (h *Handlers) Clear() { | |||
| 	h.Send.Clear() | ||||
| 	h.Sign.Clear() | ||||
| 	h.Unmarshal.Clear() | ||||
| 	h.UnmarshalStream.Clear() | ||||
| 	h.UnmarshalMeta.Clear() | ||||
| 	h.UnmarshalError.Clear() | ||||
| 	h.ValidateResponse.Clear() | ||||
|  | @ -172,6 +175,21 @@ func (l *HandlerList) SwapNamed(n NamedHandler) (swapped bool) { | |||
| 	return swapped | ||||
| } | ||||
| 
 | ||||
| // Swap will swap out all handlers matching the name passed in. The matched
 | ||||
| // handlers will be swapped in. True is returned if the handlers were swapped.
 | ||||
| func (l *HandlerList) Swap(name string, replace NamedHandler) bool { | ||||
| 	var swapped bool | ||||
| 
 | ||||
| 	for i := 0; i < len(l.list); i++ { | ||||
| 		if l.list[i].Name == name { | ||||
| 			l.list[i] = replace | ||||
| 			swapped = true | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	return swapped | ||||
| } | ||||
| 
 | ||||
| // SetBackNamed will replace the named handler if it exists in the handler list.
 | ||||
| // If the handler does not exist the handler will be added to the end of the list.
 | ||||
| func (l *HandlerList) SetBackNamed(n NamedHandler) { | ||||
|  |  | |||
							
								
								
									
										4
									
								
								cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go
								
								
									generated
								
								
									vendored
								
								
							
							
						
						
									
										4
									
								
								cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go
								
								
									generated
								
								
									vendored
								
								
							|  | @ -3,6 +3,8 @@ package request | |||
| import ( | ||||
| 	"io" | ||||
| 	"sync" | ||||
| 
 | ||||
| 	"github.com/aws/aws-sdk-go/internal/sdkio" | ||||
| ) | ||||
| 
 | ||||
| // offsetReader is a thread-safe io.ReadCloser to prevent racing
 | ||||
|  | @ -15,7 +17,7 @@ type offsetReader struct { | |||
| 
 | ||||
| func newOffsetReader(buf io.ReadSeeker, offset int64) *offsetReader { | ||||
| 	reader := &offsetReader{} | ||||
| 	buf.Seek(offset, 0) | ||||
| 	buf.Seek(offset, sdkio.SeekStart) | ||||
| 
 | ||||
| 	reader.buf = buf | ||||
| 	return reader | ||||
|  |  | |||
|  | @ -14,6 +14,7 @@ import ( | |||
| 	"github.com/aws/aws-sdk-go/aws" | ||||
| 	"github.com/aws/aws-sdk-go/aws/awserr" | ||||
| 	"github.com/aws/aws-sdk-go/aws/client/metadata" | ||||
| 	"github.com/aws/aws-sdk-go/internal/sdkio" | ||||
| ) | ||||
| 
 | ||||
| const ( | ||||
|  | @ -28,6 +29,10 @@ const ( | |||
| 	// during body reads.
 | ||||
| 	ErrCodeResponseTimeout = "ResponseTimeout" | ||||
| 
 | ||||
| 	// ErrCodeInvalidPresignExpire is returned when the expire time provided to
 | ||||
| 	// presign is invalid
 | ||||
| 	ErrCodeInvalidPresignExpire = "InvalidPresignExpireError" | ||||
| 
 | ||||
| 	// CanceledErrorCode is the error code that will be returned by an
 | ||||
| 	// API request that was canceled. Requests given a aws.Context may
 | ||||
| 	// return this error when canceled.
 | ||||
|  | @ -41,8 +46,8 @@ type Request struct { | |||
| 	Handlers   Handlers | ||||
| 
 | ||||
| 	Retryer | ||||
| 	AttemptTime            time.Time | ||||
| 	Time                   time.Time | ||||
| 	ExpireTime             time.Duration | ||||
| 	Operation              *Operation | ||||
| 	HTTPRequest            *http.Request | ||||
| 	HTTPResponse           *http.Response | ||||
|  | @ -60,6 +65,11 @@ type Request struct { | |||
| 	LastSignedAt           time.Time | ||||
| 	DisableFollowRedirects bool | ||||
| 
 | ||||
| 	// A value greater than 0 instructs the request to be signed as Presigned URL
 | ||||
| 	// You should not set this field directly. Instead use Request's
 | ||||
| 	// Presign or PresignRequest methods.
 | ||||
| 	ExpireTime time.Duration | ||||
| 
 | ||||
| 	context aws.Context | ||||
| 
 | ||||
| 	built bool | ||||
|  | @ -104,12 +114,15 @@ func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers, | |||
| 		err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err) | ||||
| 	} | ||||
| 
 | ||||
| 	SanitizeHostForHeader(httpReq) | ||||
| 
 | ||||
| 	r := &Request{ | ||||
| 		Config:     cfg, | ||||
| 		ClientInfo: clientInfo, | ||||
| 		Handlers:   handlers.Copy(), | ||||
| 
 | ||||
| 		Retryer:     retryer, | ||||
| 		AttemptTime: time.Now(), | ||||
| 		Time:        time.Now(), | ||||
| 		ExpireTime:  0, | ||||
| 		Operation:   operation, | ||||
|  | @ -214,6 +227,9 @@ func (r *Request) SetContext(ctx aws.Context) { | |||
| 
 | ||||
| // WillRetry returns if the request's can be retried.
 | ||||
| func (r *Request) WillRetry() bool { | ||||
| 	if !aws.IsReaderSeekable(r.Body) && r.HTTPRequest.Body != NoBody { | ||||
| 		return false | ||||
| 	} | ||||
| 	return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries() | ||||
| } | ||||
| 
 | ||||
|  | @ -245,45 +261,70 @@ func (r *Request) SetStringBody(s string) { | |||
| // SetReaderBody will set the request's body reader.
 | ||||
| func (r *Request) SetReaderBody(reader io.ReadSeeker) { | ||||
| 	r.Body = reader | ||||
| 	r.BodyStart, _ = reader.Seek(0, sdkio.SeekCurrent) // Get the Bodies current offset.
 | ||||
| 	r.ResetBody() | ||||
| } | ||||
| 
 | ||||
| // Presign returns the request's signed URL. Error will be returned
 | ||||
| // if the signing fails.
 | ||||
| func (r *Request) Presign(expireTime time.Duration) (string, error) { | ||||
| 	r.ExpireTime = expireTime | ||||
| //
 | ||||
| // It is invalid to create a presigned URL with a expire duration 0 or less. An
 | ||||
| // error is returned if expire duration is 0 or less.
 | ||||
| func (r *Request) Presign(expire time.Duration) (string, error) { | ||||
| 	r = r.copy() | ||||
| 
 | ||||
| 	// Presign requires all headers be hoisted. There is no way to retrieve
 | ||||
| 	// the signed headers not hoisted without this. Making the presigned URL
 | ||||
| 	// useless.
 | ||||
| 	r.NotHoist = false | ||||
| 
 | ||||
| 	if r.Operation.BeforePresignFn != nil { | ||||
| 		r = r.copy() | ||||
| 		err := r.Operation.BeforePresignFn(r) | ||||
| 		if err != nil { | ||||
| 			return "", err | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	r.Sign() | ||||
| 	if r.Error != nil { | ||||
| 		return "", r.Error | ||||
| 	} | ||||
| 	return r.HTTPRequest.URL.String(), nil | ||||
| 	u, _, err := getPresignedURL(r, expire) | ||||
| 	return u, err | ||||
| } | ||||
| 
 | ||||
| // PresignRequest behaves just like presign, with the addition of returning a
 | ||||
| // set of headers that were signed.
 | ||||
| //
 | ||||
| // It is invalid to create a presigned URL with a expire duration 0 or less. An
 | ||||
| // error is returned if expire duration is 0 or less.
 | ||||
| //
 | ||||
| // Returns the URL string for the API operation with signature in the query string,
 | ||||
| // and the HTTP headers that were included in the signature. These headers must
 | ||||
| // be included in any HTTP request made with the presigned URL.
 | ||||
| //
 | ||||
| // To prevent hoisting any headers to the query string set NotHoist to true on
 | ||||
| // this Request value prior to calling PresignRequest.
 | ||||
| func (r *Request) PresignRequest(expireTime time.Duration) (string, http.Header, error) { | ||||
| 	r.ExpireTime = expireTime | ||||
| 	r.Sign() | ||||
| 	if r.Error != nil { | ||||
| 		return "", nil, r.Error | ||||
| func (r *Request) PresignRequest(expire time.Duration) (string, http.Header, error) { | ||||
| 	r = r.copy() | ||||
| 	return getPresignedURL(r, expire) | ||||
| } | ||||
| 
 | ||||
| // IsPresigned returns true if the request represents a presigned API url.
 | ||||
| func (r *Request) IsPresigned() bool { | ||||
| 	return r.ExpireTime != 0 | ||||
| } | ||||
| 
 | ||||
| func getPresignedURL(r *Request, expire time.Duration) (string, http.Header, error) { | ||||
| 	if expire <= 0 { | ||||
| 		return "", nil, awserr.New( | ||||
| 			ErrCodeInvalidPresignExpire, | ||||
| 			"presigned URL requires an expire duration greater than 0", | ||||
| 			nil, | ||||
| 		) | ||||
| 	} | ||||
| 
 | ||||
| 	r.ExpireTime = expire | ||||
| 
 | ||||
| 	if r.Operation.BeforePresignFn != nil { | ||||
| 		if err := r.Operation.BeforePresignFn(r); err != nil { | ||||
| 			return "", nil, err | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	if err := r.Sign(); err != nil { | ||||
| 		return "", nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil | ||||
| } | ||||
| 
 | ||||
|  | @ -303,7 +344,7 @@ func debugLogReqError(r *Request, stage string, retrying bool, err error) { | |||
| 
 | ||||
| // Build will build the request's object so it can be signed and sent
 | ||||
| // to the service. Build will also validate all the request's parameters.
 | ||||
| // Anny additional build Handlers set on this request will be run
 | ||||
| // Any additional build Handlers set on this request will be run
 | ||||
| // in the order they were set.
 | ||||
| //
 | ||||
| // The request will only be built once. Multiple calls to build will have
 | ||||
|  | @ -329,9 +370,9 @@ func (r *Request) Build() error { | |||
| 	return r.Error | ||||
| } | ||||
| 
 | ||||
| // Sign will sign the request returning error if errors are encountered.
 | ||||
| // Sign will sign the request, returning error if errors are encountered.
 | ||||
| //
 | ||||
| // Send will build the request prior to signing. All Sign Handlers will
 | ||||
| // Sign will build the request prior to signing. All Sign Handlers will
 | ||||
| // be executed in the order they were set.
 | ||||
| func (r *Request) Sign() error { | ||||
| 	r.Build() | ||||
|  | @ -364,7 +405,7 @@ func (r *Request) getNextRequestBody() (io.ReadCloser, error) { | |||
| 	// of the SDK if they used that field.
 | ||||
| 	//
 | ||||
| 	// Related golang/go#18257
 | ||||
| 	l, err := computeBodyLength(r.Body) | ||||
| 	l, err := aws.SeekerLen(r.Body) | ||||
| 	if err != nil { | ||||
| 		return nil, awserr.New(ErrCodeSerialization, "failed to compute request body size", err) | ||||
| 	} | ||||
|  | @ -382,7 +423,8 @@ func (r *Request) getNextRequestBody() (io.ReadCloser, error) { | |||
| 		// Transfer-Encoding: chunked bodies for these methods.
 | ||||
| 		//
 | ||||
| 		// This would only happen if a aws.ReaderSeekerCloser was used with
 | ||||
| 		// a io.Reader that was not also an io.Seeker.
 | ||||
| 		// a io.Reader that was not also an io.Seeker, or did not implement
 | ||||
| 		// Len() method.
 | ||||
| 		switch r.Operation.HTTPMethod { | ||||
| 		case "GET", "HEAD", "DELETE": | ||||
| 			body = NoBody | ||||
|  | @ -394,49 +436,13 @@ func (r *Request) getNextRequestBody() (io.ReadCloser, error) { | |||
| 	return body, nil | ||||
| } | ||||
| 
 | ||||
| // Attempts to compute the length of the body of the reader using the
 | ||||
| // io.Seeker interface. If the value is not seekable because of being
 | ||||
| // a ReaderSeekerCloser without an unerlying Seeker -1 will be returned.
 | ||||
| // If no error occurs the length of the body will be returned.
 | ||||
| func computeBodyLength(r io.ReadSeeker) (int64, error) { | ||||
| 	seekable := true | ||||
| 	// Determine if the seeker is actually seekable. ReaderSeekerCloser
 | ||||
| 	// hides the fact that a io.Readers might not actually be seekable.
 | ||||
| 	switch v := r.(type) { | ||||
| 	case aws.ReaderSeekerCloser: | ||||
| 		seekable = v.IsSeeker() | ||||
| 	case *aws.ReaderSeekerCloser: | ||||
| 		seekable = v.IsSeeker() | ||||
| 	} | ||||
| 	if !seekable { | ||||
| 		return -1, nil | ||||
| 	} | ||||
| 
 | ||||
| 	curOffset, err := r.Seek(0, 1) | ||||
| 	if err != nil { | ||||
| 		return 0, err | ||||
| 	} | ||||
| 
 | ||||
| 	endOffset, err := r.Seek(0, 2) | ||||
| 	if err != nil { | ||||
| 		return 0, err | ||||
| 	} | ||||
| 
 | ||||
| 	_, err = r.Seek(curOffset, 0) | ||||
| 	if err != nil { | ||||
| 		return 0, err | ||||
| 	} | ||||
| 
 | ||||
| 	return endOffset - curOffset, nil | ||||
| } | ||||
| 
 | ||||
| // GetBody will return an io.ReadSeeker of the Request's underlying
 | ||||
| // input body with a concurrency safe wrapper.
 | ||||
| func (r *Request) GetBody() io.ReadSeeker { | ||||
| 	return r.safeBody | ||||
| } | ||||
| 
 | ||||
| // Send will send the request returning error if errors are encountered.
 | ||||
| // Send will send the request, returning error if errors are encountered.
 | ||||
| //
 | ||||
| // Send will sign the request prior to sending. All Send Handlers will
 | ||||
| // be executed in the order they were set.
 | ||||
|  | @ -457,6 +463,7 @@ func (r *Request) Send() error { | |||
| 	}() | ||||
| 
 | ||||
| 	for { | ||||
| 		r.AttemptTime = time.Now() | ||||
| 		if aws.BoolValue(r.Retryable) { | ||||
| 			if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) { | ||||
| 				r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d", | ||||
|  | @ -579,3 +586,72 @@ func shouldRetryCancel(r *Request) bool { | |||
| 			errStr != "net/http: request canceled while waiting for connection") | ||||
| 
 | ||||
| } | ||||
| 
 | ||||
| // SanitizeHostForHeader removes default port from host and updates request.Host
 | ||||
| func SanitizeHostForHeader(r *http.Request) { | ||||
| 	host := getHost(r) | ||||
| 	port := portOnly(host) | ||||
| 	if port != "" && isDefaultPort(r.URL.Scheme, port) { | ||||
| 		r.Host = stripPort(host) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // Returns host from request
 | ||||
| func getHost(r *http.Request) string { | ||||
| 	if r.Host != "" { | ||||
| 		return r.Host | ||||
| 	} | ||||
| 
 | ||||
| 	return r.URL.Host | ||||
| } | ||||
| 
 | ||||
// stripPort returns hostport without any port number.
//
// If hostport is an IPv6 literal with a port number, stripPort returns
// the IPv6 literal without the square brackets. IPv6 literals may
// include a zone identifier.
//
// Copied from the Go 1.8 standard library (net/url)
func stripPort(hostport string) string {
	colon := strings.IndexByte(hostport, ':')
	if colon == -1 {
		// No port present at all.
		return hostport
	}
	if bracket := strings.IndexByte(hostport, ']'); bracket != -1 {
		// Bracketed IPv6 literal: drop the brackets and anything after.
		return strings.TrimPrefix(hostport[:bracket], "[")
	}
	return hostport[:colon]
}
| 
 | ||||
// portOnly returns the port part of hostport, without the leading
// colon. If hostport doesn't contain a port, portOnly returns an empty
// string.
//
// Copied from the Go 1.8 standard library (net/url)
func portOnly(hostport string) string {
	colon := strings.IndexByte(hostport, ':')
	if colon == -1 {
		return ""
	}
	if i := strings.Index(hostport, "]:"); i != -1 {
		// Bracketed IPv6 literal followed by a port.
		return hostport[i+2:]
	}
	if strings.Contains(hostport, "]") {
		// Bracketed IPv6 literal with no port.
		return ""
	}
	return hostport[colon+1:]
}
| 
 | ||||
| // Returns true if the specified URI is using the standard port
 | ||||
| // (i.e. port 80 for HTTP URIs or 443 for HTTPS URIs)
 | ||||
| func isDefaultPort(scheme, port string) bool { | ||||
| 	if port == "" { | ||||
| 		return true | ||||
| 	} | ||||
| 
 | ||||
| 	lowerCaseScheme := strings.ToLower(scheme) | ||||
| 	if (lowerCaseScheme == "http" && port == "80") || (lowerCaseScheme == "https" && port == "443") { | ||||
| 		return true | ||||
| 	} | ||||
| 
 | ||||
| 	return false | ||||
| } | ||||
|  |  | |||
|  | @ -21,7 +21,7 @@ func (noBody) WriteTo(io.Writer) (int64, error) { return 0, nil } | |||
| var NoBody = noBody{} | ||||
| 
 | ||||
| // ResetBody rewinds the request body back to its starting position, and
 | ||||
| // set's the HTTP Request body reference. When the body is read prior
 | ||||
| // sets the HTTP Request body reference. When the body is read prior
 | ||||
| // to being sent in the HTTP request it will need to be rewound.
 | ||||
| //
 | ||||
| // ResetBody will automatically be called by the SDK's build handler, but if
 | ||||
|  |  | |||
|  | @ -11,7 +11,7 @@ import ( | |||
| var NoBody = http.NoBody | ||||
| 
 | ||||
| // ResetBody rewinds the request body back to its starting position, and
 | ||||
| // set's the HTTP Request body reference. When the body is read prior
 | ||||
| // sets the HTTP Request body reference. When the body is read prior
 | ||||
| // to being sent in the HTTP request it will need to be rewound.
 | ||||
| //
 | ||||
| // ResetBody will automatically be called by the SDK's build handler, but if
 | ||||
|  |  | |||
							
								
								
									
										40
									
								
								cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go
								
								
									generated
								
								
									vendored
								
								
							
							
						
						
									
										40
									
								
								cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go
								
								
									generated
								
								
									vendored
								
								
							|  | @ -35,8 +35,12 @@ type Pagination struct { | |||
| 	// NewRequest should always be built from the same API operations. It is
 | ||||
| 	// undefined if different API operations are returned on subsequent calls.
 | ||||
| 	NewRequest func() (*Request, error) | ||||
| 	// EndPageOnSameToken, when enabled, will allow the paginator to stop on
 | ||||
| 	// token that are the same as its previous tokens.
 | ||||
| 	EndPageOnSameToken bool | ||||
| 
 | ||||
| 	started    bool | ||||
| 	prevTokens []interface{} | ||||
| 	nextTokens []interface{} | ||||
| 
 | ||||
| 	err     error | ||||
|  | @ -49,7 +53,15 @@ type Pagination struct { | |||
| //
 | ||||
| // Will always return true if Next has not been called yet.
 | ||||
| func (p *Pagination) HasNextPage() bool { | ||||
| 	return !(p.started && len(p.nextTokens) == 0) | ||||
| 	if !p.started { | ||||
| 		return true | ||||
| 	} | ||||
| 
 | ||||
| 	hasNextPage := len(p.nextTokens) != 0 | ||||
| 	if p.EndPageOnSameToken { | ||||
| 		return hasNextPage && !awsutil.DeepEqual(p.nextTokens, p.prevTokens) | ||||
| 	} | ||||
| 	return hasNextPage | ||||
| } | ||||
| 
 | ||||
| // Err returns the error Pagination encountered when retrieving the next page.
 | ||||
|  | @ -96,6 +108,7 @@ func (p *Pagination) Next() bool { | |||
| 		return false | ||||
| 	} | ||||
| 
 | ||||
| 	p.prevTokens = p.nextTokens | ||||
| 	p.nextTokens = req.nextPageTokens() | ||||
| 	p.curPage = req.Data | ||||
| 
 | ||||
|  | @ -142,13 +155,28 @@ func (r *Request) nextPageTokens() []interface{} { | |||
| 	tokens := []interface{}{} | ||||
| 	tokenAdded := false | ||||
| 	for _, outToken := range r.Operation.OutputTokens { | ||||
| 		v, _ := awsutil.ValuesAtPath(r.Data, outToken) | ||||
| 		if len(v) > 0 { | ||||
| 			tokens = append(tokens, v[0]) | ||||
| 			tokenAdded = true | ||||
| 		} else { | ||||
| 		vs, _ := awsutil.ValuesAtPath(r.Data, outToken) | ||||
| 		if len(vs) == 0 { | ||||
| 			tokens = append(tokens, nil) | ||||
| 			continue | ||||
| 		} | ||||
| 		v := vs[0] | ||||
| 
 | ||||
| 		switch tv := v.(type) { | ||||
| 		case *string: | ||||
| 			if len(aws.StringValue(tv)) == 0 { | ||||
| 				tokens = append(tokens, nil) | ||||
| 				continue | ||||
| 			} | ||||
| 		case string: | ||||
| 			if len(tv) == 0 { | ||||
| 				tokens = append(tokens, nil) | ||||
| 				continue | ||||
| 			} | ||||
| 		} | ||||
| 
 | ||||
| 		tokenAdded = true | ||||
| 		tokens = append(tokens, v) | ||||
| 	} | ||||
| 	if !tokenAdded { | ||||
| 		return nil | ||||
|  |  | |||
|  | @ -5,6 +5,7 @@ import ( | |||
| 	"strconv" | ||||
| 
 | ||||
| 	"github.com/aws/aws-sdk-go/aws/credentials" | ||||
| 	"github.com/aws/aws-sdk-go/aws/defaults" | ||||
| ) | ||||
| 
 | ||||
| // EnvProviderName provides a name of the provider when config is loaded from environment.
 | ||||
|  | @ -95,9 +96,23 @@ type envConfig struct { | |||
| 	//
 | ||||
| 	//  AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle
 | ||||
| 	CustomCABundle string | ||||
| 
 | ||||
| 	csmEnabled  string | ||||
| 	CSMEnabled  bool | ||||
| 	CSMPort     string | ||||
| 	CSMClientID string | ||||
| } | ||||
| 
 | ||||
| var ( | ||||
| 	csmEnabledEnvKey = []string{ | ||||
| 		"AWS_CSM_ENABLED", | ||||
| 	} | ||||
| 	csmPortEnvKey = []string{ | ||||
| 		"AWS_CSM_PORT", | ||||
| 	} | ||||
| 	csmClientIDEnvKey = []string{ | ||||
| 		"AWS_CSM_CLIENT_ID", | ||||
| 	} | ||||
| 	credAccessEnvKey = []string{ | ||||
| 		"AWS_ACCESS_KEY_ID", | ||||
| 		"AWS_ACCESS_KEY", | ||||
|  | @ -156,6 +171,12 @@ func envConfigLoad(enableSharedConfig bool) envConfig { | |||
| 	setFromEnvVal(&cfg.Creds.SecretAccessKey, credSecretEnvKey) | ||||
| 	setFromEnvVal(&cfg.Creds.SessionToken, credSessionEnvKey) | ||||
| 
 | ||||
| 	// CSM environment variables
 | ||||
| 	setFromEnvVal(&cfg.csmEnabled, csmEnabledEnvKey) | ||||
| 	setFromEnvVal(&cfg.CSMPort, csmPortEnvKey) | ||||
| 	setFromEnvVal(&cfg.CSMClientID, csmClientIDEnvKey) | ||||
| 	cfg.CSMEnabled = len(cfg.csmEnabled) > 0 | ||||
| 
 | ||||
| 	// Require logical grouping of credentials
 | ||||
| 	if len(cfg.Creds.AccessKeyID) == 0 || len(cfg.Creds.SecretAccessKey) == 0 { | ||||
| 		cfg.Creds = credentials.Value{} | ||||
|  | @ -176,6 +197,13 @@ func envConfigLoad(enableSharedConfig bool) envConfig { | |||
| 	setFromEnvVal(&cfg.SharedCredentialsFile, sharedCredsFileEnvKey) | ||||
| 	setFromEnvVal(&cfg.SharedConfigFile, sharedConfigFileEnvKey) | ||||
| 
 | ||||
| 	if len(cfg.SharedCredentialsFile) == 0 { | ||||
| 		cfg.SharedCredentialsFile = defaults.SharedCredentialsFilename() | ||||
| 	} | ||||
| 	if len(cfg.SharedConfigFile) == 0 { | ||||
| 		cfg.SharedConfigFile = defaults.SharedConfigFilename() | ||||
| 	} | ||||
| 
 | ||||
| 	cfg.CustomCABundle = os.Getenv("AWS_CA_BUNDLE") | ||||
| 
 | ||||
| 	return cfg | ||||
|  |  | |||
|  | @ -15,6 +15,7 @@ import ( | |||
| 	"github.com/aws/aws-sdk-go/aws/corehandlers" | ||||
| 	"github.com/aws/aws-sdk-go/aws/credentials" | ||||
| 	"github.com/aws/aws-sdk-go/aws/credentials/stscreds" | ||||
| 	"github.com/aws/aws-sdk-go/aws/csm" | ||||
| 	"github.com/aws/aws-sdk-go/aws/defaults" | ||||
| 	"github.com/aws/aws-sdk-go/aws/endpoints" | ||||
| 	"github.com/aws/aws-sdk-go/aws/request" | ||||
|  | @ -26,7 +27,7 @@ import ( | |||
| // Sessions are safe to create service clients concurrently, but it is not safe
 | ||||
| // to mutate the Session concurrently.
 | ||||
| //
 | ||||
| // The Session satisfies the service client's client.ClientConfigProvider.
 | ||||
| // The Session satisfies the service client's client.ConfigProvider.
 | ||||
| type Session struct { | ||||
| 	Config   *aws.Config | ||||
| 	Handlers request.Handlers | ||||
|  | @ -58,7 +59,12 @@ func New(cfgs ...*aws.Config) *Session { | |||
| 	envCfg := loadEnvConfig() | ||||
| 
 | ||||
| 	if envCfg.EnableSharedConfig { | ||||
| 		s, err := newSession(Options{}, envCfg, cfgs...) | ||||
| 		var cfg aws.Config | ||||
| 		cfg.MergeIn(cfgs...) | ||||
| 		s, err := NewSessionWithOptions(Options{ | ||||
| 			Config:            cfg, | ||||
| 			SharedConfigState: SharedConfigEnable, | ||||
| 		}) | ||||
| 		if err != nil { | ||||
| 			// Old session.New expected all errors to be discovered when
 | ||||
| 			// a request is made, and would report the errors then. This
 | ||||
|  | @ -76,10 +82,16 @@ func New(cfgs ...*aws.Config) *Session { | |||
| 				r.Error = err | ||||
| 			}) | ||||
| 		} | ||||
| 
 | ||||
| 		return s | ||||
| 	} | ||||
| 
 | ||||
| 	return deprecatedNewSession(cfgs...) | ||||
| 	s := deprecatedNewSession(cfgs...) | ||||
| 	if envCfg.CSMEnabled { | ||||
| 		enableCSM(&s.Handlers, envCfg.CSMClientID, envCfg.CSMPort, s.Config.Logger) | ||||
| 	} | ||||
| 
 | ||||
| 	return s | ||||
| } | ||||
| 
 | ||||
| // NewSession returns a new Session created from SDK defaults, config files,
 | ||||
|  | @ -243,13 +255,6 @@ func NewSessionWithOptions(opts Options) (*Session, error) { | |||
| 		envCfg.EnableSharedConfig = true | ||||
| 	} | ||||
| 
 | ||||
| 	if len(envCfg.SharedCredentialsFile) == 0 { | ||||
| 		envCfg.SharedCredentialsFile = defaults.SharedCredentialsFilename() | ||||
| 	} | ||||
| 	if len(envCfg.SharedConfigFile) == 0 { | ||||
| 		envCfg.SharedConfigFile = defaults.SharedConfigFilename() | ||||
| 	} | ||||
| 
 | ||||
| 	// Only use AWS_CA_BUNDLE if session option is not provided.
 | ||||
| 	if len(envCfg.CustomCABundle) != 0 && opts.CustomCABundle == nil { | ||||
| 		f, err := os.Open(envCfg.CustomCABundle) | ||||
|  | @ -302,10 +307,22 @@ func deprecatedNewSession(cfgs ...*aws.Config) *Session { | |||
| 	} | ||||
| 
 | ||||
| 	initHandlers(s) | ||||
| 
 | ||||
| 	return s | ||||
| } | ||||
| 
 | ||||
| func enableCSM(handlers *request.Handlers, clientID string, port string, logger aws.Logger) { | ||||
| 	logger.Log("Enabling CSM") | ||||
| 	if len(port) == 0 { | ||||
| 		port = csm.DefaultPort | ||||
| 	} | ||||
| 
 | ||||
| 	r, err := csm.Start(clientID, "127.0.0.1:"+port) | ||||
| 	if err != nil { | ||||
| 		return | ||||
| 	} | ||||
| 	r.InjectHandlers(handlers) | ||||
| } | ||||
| 
 | ||||
| func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, error) { | ||||
| 	cfg := defaults.Config() | ||||
| 	handlers := defaults.Handlers() | ||||
|  | @ -345,6 +362,9 @@ func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, | |||
| 	} | ||||
| 
 | ||||
| 	initHandlers(s) | ||||
| 	if envCfg.CSMEnabled { | ||||
| 		enableCSM(&s.Handlers, envCfg.CSMClientID, envCfg.CSMPort, s.Config.Logger) | ||||
| 	} | ||||
| 
 | ||||
| 	// Setup HTTP client with custom cert bundle if enabled
 | ||||
| 	if opts.CustomCABundle != nil { | ||||
|  | @ -577,6 +597,7 @@ func (s *Session) clientConfigWithErr(serviceName string, cfgs ...*aws.Config) ( | |||
| 		Handlers:           s.Handlers, | ||||
| 		Endpoint:           resolved.URL, | ||||
| 		SigningRegion:      resolved.SigningRegion, | ||||
| 		SigningNameDerived: resolved.SigningNameDerived, | ||||
| 		SigningName:        resolved.SigningName, | ||||
| 	}, err | ||||
| } | ||||
|  | @ -601,6 +622,7 @@ func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Conf | |||
| 		Handlers:           s.Handlers, | ||||
| 		Endpoint:           resolved.URL, | ||||
| 		SigningRegion:      resolved.SigningRegion, | ||||
| 		SigningNameDerived: resolved.SigningNameDerived, | ||||
| 		SigningName:        resolved.SigningName, | ||||
| 	} | ||||
| } | ||||
|  |  | |||
|  | @ -71,6 +71,7 @@ import ( | |||
| 	"github.com/aws/aws-sdk-go/aws" | ||||
| 	"github.com/aws/aws-sdk-go/aws/credentials" | ||||
| 	"github.com/aws/aws-sdk-go/aws/request" | ||||
| 	"github.com/aws/aws-sdk-go/internal/sdkio" | ||||
| 	"github.com/aws/aws-sdk-go/private/protocol/rest" | ||||
| ) | ||||
| 
 | ||||
|  | @ -134,6 +135,7 @@ var requiredSignedHeaders = rules{ | |||
| 			"X-Amz-Server-Side-Encryption-Customer-Key-Md5":               struct{}{}, | ||||
| 			"X-Amz-Storage-Class":                                         struct{}{}, | ||||
| 			"X-Amz-Website-Redirect-Location":                             struct{}{}, | ||||
| 			"X-Amz-Content-Sha256":                                        struct{}{}, | ||||
| 		}, | ||||
| 	}, | ||||
| 	patterns{"X-Amz-Meta-"}, | ||||
|  | @ -268,7 +270,7 @@ type signingCtx struct { | |||
| // "X-Amz-Content-Sha256" header with a precomputed value. The signer will
 | ||||
| // only compute the hash if the request header value is empty.
 | ||||
| func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region string, signTime time.Time) (http.Header, error) { | ||||
| 	return v4.signWithBody(r, body, service, region, 0, signTime) | ||||
| 	return v4.signWithBody(r, body, service, region, 0, false, signTime) | ||||
| } | ||||
| 
 | ||||
| // Presign signs AWS v4 requests with the provided body, service name, region
 | ||||
|  | @ -302,10 +304,10 @@ func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region strin | |||
| // presigned request's signature you can set the "X-Amz-Content-Sha256"
 | ||||
| // HTTP header and that will be included in the request's signature.
 | ||||
| func (v4 Signer) Presign(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) { | ||||
| 	return v4.signWithBody(r, body, service, region, exp, signTime) | ||||
| 	return v4.signWithBody(r, body, service, region, exp, true, signTime) | ||||
| } | ||||
| 
 | ||||
| func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) { | ||||
| func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, isPresign bool, signTime time.Time) (http.Header, error) { | ||||
| 	currentTimeFn := v4.currentTimeFn | ||||
| 	if currentTimeFn == nil { | ||||
| 		currentTimeFn = time.Now | ||||
|  | @ -317,7 +319,7 @@ func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, regi | |||
| 		Query:                  r.URL.Query(), | ||||
| 		Time:                   signTime, | ||||
| 		ExpireTime:             exp, | ||||
| 		isPresign:              exp != 0, | ||||
| 		isPresign:              isPresign, | ||||
| 		ServiceName:            service, | ||||
| 		Region:                 region, | ||||
| 		DisableURIPathEscaping: v4.DisableURIPathEscaping, | ||||
|  | @ -339,8 +341,11 @@ func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, regi | |||
| 		return http.Header{}, err | ||||
| 	} | ||||
| 
 | ||||
| 	ctx.sanitizeHostForHeader() | ||||
| 	ctx.assignAmzQueryValues() | ||||
| 	ctx.build(v4.DisableHeaderHoisting) | ||||
| 	if err := ctx.build(v4.DisableHeaderHoisting); err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	// If the request is not presigned the body should be attached to it. This
 | ||||
| 	// prevents the confusion of wanting to send a signed request without
 | ||||
|  | @ -363,6 +368,10 @@ func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, regi | |||
| 	return ctx.SignedHeaderVals, nil | ||||
| } | ||||
| 
 | ||||
| func (ctx *signingCtx) sanitizeHostForHeader() { | ||||
| 	request.SanitizeHostForHeader(ctx.Request) | ||||
| } | ||||
| 
 | ||||
| func (ctx *signingCtx) handlePresignRemoval() { | ||||
| 	if !ctx.isPresign { | ||||
| 		return | ||||
|  | @ -467,7 +476,7 @@ func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time | |||
| 	} | ||||
| 
 | ||||
| 	signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.GetBody(), | ||||
| 		name, region, req.ExpireTime, signingTime, | ||||
| 		name, region, req.ExpireTime, req.ExpireTime > 0, signingTime, | ||||
| 	) | ||||
| 	if err != nil { | ||||
| 		req.Error = err | ||||
|  | @ -498,11 +507,13 @@ func (v4 *Signer) logSigningInfo(ctx *signingCtx) { | |||
| 	v4.Logger.Log(msg) | ||||
| } | ||||
| 
 | ||||
| func (ctx *signingCtx) build(disableHeaderHoisting bool) { | ||||
| func (ctx *signingCtx) build(disableHeaderHoisting bool) error { | ||||
| 	ctx.buildTime()             // no depends
 | ||||
| 	ctx.buildCredentialString() // no depends
 | ||||
| 
 | ||||
| 	ctx.buildBodyDigest() | ||||
| 	if err := ctx.buildBodyDigest(); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	unsignedHeaders := ctx.Request.Header | ||||
| 	if ctx.isPresign { | ||||
|  | @ -530,6 +541,8 @@ func (ctx *signingCtx) build(disableHeaderHoisting bool) { | |||
| 		} | ||||
| 		ctx.Request.Header.Set("Authorization", strings.Join(parts, ", ")) | ||||
| 	} | ||||
| 
 | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| func (ctx *signingCtx) buildTime() { | ||||
|  | @ -656,21 +669,34 @@ func (ctx *signingCtx) buildSignature() { | |||
| 	ctx.signature = hex.EncodeToString(signature) | ||||
| } | ||||
| 
 | ||||
| func (ctx *signingCtx) buildBodyDigest() { | ||||
| func (ctx *signingCtx) buildBodyDigest() error { | ||||
| 	hash := ctx.Request.Header.Get("X-Amz-Content-Sha256") | ||||
| 	if hash == "" { | ||||
| 		if ctx.unsignedPayload || (ctx.isPresign && ctx.ServiceName == "s3") { | ||||
| 		includeSHA256Header := ctx.unsignedPayload || | ||||
| 			ctx.ServiceName == "s3" || | ||||
| 			ctx.ServiceName == "glacier" | ||||
| 
 | ||||
| 		s3Presign := ctx.isPresign && ctx.ServiceName == "s3" | ||||
| 
 | ||||
| 		if ctx.unsignedPayload || s3Presign { | ||||
| 			hash = "UNSIGNED-PAYLOAD" | ||||
| 			includeSHA256Header = !s3Presign | ||||
| 		} else if ctx.Body == nil { | ||||
| 			hash = emptyStringSHA256 | ||||
| 		} else { | ||||
| 			if !aws.IsReaderSeekable(ctx.Body) { | ||||
| 				return fmt.Errorf("cannot use unseekable request body %T, for signed request with body", ctx.Body) | ||||
| 			} | ||||
| 			hash = hex.EncodeToString(makeSha256Reader(ctx.Body)) | ||||
| 		} | ||||
| 		if ctx.unsignedPayload || ctx.ServiceName == "s3" || ctx.ServiceName == "glacier" { | ||||
| 
 | ||||
| 		if includeSHA256Header { | ||||
| 			ctx.Request.Header.Set("X-Amz-Content-Sha256", hash) | ||||
| 		} | ||||
| 	} | ||||
| 	ctx.bodyDigest = hash | ||||
| 
 | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| // isRequestSigned returns if the request is currently signed or presigned
 | ||||
|  | @ -710,8 +736,8 @@ func makeSha256(data []byte) []byte { | |||
| 
 | ||||
| func makeSha256Reader(reader io.ReadSeeker) []byte { | ||||
| 	hash := sha256.New() | ||||
| 	start, _ := reader.Seek(0, 1) | ||||
| 	defer reader.Seek(start, 0) | ||||
| 	start, _ := reader.Seek(0, sdkio.SeekCurrent) | ||||
| 	defer reader.Seek(start, sdkio.SeekStart) | ||||
| 
 | ||||
| 	io.Copy(hash, reader) | ||||
| 	return hash.Sum(nil) | ||||
|  |  | |||
|  | @ -3,6 +3,8 @@ package aws | |||
| import ( | ||||
| 	"io" | ||||
| 	"sync" | ||||
| 
 | ||||
| 	"github.com/aws/aws-sdk-go/internal/sdkio" | ||||
| ) | ||||
| 
 | ||||
| // ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser. Should
 | ||||
|  | @ -22,6 +24,22 @@ type ReaderSeekerCloser struct { | |||
| 	r io.Reader | ||||
| } | ||||
| 
 | ||||
| // IsReaderSeekable returns if the underlying reader type can be seeked. A
 | ||||
| // io.Reader might not actually be seekable if it is the ReaderSeekerCloser
 | ||||
| // type.
 | ||||
| func IsReaderSeekable(r io.Reader) bool { | ||||
| 	switch v := r.(type) { | ||||
| 	case ReaderSeekerCloser: | ||||
| 		return v.IsSeeker() | ||||
| 	case *ReaderSeekerCloser: | ||||
| 		return v.IsSeeker() | ||||
| 	case io.ReadSeeker: | ||||
| 		return true | ||||
| 	default: | ||||
| 		return false | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // Read reads from the reader up to size of p. The number of bytes read, and
 | ||||
| // error if it occurred will be returned.
 | ||||
| //
 | ||||
|  | @ -56,6 +74,71 @@ func (r ReaderSeekerCloser) IsSeeker() bool { | |||
| 	return ok | ||||
| } | ||||
| 
 | ||||
| // HasLen returns the length of the underlying reader if the value implements
 | ||||
| // the Len() int method.
 | ||||
| func (r ReaderSeekerCloser) HasLen() (int, bool) { | ||||
| 	type lenner interface { | ||||
| 		Len() int | ||||
| 	} | ||||
| 
 | ||||
| 	if lr, ok := r.r.(lenner); ok { | ||||
| 		return lr.Len(), true | ||||
| 	} | ||||
| 
 | ||||
| 	return 0, false | ||||
| } | ||||
| 
 | ||||
| // GetLen returns the length of the bytes remaining in the underlying reader.
 | ||||
| // Checks first for Len(), then io.Seeker to determine the size of the
 | ||||
| // underlying reader.
 | ||||
| //
 | ||||
| // Will return -1 if the length cannot be determined.
 | ||||
| func (r ReaderSeekerCloser) GetLen() (int64, error) { | ||||
| 	if l, ok := r.HasLen(); ok { | ||||
| 		return int64(l), nil | ||||
| 	} | ||||
| 
 | ||||
| 	if s, ok := r.r.(io.Seeker); ok { | ||||
| 		return seekerLen(s) | ||||
| 	} | ||||
| 
 | ||||
| 	return -1, nil | ||||
| } | ||||
| 
 | ||||
| // SeekerLen attempts to get the number of bytes remaining at the seeker's
 | ||||
| // current position.  Returns the number of bytes remaining or error.
 | ||||
| func SeekerLen(s io.Seeker) (int64, error) { | ||||
| 	// Determine if the seeker is actually seekable. ReaderSeekerCloser
 | ||||
| 	// hides the fact that a io.Readers might not actually be seekable.
 | ||||
| 	switch v := s.(type) { | ||||
| 	case ReaderSeekerCloser: | ||||
| 		return v.GetLen() | ||||
| 	case *ReaderSeekerCloser: | ||||
| 		return v.GetLen() | ||||
| 	} | ||||
| 
 | ||||
| 	return seekerLen(s) | ||||
| } | ||||
| 
 | ||||
| func seekerLen(s io.Seeker) (int64, error) { | ||||
| 	curOffset, err := s.Seek(0, sdkio.SeekCurrent) | ||||
| 	if err != nil { | ||||
| 		return 0, err | ||||
| 	} | ||||
| 
 | ||||
| 	endOffset, err := s.Seek(0, sdkio.SeekEnd) | ||||
| 	if err != nil { | ||||
| 		return 0, err | ||||
| 	} | ||||
| 
 | ||||
| 	_, err = s.Seek(curOffset, sdkio.SeekStart) | ||||
| 	if err != nil { | ||||
| 		return 0, err | ||||
| 	} | ||||
| 
 | ||||
| 	return endOffset - curOffset, nil | ||||
| } | ||||
| 
 | ||||
| // Close closes the ReaderSeekerCloser.
 | ||||
| //
 | ||||
| // If the ReaderSeekerCloser is not an io.Closer nothing will be done.
 | ||||
|  |  | |||
|  | @ -5,4 +5,4 @@ package aws | |||
| const SDKName = "aws-sdk-go" | ||||
| 
 | ||||
| // SDKVersion is the version of this SDK
 | ||||
| const SDKVersion = "1.12.7" | ||||
| const SDKVersion = "1.14.12" | ||||
|  |  | |||
							
								
								
									
										10
									
								
								cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go
								
								
									generated
								
								
									vendored
								
								
									Normal file
								
							
							
						
						
									
										10
									
								
								cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go
								
								
									generated
								
								
									vendored
								
								
									Normal file
								
							|  | @ -0,0 +1,10 @@ | |||
| // +build !go1.7
 | ||||
| 
 | ||||
| package sdkio | ||||
| 
 | ||||
| // Copy of Go 1.7 io package's Seeker constants.
 | ||||
| const ( | ||||
| 	SeekStart   = 0 // seek relative to the origin of the file
 | ||||
| 	SeekCurrent = 1 // seek relative to the current offset
 | ||||
| 	SeekEnd     = 2 // seek relative to the end
 | ||||
| ) | ||||
							
								
								
									
										12
									
								
								cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go
								
								
									generated
								
								
									vendored
								
								
									Normal file
								
							
							
						
						
									
										12
									
								
								cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go
								
								
									generated
								
								
									vendored
								
								
									Normal file
								
							|  | @ -0,0 +1,12 @@ | |||
| // +build go1.7
 | ||||
| 
 | ||||
| package sdkio | ||||
| 
 | ||||
| import "io" | ||||
| 
 | ||||
| // Alias for Go 1.7 io package Seeker constants
 | ||||
| const ( | ||||
| 	SeekStart   = io.SeekStart   // seek relative to the origin of the file
 | ||||
| 	SeekCurrent = io.SeekCurrent // seek relative to the current offset
 | ||||
| 	SeekEnd     = io.SeekEnd     // seek relative to the end
 | ||||
| ) | ||||
							
								
								
									
										29
									
								
								cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go
								
								
									generated
								
								
									vendored
								
								
									Normal file
								
							
							
						
						
									
										29
									
								
								cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go
								
								
									generated
								
								
									vendored
								
								
									Normal file
								
							|  | @ -0,0 +1,29 @@ | |||
| package sdkrand | ||||
| 
 | ||||
| import ( | ||||
| 	"math/rand" | ||||
| 	"sync" | ||||
| 	"time" | ||||
| ) | ||||
| 
 | ||||
| // lockedSource is a thread-safe implementation of rand.Source
 | ||||
| type lockedSource struct { | ||||
| 	lk  sync.Mutex | ||||
| 	src rand.Source | ||||
| } | ||||
| 
 | ||||
| func (r *lockedSource) Int63() (n int64) { | ||||
| 	r.lk.Lock() | ||||
| 	n = r.src.Int63() | ||||
| 	r.lk.Unlock() | ||||
| 	return | ||||
| } | ||||
| 
 | ||||
| func (r *lockedSource) Seed(seed int64) { | ||||
| 	r.lk.Lock() | ||||
| 	r.src.Seed(seed) | ||||
| 	r.lk.Unlock() | ||||
| } | ||||
| 
 | ||||
| // SeededRand is a new RNG using a thread safe implementation of rand.Source
 | ||||
| var SeededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())}) | ||||
|  | @ -24,7 +24,7 @@ func Build(r *request.Request) { | |||
| 		r.Error = awserr.New("SerializationError", "failed encoding EC2 Query request", err) | ||||
| 	} | ||||
| 
 | ||||
| 	if r.ExpireTime == 0 { | ||||
| 	if !r.IsPresigned() { | ||||
| 		r.HTTPRequest.Method = "POST" | ||||
| 		r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") | ||||
| 		r.SetBufferBody([]byte(body.Encode())) | ||||
|  |  | |||
|  | @ -12,6 +12,7 @@ import ( | |||
| 	"strconv" | ||||
| 	"time" | ||||
| 
 | ||||
| 	"github.com/aws/aws-sdk-go/aws" | ||||
| 	"github.com/aws/aws-sdk-go/private/protocol" | ||||
| ) | ||||
| 
 | ||||
|  | @ -49,9 +50,12 @@ func buildAny(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) err | |||
| 				t = "list" | ||||
| 			} | ||||
| 		case reflect.Map: | ||||
| 			// cannot be a JSONValue map
 | ||||
| 			if _, ok := value.Interface().(aws.JSONValue); !ok { | ||||
| 				t = "map" | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	switch t { | ||||
| 	case "structure": | ||||
|  | @ -210,14 +214,11 @@ func buildScalar(v reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) erro | |||
| 		} | ||||
| 		buf.Write(strconv.AppendFloat(scratch[:0], f, 'f', -1, 64)) | ||||
| 	default: | ||||
| 		switch value.Type() { | ||||
| 		case timeType: | ||||
| 			converted := v.Interface().(*time.Time) | ||||
| 
 | ||||
| 		switch converted := value.Interface().(type) { | ||||
| 		case time.Time: | ||||
| 			buf.Write(strconv.AppendInt(scratch[:0], converted.UTC().Unix(), 10)) | ||||
| 		case byteSliceType: | ||||
| 		case []byte: | ||||
| 			if !value.IsNil() { | ||||
| 				converted := value.Interface().([]byte) | ||||
| 				buf.WriteByte('"') | ||||
| 				if len(converted) < 1024 { | ||||
| 					// for small buffers, using Encode directly is much faster.
 | ||||
|  | @ -233,6 +234,12 @@ func buildScalar(v reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) erro | |||
| 				} | ||||
| 				buf.WriteByte('"') | ||||
| 			} | ||||
| 		case aws.JSONValue: | ||||
| 			str, err := protocol.EncodeJSONValue(converted, protocol.QuotedEscape) | ||||
| 			if err != nil { | ||||
| 				return fmt.Errorf("unable to encode JSONValue, %v", err) | ||||
| 			} | ||||
| 			buf.WriteString(str) | ||||
| 		default: | ||||
| 			return fmt.Errorf("unsupported JSON value %v (%s)", value.Interface(), value.Type()) | ||||
| 		} | ||||
|  |  | |||
|  | @ -8,6 +8,9 @@ import ( | |||
| 	"io/ioutil" | ||||
| 	"reflect" | ||||
| 	"time" | ||||
| 
 | ||||
| 	"github.com/aws/aws-sdk-go/aws" | ||||
| 	"github.com/aws/aws-sdk-go/private/protocol" | ||||
| ) | ||||
| 
 | ||||
| // UnmarshalJSON reads a stream and unmarshals the results in object v.
 | ||||
|  | @ -50,9 +53,12 @@ func unmarshalAny(value reflect.Value, data interface{}, tag reflect.StructTag) | |||
| 				t = "list" | ||||
| 			} | ||||
| 		case reflect.Map: | ||||
| 			// cannot be a JSONValue map
 | ||||
| 			if _, ok := value.Interface().(aws.JSONValue); !ok { | ||||
| 				t = "map" | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	switch t { | ||||
| 	case "structure": | ||||
|  | @ -183,6 +189,13 @@ func unmarshalScalar(value reflect.Value, data interface{}, tag reflect.StructTa | |||
| 				return err | ||||
| 			} | ||||
| 			value.Set(reflect.ValueOf(b)) | ||||
| 		case aws.JSONValue: | ||||
| 			// No need to use escaping as the value is a non-quoted string.
 | ||||
| 			v, err := protocol.DecodeJSONValue(d, protocol.NoEscape) | ||||
| 			if err != nil { | ||||
| 				return err | ||||
| 			} | ||||
| 			value.Set(reflect.ValueOf(v)) | ||||
| 		default: | ||||
| 			return errf() | ||||
| 		} | ||||
|  |  | |||
							
								
								
									
										76
									
								
								cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go
								
								
									generated
								
								
									vendored
								
								
									Normal file
								
							
							
						
						
									
										76
									
								
								cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go
								
								
									generated
								
								
									vendored
								
								
									Normal file
								
							|  | @ -0,0 +1,76 @@ | |||
| package protocol | ||||
| 
 | ||||
| import ( | ||||
| 	"encoding/base64" | ||||
| 	"encoding/json" | ||||
| 	"fmt" | ||||
| 	"strconv" | ||||
| 
 | ||||
| 	"github.com/aws/aws-sdk-go/aws" | ||||
| ) | ||||
| 
 | ||||
| // EscapeMode is the mode that should be use for escaping a value
 | ||||
| type EscapeMode uint | ||||
| 
 | ||||
| // The modes for escaping a value before it is marshaled, and unmarshaled.
 | ||||
| const ( | ||||
| 	NoEscape EscapeMode = iota | ||||
| 	Base64Escape | ||||
| 	QuotedEscape | ||||
| ) | ||||
| 
 | ||||
| // EncodeJSONValue marshals the value into a JSON string, and optionally base64
 | ||||
| // encodes the string before returning it.
 | ||||
| //
 | ||||
| // Will panic if the escape mode is unknown.
 | ||||
| func EncodeJSONValue(v aws.JSONValue, escape EscapeMode) (string, error) { | ||||
| 	b, err := json.Marshal(v) | ||||
| 	if err != nil { | ||||
| 		return "", err | ||||
| 	} | ||||
| 
 | ||||
| 	switch escape { | ||||
| 	case NoEscape: | ||||
| 		return string(b), nil | ||||
| 	case Base64Escape: | ||||
| 		return base64.StdEncoding.EncodeToString(b), nil | ||||
| 	case QuotedEscape: | ||||
| 		return strconv.Quote(string(b)), nil | ||||
| 	} | ||||
| 
 | ||||
| 	panic(fmt.Sprintf("EncodeJSONValue called with unknown EscapeMode, %v", escape)) | ||||
| } | ||||
| 
 | ||||
| // DecodeJSONValue will attempt to decode the string input as a JSONValue.
 | ||||
| // Optionally decoding base64 the value first before JSON unmarshaling.
 | ||||
| //
 | ||||
| // Will panic if the escape mode is unknown.
 | ||||
| func DecodeJSONValue(v string, escape EscapeMode) (aws.JSONValue, error) { | ||||
| 	var b []byte | ||||
| 	var err error | ||||
| 
 | ||||
| 	switch escape { | ||||
| 	case NoEscape: | ||||
| 		b = []byte(v) | ||||
| 	case Base64Escape: | ||||
| 		b, err = base64.StdEncoding.DecodeString(v) | ||||
| 	case QuotedEscape: | ||||
| 		var u string | ||||
| 		u, err = strconv.Unquote(v) | ||||
| 		b = []byte(u) | ||||
| 	default: | ||||
| 		panic(fmt.Sprintf("DecodeJSONValue called with unknown EscapeMode, %v", escape)) | ||||
| 	} | ||||
| 
 | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	m := aws.JSONValue{} | ||||
| 	err = json.Unmarshal(b, &m) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	return m, nil | ||||
| } | ||||
							
								
								
									
										81
									
								
								cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go
								
								
									generated
								
								
									vendored
								
								
									Normal file
								
							
							
						
						
									
										81
									
								
								cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go
								
								
									generated
								
								
									vendored
								
								
									Normal file
								
							|  | @ -0,0 +1,81 @@ | |||
| package protocol | ||||
| 
 | ||||
| import ( | ||||
| 	"io" | ||||
| 	"io/ioutil" | ||||
| 	"net/http" | ||||
| 
 | ||||
| 	"github.com/aws/aws-sdk-go/aws" | ||||
| 	"github.com/aws/aws-sdk-go/aws/client/metadata" | ||||
| 	"github.com/aws/aws-sdk-go/aws/request" | ||||
| ) | ||||
| 
 | ||||
// PayloadUnmarshaler provides the interface for unmarshaling a payload's
// reader into a SDK shape.
type PayloadUnmarshaler interface {
	UnmarshalPayload(io.Reader, interface{}) error
}

// HandlerPayloadUnmarshal implements the PayloadUnmarshaler from a
// HandlerList. This provides the support for unmarshaling a payload reader to
// a shape without needing a SDK request first.
type HandlerPayloadUnmarshal struct {
	// Unmarshalers is the handler list run against a synthetic request to
	// decode the payload into the target shape.
	Unmarshalers request.HandlerList
}
| 
 | ||||
| // UnmarshalPayload unmarshals the io.Reader payload into the SDK shape using
 | ||||
| // the Unmarshalers HandlerList provided. Returns an error if unable
 | ||||
| // unmarshaling fails.
 | ||||
| func (h HandlerPayloadUnmarshal) UnmarshalPayload(r io.Reader, v interface{}) error { | ||||
| 	req := &request.Request{ | ||||
| 		HTTPRequest: &http.Request{}, | ||||
| 		HTTPResponse: &http.Response{ | ||||
| 			StatusCode: 200, | ||||
| 			Header:     http.Header{}, | ||||
| 			Body:       ioutil.NopCloser(r), | ||||
| 		}, | ||||
| 		Data: v, | ||||
| 	} | ||||
| 
 | ||||
| 	h.Unmarshalers.Run(req) | ||||
| 
 | ||||
| 	return req.Error | ||||
| } | ||||
| 
 | ||||
// PayloadMarshaler provides the interface for marshaling a SDK shape into an
// io.Writer.
type PayloadMarshaler interface {
	MarshalPayload(io.Writer, interface{}) error
}

// HandlerPayloadMarshal implements the PayloadMarshaler from a HandlerList.
// This provides support for marshaling a SDK shape into an io.Writer without
// needing a SDK request first.
type HandlerPayloadMarshal struct {
	// Marshalers is the handler list run against a synthetic request to
	// serialize the shape into the request body.
	Marshalers request.HandlerList
}
| 
 | ||||
| // MarshalPayload marshals the SDK shape into the io.Writer using the
 | ||||
| // Marshalers HandlerList provided. Returns an error if unable if marshal
 | ||||
| // fails.
 | ||||
| func (h HandlerPayloadMarshal) MarshalPayload(w io.Writer, v interface{}) error { | ||||
| 	req := request.New( | ||||
| 		aws.Config{}, | ||||
| 		metadata.ClientInfo{}, | ||||
| 		request.Handlers{}, | ||||
| 		nil, | ||||
| 		&request.Operation{HTTPMethod: "GET"}, | ||||
| 		v, | ||||
| 		nil, | ||||
| 	) | ||||
| 
 | ||||
| 	h.Marshalers.Run(req) | ||||
| 
 | ||||
| 	if req.Error != nil { | ||||
| 		return req.Error | ||||
| 	} | ||||
| 
 | ||||
| 	io.Copy(w, req.GetBody()) | ||||
| 
 | ||||
| 	return nil | ||||
| } | ||||
							
								
								
									
										2
									
								
								cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go
								
								
									generated
								
								
									vendored
								
								
							
							
						
						
									
										2
									
								
								cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go
								
								
									generated
								
								
									vendored
								
								
							|  | @ -25,7 +25,7 @@ func Build(r *request.Request) { | |||
| 		return | ||||
| 	} | ||||
| 
 | ||||
| 	if r.ExpireTime == 0 { | ||||
| 	if !r.IsPresigned() { | ||||
| 		r.HTTPRequest.Method = "POST" | ||||
| 		r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") | ||||
| 		r.SetBufferBody([]byte(body.Encode())) | ||||
|  |  | |||
							
								
								
									
										29
									
								
								cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
								
								
									generated
								
								
									vendored
								
								
							
							
						
						
									
										29
									
								
								cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
								
								
									generated
								
								
									vendored
								
								
							|  | @ -4,7 +4,6 @@ package rest | |||
| import ( | ||||
| 	"bytes" | ||||
| 	"encoding/base64" | ||||
| 	"encoding/json" | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 	"net/http" | ||||
|  | @ -18,10 +17,13 @@ import ( | |||
| 	"github.com/aws/aws-sdk-go/aws" | ||||
| 	"github.com/aws/aws-sdk-go/aws/awserr" | ||||
| 	"github.com/aws/aws-sdk-go/aws/request" | ||||
| 	"github.com/aws/aws-sdk-go/private/protocol" | ||||
| ) | ||||
| 
 | ||||
| // RFC822 returns an RFC822 formatted timestamp for AWS protocols
 | ||||
| const RFC822 = "Mon, 2 Jan 2006 15:04:05 GMT" | ||||
| // RFC1123GMT is a RFC1123 (RFC822) formated timestame. This format is not
 | ||||
| // using the standard library's time.RFC1123 due to the desire to always use
 | ||||
| // GMT as the timezone.
 | ||||
| const RFC1123GMT = "Mon, 2 Jan 2006 15:04:05 GMT" | ||||
| 
 | ||||
| // Whether the byte value can be sent without escaping in AWS URLs
 | ||||
| var noEscape [256]bool | ||||
|  | @ -252,13 +254,12 @@ func EscapePath(path string, encodeSep bool) string { | |||
| 	return buf.String() | ||||
| } | ||||
| 
 | ||||
| func convertType(v reflect.Value, tag reflect.StructTag) (string, error) { | ||||
| func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error) { | ||||
| 	v = reflect.Indirect(v) | ||||
| 	if !v.IsValid() { | ||||
| 		return "", errValueNotSet | ||||
| 	} | ||||
| 
 | ||||
| 	var str string | ||||
| 	switch value := v.Interface().(type) { | ||||
| 	case string: | ||||
| 		str = value | ||||
|  | @ -271,19 +272,21 @@ func convertType(v reflect.Value, tag reflect.StructTag) (string, error) { | |||
| 	case float64: | ||||
| 		str = strconv.FormatFloat(value, 'f', -1, 64) | ||||
| 	case time.Time: | ||||
| 		str = value.UTC().Format(RFC822) | ||||
| 		str = value.UTC().Format(RFC1123GMT) | ||||
| 	case aws.JSONValue: | ||||
| 		b, err := json.Marshal(value) | ||||
| 		if err != nil { | ||||
| 			return "", err | ||||
| 		if len(value) == 0 { | ||||
| 			return "", errValueNotSet | ||||
| 		} | ||||
| 		escaping := protocol.NoEscape | ||||
| 		if tag.Get("location") == "header" { | ||||
| 			str = base64.StdEncoding.EncodeToString(b) | ||||
| 		} else { | ||||
| 			str = string(b) | ||||
| 			escaping = protocol.Base64Escape | ||||
| 		} | ||||
| 		str, err = protocol.EncodeJSONValue(value, escaping) | ||||
| 		if err != nil { | ||||
| 			return "", fmt.Errorf("unable to encode JSONValue, %v", err) | ||||
| 		} | ||||
| 	default: | ||||
| 		err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) | ||||
| 		err := fmt.Errorf("unsupported value for param %v (%s)", v.Interface(), v.Type()) | ||||
| 		return "", err | ||||
| 	} | ||||
| 	return str, nil | ||||
|  |  | |||
							
								
								
									
										16
									
								
								cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go
								
								
									generated
								
								
									vendored
								
								
							
							
						
						
									
										16
									
								
								cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go
								
								
									generated
								
								
									vendored
								
								
							|  | @ -3,7 +3,6 @@ package rest | |||
| import ( | ||||
| 	"bytes" | ||||
| 	"encoding/base64" | ||||
| 	"encoding/json" | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 	"io/ioutil" | ||||
|  | @ -16,6 +15,7 @@ import ( | |||
| 	"github.com/aws/aws-sdk-go/aws" | ||||
| 	"github.com/aws/aws-sdk-go/aws/awserr" | ||||
| 	"github.com/aws/aws-sdk-go/aws/request" | ||||
| 	"github.com/aws/aws-sdk-go/private/protocol" | ||||
| ) | ||||
| 
 | ||||
| // UnmarshalHandler is a named request handler for unmarshaling rest protocol requests
 | ||||
|  | @ -198,23 +198,17 @@ func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) erro | |||
| 		} | ||||
| 		v.Set(reflect.ValueOf(&f)) | ||||
| 	case *time.Time: | ||||
| 		t, err := time.Parse(RFC822, header) | ||||
| 		t, err := time.Parse(time.RFC1123, header) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 		v.Set(reflect.ValueOf(&t)) | ||||
| 	case aws.JSONValue: | ||||
| 		b := []byte(header) | ||||
| 		var err error | ||||
| 		escaping := protocol.NoEscape | ||||
| 		if tag.Get("location") == "header" { | ||||
| 			b, err = base64.StdEncoding.DecodeString(header) | ||||
| 			if err != nil { | ||||
| 				return err | ||||
| 			escaping = protocol.Base64Escape | ||||
| 		} | ||||
| 		} | ||||
| 
 | ||||
| 		m := aws.JSONValue{} | ||||
| 		err = json.Unmarshal(b, &m) | ||||
| 		m, err := protocol.DecodeJSONValue(header, escaping) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
|  |  | |||
|  | @ -52,9 +52,15 @@ func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { | |||
| 	if t == "" { | ||||
| 		switch rtype.Kind() { | ||||
| 		case reflect.Struct: | ||||
| 			// also it can't be a time object
 | ||||
| 			if _, ok := r.Interface().(*time.Time); !ok { | ||||
| 				t = "structure" | ||||
| 			} | ||||
| 		case reflect.Slice: | ||||
| 			// also it can't be a byte slice
 | ||||
| 			if _, ok := r.Interface().([]byte); !ok { | ||||
| 				t = "list" | ||||
| 			} | ||||
| 		case reflect.Map: | ||||
| 			t = "map" | ||||
| 		} | ||||
|  |  | |||
							
								
								
									
										824
									
								
								cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/autoscaling/api.go
								
								
									generated
								
								
									vendored
								
								
							
							
						
						
									
										824
									
								
								cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/autoscaling/api.go
								
								
									generated
								
								
									vendored
								
								
							
										
											
												File diff suppressed because it is too large
												Load Diff
											
										
									
								
							|  | @ -3,9 +3,10 @@ | |||
| // Package autoscaling provides the client and types for making API
 | ||||
| // requests to Auto Scaling.
 | ||||
| //
 | ||||
| // Auto Scaling is designed to automatically launch or terminate EC2 instances
 | ||||
| // based on user-defined policies, schedules, and health checks. Use this service
 | ||||
| // in conjunction with the Amazon CloudWatch and Elastic Load Balancing services.
 | ||||
| // Amazon EC2 Auto Scaling is designed to automatically launch or terminate
 | ||||
| // EC2 instances based on user-defined policies, schedules, and health checks.
 | ||||
| // Use this service in conjunction with the AWS Auto Scaling, Amazon CloudWatch,
 | ||||
| // and Elastic Load Balancing services.
 | ||||
| //
 | ||||
| // See https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01 for more information on this service.
 | ||||
| //
 | ||||
|  | @ -14,7 +15,7 @@ | |||
| //
 | ||||
| // Using the Client
 | ||||
| //
 | ||||
| // To Auto Scaling with the SDK use the New function to create
 | ||||
| // To contact Auto Scaling with the SDK use the New function to create
 | ||||
| // a new service client. With that client you can make API requests to the service.
 | ||||
| // These clients are safe to use concurrently.
 | ||||
| //
 | ||||
|  |  | |||
							
								
								
									
										6
									
								
								cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/autoscaling/errors.go
								
								
									generated
								
								
									vendored
								
								
							
							
						
						
									
										6
									
								
								cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/autoscaling/errors.go
								
								
									generated
								
								
									vendored
								
								
							|  | @ -44,4 +44,10 @@ const ( | |||
| 	// The operation can't be performed because there are scaling activities in
 | ||||
| 	// progress.
 | ||||
| 	ErrCodeScalingActivityInProgressFault = "ScalingActivityInProgress" | ||||
| 
 | ||||
| 	// ErrCodeServiceLinkedRoleFailure for service response error code
 | ||||
| 	// "ServiceLinkedRoleFailure".
 | ||||
| 	//
 | ||||
| 	// The service-linked role is not yet ready for use.
 | ||||
| 	ErrCodeServiceLinkedRoleFailure = "ServiceLinkedRoleFailure" | ||||
| ) | ||||
|  |  | |||
							
								
								
									
										6
									
								
								cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/autoscaling/service.go
								
								
									generated
								
								
									vendored
								
								
							
							
						
						
									
										6
									
								
								cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/autoscaling/service.go
								
								
									generated
								
								
									vendored
								
								
							|  | @ -29,8 +29,9 @@ var initRequest func(*request.Request) | |||
| 
 | ||||
| // Service information constants
 | ||||
| const ( | ||||
| 	ServiceName = "autoscaling" // Service endpoint prefix API calls made to.
 | ||||
| 	EndpointsID = ServiceName   // Service ID for Regions and Endpoints metadata.
 | ||||
| 	ServiceName = "autoscaling"  // Name of service.
 | ||||
| 	EndpointsID = ServiceName    // ID to lookup a service endpoint with.
 | ||||
| 	ServiceID   = "Auto Scaling" // ServiceID is a unique identifer of a specific service.
 | ||||
| ) | ||||
| 
 | ||||
| // New creates a new instance of the AutoScaling client with a session.
 | ||||
|  | @ -55,6 +56,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio | |||
| 			cfg, | ||||
| 			metadata.ClientInfo{ | ||||
| 				ServiceName:   ServiceName, | ||||
| 				ServiceID:     ServiceID, | ||||
| 				SigningName:   signingName, | ||||
| 				SigningRegion: signingRegion, | ||||
| 				Endpoint:      endpoint, | ||||
|  |  | |||
										
											
												File diff suppressed because it is too large
												Load Diff
											
										
									
								
							
							
								
								
									
										53
									
								
								cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/ec2/customizations.go
								
								
									generated
								
								
									vendored
								
								
							
							
						
						
									
										53
									
								
								cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/ec2/customizations.go
								
								
									generated
								
								
									vendored
								
								
							|  | @ -5,11 +5,64 @@ import ( | |||
| 
 | ||||
| 	"github.com/aws/aws-sdk-go/aws" | ||||
| 	"github.com/aws/aws-sdk-go/aws/awsutil" | ||||
| 	"github.com/aws/aws-sdk-go/aws/client" | ||||
| 	"github.com/aws/aws-sdk-go/aws/endpoints" | ||||
| 	"github.com/aws/aws-sdk-go/aws/request" | ||||
| 	"github.com/aws/aws-sdk-go/internal/sdkrand" | ||||
| ) | ||||
| 
 | ||||
// retryer wraps the SDK's default retryer so that specific EC2 operations
// can use a custom backoff schedule (see RetryRules).
type retryer struct {
	client.DefaultRetryer
}
| 
 | ||||
| func (d retryer) RetryRules(r *request.Request) time.Duration { | ||||
| 	switch r.Operation.Name { | ||||
| 	case opModifyNetworkInterfaceAttribute: | ||||
| 		fallthrough | ||||
| 	case opAssignPrivateIpAddresses: | ||||
| 		return customRetryRule(r) | ||||
| 	default: | ||||
| 		return d.DefaultRetryer.RetryRules(r) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func customRetryRule(r *request.Request) time.Duration { | ||||
| 	retryTimes := []time.Duration{ | ||||
| 		time.Second, | ||||
| 		3 * time.Second, | ||||
| 		5 * time.Second, | ||||
| 	} | ||||
| 
 | ||||
| 	count := r.RetryCount | ||||
| 	if count >= len(retryTimes) { | ||||
| 		count = len(retryTimes) - 1 | ||||
| 	} | ||||
| 
 | ||||
| 	minTime := int(retryTimes[count]) | ||||
| 	return time.Duration(sdkrand.SeededRand.Intn(minTime) + minTime) | ||||
| } | ||||
| 
 | ||||
| func setCustomRetryer(c *client.Client) { | ||||
| 	maxRetries := aws.IntValue(c.Config.MaxRetries) | ||||
| 	if c.Config.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries { | ||||
| 		maxRetries = 3 | ||||
| 	} | ||||
| 
 | ||||
| 	c.Retryer = retryer{ | ||||
| 		DefaultRetryer: client.DefaultRetryer{ | ||||
| 			NumMaxRetries: maxRetries, | ||||
| 		}, | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func init() { | ||||
| 	initClient = func(c *client.Client) { | ||||
| 		if c.Config.Retryer == nil { | ||||
| 			// Only override the retryer with a custom one if the config
 | ||||
| 			// does not already contain a retryer
 | ||||
| 			setCustomRetryer(c) | ||||
| 		} | ||||
| 	} | ||||
| 	initRequest = func(r *request.Request) { | ||||
| 		if r.Operation.Name == opCopySnapshot { // fill the PresignedURL parameter
 | ||||
| 			r.Handlers.Build.PushFront(fillPresignedURL) | ||||
|  |  | |||
|  | @ -4,9 +4,8 @@ | |||
| // requests to Amazon Elastic Compute Cloud.
 | ||||
| //
 | ||||
| // Amazon Elastic Compute Cloud (Amazon EC2) provides resizable computing capacity
 | ||||
| // in the Amazon Web Services (AWS) cloud. Using Amazon EC2 eliminates your
 | ||||
| // need to invest in hardware up front, so you can develop and deploy applications
 | ||||
| // faster.
 | ||||
| // in the AWS Cloud. Using Amazon EC2 eliminates the need to invest in hardware
 | ||||
| // up front, so you can develop and deploy applications faster.
 | ||||
| //
 | ||||
| // See https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15 for more information on this service.
 | ||||
| //
 | ||||
|  | @ -15,7 +14,7 @@ | |||
| //
 | ||||
| // Using the Client
 | ||||
| //
 | ||||
| // To Amazon Elastic Compute Cloud with the SDK use the New function to create
 | ||||
| // To contact Amazon Elastic Compute Cloud with the SDK use the New function to create
 | ||||
| // a new service client. With that client you can make API requests to the service.
 | ||||
| // These clients are safe to use concurrently.
 | ||||
| //
 | ||||
|  |  | |||
|  | @ -29,8 +29,9 @@ var initRequest func(*request.Request) | |||
| 
 | ||||
| // Service information constants
 | ||||
| const ( | ||||
| 	ServiceName = "ec2"       // Service endpoint prefix API calls made to.
 | ||||
| 	EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata.
 | ||||
| 	ServiceName = "ec2"       // Name of service.
 | ||||
| 	EndpointsID = ServiceName // ID to lookup a service endpoint with.
 | ||||
| 	ServiceID   = "EC2"       // ServiceID is a unique identifer of a specific service.
 | ||||
| ) | ||||
| 
 | ||||
| // New creates a new instance of the EC2 client with a session.
 | ||||
|  | @ -55,6 +56,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio | |||
| 			cfg, | ||||
| 			metadata.ClientInfo{ | ||||
| 				ServiceName:   ServiceName, | ||||
| 				ServiceID:     ServiceID, | ||||
| 				SigningName:   signingName, | ||||
| 				SigningRegion: signingRegion, | ||||
| 				Endpoint:      endpoint, | ||||
|  |  | |||
										
											
												File diff suppressed because it is too large
												Load Diff
											
										
									
								
							|  | @ -3,11 +3,11 @@ | |||
| // Package ecr provides the client and types for making API
 | ||||
| // requests to Amazon EC2 Container Registry.
 | ||||
| //
 | ||||
| // Amazon EC2 Container Registry (Amazon ECR) is a managed AWS Docker registry
 | ||||
| // Amazon Elastic Container Registry (Amazon ECR) is a managed Docker registry
 | ||||
| // service. Customers can use the familiar Docker CLI to push, pull, and manage
 | ||||
| // images. Amazon ECR provides a secure, scalable, and reliable registry. Amazon
 | ||||
| // ECR supports private Docker repositories with resource-based permissions
 | ||||
| // using AWS IAM so that specific users or Amazon EC2 instances can access repositories
 | ||||
| // using IAM so that specific users or Amazon EC2 instances can access repositories
 | ||||
| // and images. Developers can use the Docker CLI to author and manage images.
 | ||||
| //
 | ||||
| // See https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21 for more information on this service.
 | ||||
|  | @ -17,7 +17,7 @@ | |||
| //
 | ||||
| // Using the Client
 | ||||
| //
 | ||||
| // To Amazon EC2 Container Registry with the SDK use the New function to create
 | ||||
| // To contact Amazon EC2 Container Registry with the SDK use the New function to create
 | ||||
| // a new service client. With that client you can make API requests to the service.
 | ||||
| // These clients are safe to use concurrently.
 | ||||
| //
 | ||||
|  |  | |||
|  | @ -13,8 +13,8 @@ const ( | |||
| 	// ErrCodeImageAlreadyExistsException for service response error code
 | ||||
| 	// "ImageAlreadyExistsException".
 | ||||
| 	//
 | ||||
| 	// The specified image has already been pushed, and there are no changes to
 | ||||
| 	// the manifest or image tag since the last push.
 | ||||
| 	// The specified image has already been pushed, and there were no changes to
 | ||||
| 	// the manifest or image tag after the last push.
 | ||||
| 	ErrCodeImageAlreadyExistsException = "ImageAlreadyExistsException" | ||||
| 
 | ||||
| 	// ErrCodeImageNotFoundException for service response error code
 | ||||
|  | @ -70,13 +70,32 @@ const ( | |||
| 	// for this repository.
 | ||||
| 	ErrCodeLayersNotFoundException = "LayersNotFoundException" | ||||
| 
 | ||||
| 	// ErrCodeLifecyclePolicyNotFoundException for service response error code
 | ||||
| 	// "LifecyclePolicyNotFoundException".
 | ||||
| 	//
 | ||||
| 	// The lifecycle policy could not be found, and no policy is set to the repository.
 | ||||
| 	ErrCodeLifecyclePolicyNotFoundException = "LifecyclePolicyNotFoundException" | ||||
| 
 | ||||
| 	// ErrCodeLifecyclePolicyPreviewInProgressException for service response error code
 | ||||
| 	// "LifecyclePolicyPreviewInProgressException".
 | ||||
| 	//
 | ||||
| 	// The previous lifecycle policy preview request has not completed. Please try
 | ||||
| 	// again later.
 | ||||
| 	ErrCodeLifecyclePolicyPreviewInProgressException = "LifecyclePolicyPreviewInProgressException" | ||||
| 
 | ||||
| 	// ErrCodeLifecyclePolicyPreviewNotFoundException for service response error code
 | ||||
| 	// "LifecyclePolicyPreviewNotFoundException".
 | ||||
| 	//
 | ||||
| 	// There is no dry run for this repository.
 | ||||
| 	ErrCodeLifecyclePolicyPreviewNotFoundException = "LifecyclePolicyPreviewNotFoundException" | ||||
| 
 | ||||
| 	// ErrCodeLimitExceededException for service response error code
 | ||||
| 	// "LimitExceededException".
 | ||||
| 	//
 | ||||
| 	// The operation did not succeed because it would have exceeded a service limit
 | ||||
| 	// for your account. For more information, see Amazon ECR Default Service Limits
 | ||||
| 	// (http://docs.aws.amazon.com/AmazonECR/latest/userguide/service_limits.html)
 | ||||
| 	// in the Amazon EC2 Container Registry User Guide.
 | ||||
| 	// in the Amazon Elastic Container Registry User Guide.
 | ||||
| 	ErrCodeLimitExceededException = "LimitExceededException" | ||||
| 
 | ||||
| 	// ErrCodeRepositoryAlreadyExistsException for service response error code
 | ||||
|  |  | |||
|  | @ -29,8 +29,9 @@ var initRequest func(*request.Request) | |||
| 
 | ||||
| // Service information constants
 | ||||
| const ( | ||||
| 	ServiceName = "ecr"       // Service endpoint prefix API calls made to.
 | ||||
| 	EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata.
 | ||||
| 	ServiceName = "ecr"       // Name of service.
 | ||||
| 	EndpointsID = ServiceName // ID to lookup a service endpoint with.
 | ||||
| 	ServiceID   = "ECR"       // ServiceID is a unique identifer of a specific service.
 | ||||
| ) | ||||
| 
 | ||||
| // New creates a new instance of the ECR client with a session.
 | ||||
|  | @ -55,6 +56,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio | |||
| 			cfg, | ||||
| 			metadata.ClientInfo{ | ||||
| 				ServiceName:   ServiceName, | ||||
| 				ServiceID:     ServiceID, | ||||
| 				SigningName:   signingName, | ||||
| 				SigningRegion: signingRegion, | ||||
| 				Endpoint:      endpoint, | ||||
|  |  | |||
										
											
												File diff suppressed because it is too large
												Load Diff
											
										
									
								
							|  | @ -3,27 +3,22 @@ | |||
| // Package elb provides the client and types for making API
 | ||||
| // requests to Elastic Load Balancing.
 | ||||
| //
 | ||||
| // A load balancer distributes incoming traffic across your EC2 instances. This
 | ||||
| // enables you to increase the availability of your application. The load balancer
 | ||||
| // also monitors the health of its registered instances and ensures that it
 | ||||
| // routes traffic only to healthy instances. You configure your load balancer
 | ||||
| // to accept incoming traffic by specifying one or more listeners, which are
 | ||||
| // configured with a protocol and port number for connections from clients to
 | ||||
| // the load balancer and a protocol and port number for connections from the
 | ||||
| // load balancer to the instances.
 | ||||
| // A load balancer can distribute incoming traffic across your EC2 instances.
 | ||||
| // This enables you to increase the availability of your application. The load
 | ||||
| // balancer also monitors the health of its registered instances and ensures
 | ||||
| // that it routes traffic only to healthy instances. You configure your load
 | ||||
| // balancer to accept incoming traffic by specifying one or more listeners,
 | ||||
| // which are configured with a protocol and port number for connections from
 | ||||
| // clients to the load balancer and a protocol and port number for connections
 | ||||
| // from the load balancer to the instances.
 | ||||
| //
 | ||||
| // Elastic Load Balancing supports two types of load balancers: Classic Load
 | ||||
| // Balancers and Application Load Balancers (new). A Classic Load Balancer makes
 | ||||
| // routing and load balancing decisions either at the transport layer (TCP/SSL)
 | ||||
| // or the application layer (HTTP/HTTPS), and supports either EC2-Classic or
 | ||||
| // a VPC. An Application Load Balancer makes routing and load balancing decisions
 | ||||
| // at the application layer (HTTP/HTTPS), supports path-based routing, and can
 | ||||
| // route requests to one or more ports on each EC2 instance or container instance
 | ||||
| // in your virtual private cloud (VPC). For more information, see the Elastic
 | ||||
| // Load Balancing User Guide (http://docs.aws.amazon.com/elasticloadbalancing/latest/userguide/what-is-load-balancing.html).
 | ||||
| // Elastic Load Balancing supports three types of load balancers: Application
 | ||||
| // Load Balancers, Network Load Balancers, and Classic Load Balancers. You can
 | ||||
| // select a load balancer based on your application needs. For more information,
 | ||||
| // see the Elastic Load Balancing User Guide (http://docs.aws.amazon.com/elasticloadbalancing/latest/userguide/).
 | ||||
| //
 | ||||
| // This reference covers the 2012-06-01 API, which supports Classic Load Balancers.
 | ||||
| // The 2015-12-01 API supports Application Load Balancers.
 | ||||
| // The 2015-12-01 API supports Application Load Balancers and Network Load Balancers.
 | ||||
| //
 | ||||
| // To get started, create a load balancer with one or more listeners using CreateLoadBalancer.
 | ||||
| // Register your instances with the load balancer using RegisterInstancesWithLoadBalancer.
 | ||||
|  | @ -39,7 +34,7 @@ | |||
| //
 | ||||
| // Using the Client
 | ||||
| //
 | ||||
| // To Elastic Load Balancing with the SDK use the New function to create
 | ||||
| // To contact Elastic Load Balancing with the SDK use the New function to create
 | ||||
| // a new service client. With that client you can make API requests to the service.
 | ||||
| // These clients are safe to use concurrently.
 | ||||
| //
 | ||||
|  |  | |||
|  | @ -91,6 +91,12 @@ const ( | |||
| 	// The specified load balancer attribute does not exist.
 | ||||
| 	ErrCodeLoadBalancerAttributeNotFoundException = "LoadBalancerAttributeNotFound" | ||||
| 
 | ||||
| 	// ErrCodeOperationNotPermittedException for service response error code
 | ||||
| 	// "OperationNotPermitted".
 | ||||
| 	//
 | ||||
| 	// This operation is not allowed.
 | ||||
| 	ErrCodeOperationNotPermittedException = "OperationNotPermitted" | ||||
| 
 | ||||
| 	// ErrCodePolicyNotFoundException for service response error code
 | ||||
| 	// "PolicyNotFound".
 | ||||
| 	//
 | ||||
|  |  | |||
|  | @ -29,8 +29,9 @@ var initRequest func(*request.Request) | |||
| 
 | ||||
| // Service information constants
 | ||||
| const ( | ||||
| 	ServiceName = "elasticloadbalancing" // Service endpoint prefix API calls made to.
 | ||||
| 	EndpointsID = ServiceName            // Service ID for Regions and Endpoints metadata.
 | ||||
| 	ServiceName = "elasticloadbalancing"   // Name of service.
 | ||||
| 	EndpointsID = ServiceName              // ID to lookup a service endpoint with.
 | ||||
| 	ServiceID   = "Elastic Load Balancing" // ServiceID is a unique identifer of a specific service.
 | ||||
| ) | ||||
| 
 | ||||
| // New creates a new instance of the ELB client with a session.
 | ||||
|  | @ -55,6 +56,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio | |||
| 			cfg, | ||||
| 			metadata.ClientInfo{ | ||||
| 				ServiceName:   ServiceName, | ||||
| 				ServiceID:     ServiceID, | ||||
| 				SigningName:   signingName, | ||||
| 				SigningRegion: signingRegion, | ||||
| 				Endpoint:      endpoint, | ||||
|  |  | |||
										
											
												File diff suppressed because it is too large
												Load Diff
											
										
									
								
							|  | @ -60,7 +60,7 @@ | |||
| //
 | ||||
| // Using the Client
 | ||||
| //
 | ||||
| // To Elastic Load Balancing with the SDK use the New function to create
 | ||||
| // To contact Elastic Load Balancing with the SDK use the New function to create
 | ||||
| // a new service client. With that client you can make API requests to the service.
 | ||||
| // These clients are safe to use concurrently.
 | ||||
| //
 | ||||
|  |  | |||
|  | @ -65,6 +65,12 @@ const ( | |||
| 	// The requested configuration is not valid.
 | ||||
| 	ErrCodeInvalidConfigurationRequestException = "InvalidConfigurationRequest" | ||||
| 
 | ||||
| 	// ErrCodeInvalidLoadBalancerActionException for service response error code
 | ||||
| 	// "InvalidLoadBalancerAction".
 | ||||
| 	//
 | ||||
| 	// The requested action is not valid.
 | ||||
| 	ErrCodeInvalidLoadBalancerActionException = "InvalidLoadBalancerAction" | ||||
| 
 | ||||
| 	// ErrCodeInvalidSchemeException for service response error code
 | ||||
| 	// "InvalidScheme".
 | ||||
| 	//
 | ||||
|  | @ -86,8 +92,8 @@ const ( | |||
| 	// ErrCodeInvalidTargetException for service response error code
 | ||||
| 	// "InvalidTarget".
 | ||||
| 	//
 | ||||
| 	// The specified target does not exist or is not in the same VPC as the target
 | ||||
| 	// group.
 | ||||
| 	// The specified target does not exist, is not in the same VPC as the target
 | ||||
| 	// group, or has an unsupported instance type.
 | ||||
| 	ErrCodeInvalidTargetException = "InvalidTarget" | ||||
| 
 | ||||
| 	// ErrCodeListenerNotFoundException for service response error code
 | ||||
|  | @ -150,10 +156,16 @@ const ( | |||
| 	// The specified target group does not exist.
 | ||||
| 	ErrCodeTargetGroupNotFoundException = "TargetGroupNotFound" | ||||
| 
 | ||||
| 	// ErrCodeTooManyActionsException for service response error code
 | ||||
| 	// "TooManyActions".
 | ||||
| 	//
 | ||||
| 	// You've reached the limit on the number of actions per rule.
 | ||||
| 	ErrCodeTooManyActionsException = "TooManyActions" | ||||
| 
 | ||||
| 	// ErrCodeTooManyCertificatesException for service response error code
 | ||||
| 	// "TooManyCertificates".
 | ||||
| 	//
 | ||||
| 	// You've reached the limit on the number of certificates per listener.
 | ||||
| 	// You've reached the limit on the number of certificates per load balancer.
 | ||||
| 	ErrCodeTooManyCertificatesException = "TooManyCertificates" | ||||
| 
 | ||||
| 	// ErrCodeTooManyListenersException for service response error code
 | ||||
|  |  | |||
|  | @ -29,8 +29,9 @@ var initRequest func(*request.Request) | |||
| 
 | ||||
| // Service information constants
 | ||||
| const ( | ||||
| 	ServiceName = "elasticloadbalancing" // Service endpoint prefix API calls made to.
 | ||||
| 	EndpointsID = ServiceName            // Service ID for Regions and Endpoints metadata.
 | ||||
| 	ServiceName = "elasticloadbalancing"      // Name of service.
 | ||||
| 	EndpointsID = ServiceName                 // ID to lookup a service endpoint with.
 | ||||
| 	ServiceID   = "Elastic Load Balancing v2" // ServiceID is a unique identifer of a specific service.
 | ||||
| ) | ||||
| 
 | ||||
| // New creates a new instance of the ELBV2 client with a session.
 | ||||
|  | @ -55,6 +56,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio | |||
| 			cfg, | ||||
| 			metadata.ClientInfo{ | ||||
| 				ServiceName:   ServiceName, | ||||
| 				ServiceID:     ServiceID, | ||||
| 				SigningName:   signingName, | ||||
| 				SigningRegion: signingRegion, | ||||
| 				Endpoint:      endpoint, | ||||
|  |  | |||
										
											
												File diff suppressed because it is too large
												Load Diff
											
										
									
								
							|  | @ -82,7 +82,7 @@ | |||
| //
 | ||||
| // Using the Client
 | ||||
| //
 | ||||
| // To AWS Key Management Service with the SDK use the New function to create
 | ||||
| // To contact AWS Key Management Service with the SDK use the New function to create
 | ||||
| // a new service client. With that client you can make API requests to the service.
 | ||||
| // These clients are safe to use concurrently.
 | ||||
| //
 | ||||
|  |  | |||
|  | @ -28,9 +28,8 @@ const ( | |||
| 	// "ExpiredImportTokenException".
 | ||||
| 	//
 | ||||
| 	// The request was rejected because the provided import token is expired. Use
 | ||||
| 	// GetParametersForImport to retrieve a new import token and public key, use
 | ||||
| 	// the new public key to encrypt the key material, and then try the request
 | ||||
| 	// again.
 | ||||
| 	// GetParametersForImport to get a new import token and public key, use the
 | ||||
| 	// new public key to encrypt the key material, and then try the request again.
 | ||||
| 	ErrCodeExpiredImportTokenException = "ExpiredImportTokenException" | ||||
| 
 | ||||
| 	// ErrCodeIncorrectKeyMaterialException for service response error code
 | ||||
|  | @ -42,11 +41,11 @@ const ( | |||
| 	ErrCodeIncorrectKeyMaterialException = "IncorrectKeyMaterialException" | ||||
| 
 | ||||
| 	// ErrCodeInternalException for service response error code
 | ||||
| 	// "InternalException".
 | ||||
| 	// "KMSInternalException".
 | ||||
| 	//
 | ||||
| 	// The request was rejected because an internal exception occurred. The request
 | ||||
| 	// can be retried.
 | ||||
| 	ErrCodeInternalException = "InternalException" | ||||
| 	ErrCodeInternalException = "KMSInternalException" | ||||
| 
 | ||||
| 	// ErrCodeInvalidAliasNameException for service response error code
 | ||||
| 	// "InvalidAliasNameException".
 | ||||
|  | @ -63,8 +62,9 @@ const ( | |||
| 	// ErrCodeInvalidCiphertextException for service response error code
 | ||||
| 	// "InvalidCiphertextException".
 | ||||
| 	//
 | ||||
| 	// The request was rejected because the specified ciphertext has been corrupted
 | ||||
| 	// or is otherwise invalid.
 | ||||
| 	// The request was rejected because the specified ciphertext, or additional
 | ||||
| 	// authenticated data incorporated into the ciphertext, such as the encryption
 | ||||
| 	// context, is corrupted, missing, or otherwise invalid.
 | ||||
| 	ErrCodeInvalidCiphertextException = "InvalidCiphertextException" | ||||
| 
 | ||||
| 	// ErrCodeInvalidGrantIdException for service response error code
 | ||||
|  | @ -100,7 +100,7 @@ const ( | |||
| 	ErrCodeInvalidMarkerException = "InvalidMarkerException" | ||||
| 
 | ||||
| 	// ErrCodeInvalidStateException for service response error code
 | ||||
| 	// "InvalidStateException".
 | ||||
| 	// "KMSInvalidStateException".
 | ||||
| 	//
 | ||||
| 	// The request was rejected because the state of the specified resource is not
 | ||||
| 	// valid for this request.
 | ||||
|  | @ -108,7 +108,7 @@ const ( | |||
| 	// For more information about how key state affects the use of a CMK, see How
 | ||||
| 	// Key State Affects Use of a Customer Master Key (http://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
 | ||||
| 	// in the AWS Key Management Service Developer Guide.
 | ||||
| 	ErrCodeInvalidStateException = "InvalidStateException" | ||||
| 	ErrCodeInvalidStateException = "KMSInvalidStateException" | ||||
| 
 | ||||
| 	// ErrCodeKeyUnavailableException for service response error code
 | ||||
| 	// "KeyUnavailableException".
 | ||||
|  |  | |||
|  | @ -29,8 +29,9 @@ var initRequest func(*request.Request) | |||
| 
 | ||||
| // Service information constants
 | ||||
| const ( | ||||
| 	ServiceName = "kms"       // Service endpoint prefix API calls made to.
 | ||||
| 	EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata.
 | ||||
| 	ServiceName = "kms"       // Name of service.
 | ||||
| 	EndpointsID = ServiceName // ID to lookup a service endpoint with.
 | ||||
| 	ServiceID   = "KMS"       // ServiceID is a unique identifer of a specific service.
 | ||||
| ) | ||||
| 
 | ||||
| // New creates a new instance of the KMS client with a session.
 | ||||
|  | @ -55,6 +56,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio | |||
| 			cfg, | ||||
| 			metadata.ClientInfo{ | ||||
| 				ServiceName:   ServiceName, | ||||
| 				ServiceID:     ServiceID, | ||||
| 				SigningName:   signingName, | ||||
| 				SigningRegion: signingRegion, | ||||
| 				Endpoint:      endpoint, | ||||
|  |  | |||
|  | @ -14,7 +14,7 @@ const opAssumeRole = "AssumeRole" | |||
| 
 | ||||
| // AssumeRoleRequest generates a "aws/request.Request" representing the
 | ||||
| // client's request for the AssumeRole operation. The "output" return
 | ||||
| // value will be populated with the request's response once the request complets
 | ||||
| // value will be populated with the request's response once the request completes
 | ||||
| // successfuly.
 | ||||
| //
 | ||||
| // Use "Send" method on the returned Request to send the API call to the service.
 | ||||
|  | @ -35,7 +35,7 @@ const opAssumeRole = "AssumeRole" | |||
| //        fmt.Println(resp)
 | ||||
| //    }
 | ||||
| //
 | ||||
| // Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole
 | ||||
| // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole
 | ||||
| func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, output *AssumeRoleOutput) { | ||||
| 	op := &request.Operation{ | ||||
| 		Name:       opAssumeRole, | ||||
|  | @ -88,9 +88,18 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o | |||
| // Scenarios for Temporary Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html#sts-introduction)
 | ||||
| // in the IAM User Guide.
 | ||||
| //
 | ||||
| // The temporary security credentials are valid for the duration that you specified
 | ||||
| // when calling AssumeRole, which can be from 900 seconds (15 minutes) to a
 | ||||
| // maximum of 3600 seconds (1 hour). The default is 1 hour.
 | ||||
| // By default, the temporary security credentials created by AssumeRole last
 | ||||
| // for one hour. However, you can use the optional DurationSeconds parameter
 | ||||
| // to specify the duration of your session. You can provide a value from 900
 | ||||
| // seconds (15 minutes) up to the maximum session duration setting for the role.
 | ||||
| // This setting can have a value from 1 hour to 12 hours. To learn how to view
 | ||||
| // the maximum value for your role, see View the Maximum Session Duration Setting
 | ||||
| // for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
 | ||||
| // in the IAM User Guide. The maximum session duration limit applies when you
 | ||||
| // use the AssumeRole* API operations or the assume-role* CLI operations but
 | ||||
| // does not apply when you use those operations to create a console URL. For
 | ||||
| // more information, see Using IAM Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html)
 | ||||
| // in the IAM User Guide.
 | ||||
| //
 | ||||
| // The temporary security credentials created by AssumeRole can be used to make
 | ||||
| // API calls to any AWS service with the following exception: you cannot call
 | ||||
|  | @ -121,7 +130,12 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o | |||
| // the user to call AssumeRole on the ARN of the role in the other account.
 | ||||
| // If the user is in the same account as the role, then you can either attach
 | ||||
| // a policy to the user (identical to the previous different account user),
 | ||||
| // or you can add the user as a principal directly in the role's trust policy
 | ||||
| // or you can add the user as a principal directly in the role's trust policy.
 | ||||
| // In this case, the trust policy acts as the only resource-based policy in
 | ||||
| // IAM, and users in the same account as the role do not need explicit permission
 | ||||
| // to assume the role. For more information about trust policies and resource-based
 | ||||
| // policies, see IAM Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html)
 | ||||
| // in the IAM User Guide.
 | ||||
| //
 | ||||
| // Using MFA with AssumeRole
 | ||||
| //
 | ||||
|  | @ -168,7 +182,7 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o | |||
| //   and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
 | ||||
| //   in the IAM User Guide.
 | ||||
| //
 | ||||
| // Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole
 | ||||
| // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole
 | ||||
| func (c *STS) AssumeRole(input *AssumeRoleInput) (*AssumeRoleOutput, error) { | ||||
| 	req, out := c.AssumeRoleRequest(input) | ||||
| 	return out, req.Send() | ||||
|  | @ -194,7 +208,7 @@ const opAssumeRoleWithSAML = "AssumeRoleWithSAML" | |||
| 
 | ||||
| // AssumeRoleWithSAMLRequest generates a "aws/request.Request" representing the
 | ||||
| // client's request for the AssumeRoleWithSAML operation. The "output" return
 | ||||
| // value will be populated with the request's response once the request complets
 | ||||
| // value will be populated with the request's response once the request completes
 | ||||
| // successfuly.
 | ||||
| //
 | ||||
| // Use "Send" method on the returned Request to send the API call to the service.
 | ||||
|  | @ -215,7 +229,7 @@ const opAssumeRoleWithSAML = "AssumeRoleWithSAML" | |||
| //        fmt.Println(resp)
 | ||||
| //    }
 | ||||
| //
 | ||||
| // Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML
 | ||||
| // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML
 | ||||
| func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *request.Request, output *AssumeRoleWithSAMLOutput) { | ||||
| 	op := &request.Operation{ | ||||
| 		Name:       opAssumeRoleWithSAML, | ||||
|  | @ -247,11 +261,20 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re | |||
| // an access key ID, a secret access key, and a security token. Applications
 | ||||
| // can use these temporary security credentials to sign calls to AWS services.
 | ||||
| //
 | ||||
| // The temporary security credentials are valid for the duration that you specified
 | ||||
| // when calling AssumeRole, or until the time specified in the SAML authentication
 | ||||
| // response's SessionNotOnOrAfter value, whichever is shorter. The duration
 | ||||
| // can be from 900 seconds (15 minutes) to a maximum of 3600 seconds (1 hour).
 | ||||
| // The default is 1 hour.
 | ||||
| // By default, the temporary security credentials created by AssumeRoleWithSAML
 | ||||
| // last for one hour. However, you can use the optional DurationSeconds parameter
 | ||||
| // to specify the duration of your session. Your role session lasts for the
 | ||||
| // duration that you specify, or until the time specified in the SAML authentication
 | ||||
| // response's SessionNotOnOrAfter value, whichever is shorter. You can provide
 | ||||
| // a DurationSeconds value from 900 seconds (15 minutes) up to the maximum session
 | ||||
| // duration setting for the role. This setting can have a value from 1 hour
 | ||||
| // to 12 hours. To learn how to view the maximum value for your role, see View
 | ||||
| // the Maximum Session Duration Setting for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
 | ||||
| // in the IAM User Guide. The maximum session duration limit applies when you
 | ||||
| // use the AssumeRole* API operations or the assume-role* CLI operations but
 | ||||
| // does not apply when you use those operations to create a console URL. For
 | ||||
| // more information, see Using IAM Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html)
 | ||||
| // in the IAM User Guide.
 | ||||
| //
 | ||||
| // The temporary security credentials created by AssumeRoleWithSAML can be used
 | ||||
| // to make API calls to any AWS service with the following exception: you cannot
 | ||||
|  | @ -341,7 +364,7 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re | |||
| //   and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
 | ||||
| //   in the IAM User Guide.
 | ||||
| //
 | ||||
| // Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML
 | ||||
| // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML
 | ||||
| func (c *STS) AssumeRoleWithSAML(input *AssumeRoleWithSAMLInput) (*AssumeRoleWithSAMLOutput, error) { | ||||
| 	req, out := c.AssumeRoleWithSAMLRequest(input) | ||||
| 	return out, req.Send() | ||||
|  | @ -367,7 +390,7 @@ const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity" | |||
| 
 | ||||
| // AssumeRoleWithWebIdentityRequest generates a "aws/request.Request" representing the
 | ||||
| // client's request for the AssumeRoleWithWebIdentity operation. The "output" return
 | ||||
| // value will be populated with the request's response once the request complets
 | ||||
| // value will be populated with the request's response once the request completes
 | ||||
| // successfuly.
 | ||||
| //
 | ||||
| // Use "Send" method on the returned Request to send the API call to the service.
 | ||||
|  | @ -388,7 +411,7 @@ const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity" | |||
| //        fmt.Println(resp)
 | ||||
| //    }
 | ||||
| //
 | ||||
| // Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity
 | ||||
| // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity
 | ||||
| func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityInput) (req *request.Request, output *AssumeRoleWithWebIdentityOutput) { | ||||
| 	op := &request.Operation{ | ||||
| 		Name:       opAssumeRoleWithWebIdentity, | ||||
|  | @ -438,9 +461,18 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI | |||
| // key ID, a secret access key, and a security token. Applications can use these
 | ||||
| // temporary security credentials to sign calls to AWS service APIs.
 | ||||
| //
 | ||||
| // The credentials are valid for the duration that you specified when calling
 | ||||
| // AssumeRoleWithWebIdentity, which can be from 900 seconds (15 minutes) to
 | ||||
| // a maximum of 3600 seconds (1 hour). The default is 1 hour.
 | ||||
| // By default, the temporary security credentials created by AssumeRoleWithWebIdentity
 | ||||
| // last for one hour. However, you can use the optional DurationSeconds parameter
 | ||||
| // to specify the duration of your session. You can provide a value from 900
 | ||||
| // seconds (15 minutes) up to the maximum session duration setting for the role.
 | ||||
| // This setting can have a value from 1 hour to 12 hours. To learn how to view
 | ||||
| // the maximum value for your role, see View the Maximum Session Duration Setting
 | ||||
| // for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
 | ||||
| // in the IAM User Guide. The maximum session duration limit applies when you
 | ||||
| // use the AssumeRole* API operations or the assume-role* CLI operations but
 | ||||
| // does not apply when you use those operations to create a console URL. For
 | ||||
| // more information, see Using IAM Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html)
 | ||||
| // in the IAM User Guide.
 | ||||
| //
 | ||||
| // The temporary security credentials created by AssumeRoleWithWebIdentity can
 | ||||
| // be used to make API calls to any AWS service with the following exception:
 | ||||
|  | @ -492,7 +524,7 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI | |||
| //    the information from these providers to get and use temporary security
 | ||||
| //    credentials.
 | ||||
| //
 | ||||
| //    * Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/4617974389850313).
 | ||||
| //    * Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications).
 | ||||
| //    This article discusses web identity federation and shows an example of
 | ||||
| //    how to use web identity federation to get access to content in Amazon
 | ||||
| //    S3.
 | ||||
|  | @ -543,7 +575,7 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI | |||
| //   and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
 | ||||
| //   in the IAM User Guide.
 | ||||
| //
 | ||||
| // Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity
 | ||||
| // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity
 | ||||
| func (c *STS) AssumeRoleWithWebIdentity(input *AssumeRoleWithWebIdentityInput) (*AssumeRoleWithWebIdentityOutput, error) { | ||||
| 	req, out := c.AssumeRoleWithWebIdentityRequest(input) | ||||
| 	return out, req.Send() | ||||
|  | @ -569,7 +601,7 @@ const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage" | |||
| 
 | ||||
| // DecodeAuthorizationMessageRequest generates a "aws/request.Request" representing the
 | ||||
| // client's request for the DecodeAuthorizationMessage operation. The "output" return
 | ||||
| // value will be populated with the request's response once the request complets
 | ||||
| // value will be populated with the request's response once the request completes
 | ||||
| // successfuly.
 | ||||
| //
 | ||||
| // Use "Send" method on the returned Request to send the API call to the service.
 | ||||
|  | @ -590,7 +622,7 @@ const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage" | |||
| //        fmt.Println(resp)
 | ||||
| //    }
 | ||||
| //
 | ||||
| // Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage
 | ||||
| // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage
 | ||||
| func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessageInput) (req *request.Request, output *DecodeAuthorizationMessageOutput) { | ||||
| 	op := &request.Operation{ | ||||
| 		Name:       opDecodeAuthorizationMessage, | ||||
|  | @ -655,7 +687,7 @@ func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessag | |||
| //   invalid. This can happen if the token contains invalid characters, such as
 | ||||
| //   linebreaks.
 | ||||
| //
 | ||||
| // Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage
 | ||||
| // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage
 | ||||
| func (c *STS) DecodeAuthorizationMessage(input *DecodeAuthorizationMessageInput) (*DecodeAuthorizationMessageOutput, error) { | ||||
| 	req, out := c.DecodeAuthorizationMessageRequest(input) | ||||
| 	return out, req.Send() | ||||
|  | @ -681,7 +713,7 @@ const opGetCallerIdentity = "GetCallerIdentity" | |||
| 
 | ||||
| // GetCallerIdentityRequest generates a "aws/request.Request" representing the
 | ||||
| // client's request for the GetCallerIdentity operation. The "output" return
 | ||||
| // value will be populated with the request's response once the request complets
 | ||||
| // value will be populated with the request's response once the request completes
 | ||||
| // successfuly.
 | ||||
| //
 | ||||
| // Use "Send" method on the returned Request to send the API call to the service.
 | ||||
|  | @ -702,7 +734,7 @@ const opGetCallerIdentity = "GetCallerIdentity" | |||
| //        fmt.Println(resp)
 | ||||
| //    }
 | ||||
| //
 | ||||
| // Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity
 | ||||
| // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity
 | ||||
| func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *request.Request, output *GetCallerIdentityOutput) { | ||||
| 	op := &request.Operation{ | ||||
| 		Name:       opGetCallerIdentity, | ||||
|  | @ -730,7 +762,7 @@ func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *requ | |||
| //
 | ||||
| // See the AWS API reference guide for AWS Security Token Service's
 | ||||
| // API operation GetCallerIdentity for usage and error information.
 | ||||
| // Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity
 | ||||
| // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity
 | ||||
| func (c *STS) GetCallerIdentity(input *GetCallerIdentityInput) (*GetCallerIdentityOutput, error) { | ||||
| 	req, out := c.GetCallerIdentityRequest(input) | ||||
| 	return out, req.Send() | ||||
|  | @ -756,7 +788,7 @@ const opGetFederationToken = "GetFederationToken" | |||
| 
 | ||||
| // GetFederationTokenRequest generates a "aws/request.Request" representing the
 | ||||
| // client's request for the GetFederationToken operation. The "output" return
 | ||||
| // value will be populated with the request's response once the request complets
 | ||||
| // value will be populated with the request's response once the request completes
 | ||||
| // successfuly.
 | ||||
| //
 | ||||
| // Use "Send" method on the returned Request to send the API call to the service.
 | ||||
|  | @ -777,7 +809,7 @@ const opGetFederationToken = "GetFederationToken" | |||
| //        fmt.Println(resp)
 | ||||
| //    }
 | ||||
| //
 | ||||
| // Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken
 | ||||
| // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken
 | ||||
| func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *request.Request, output *GetFederationTokenOutput) { | ||||
| 	op := &request.Operation{ | ||||
| 		Name:       opGetFederationToken, | ||||
|  | @ -899,7 +931,7 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re | |||
| //   and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
 | ||||
| //   in the IAM User Guide.
 | ||||
| //
 | ||||
| // Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken
 | ||||
| // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken
 | ||||
| func (c *STS) GetFederationToken(input *GetFederationTokenInput) (*GetFederationTokenOutput, error) { | ||||
| 	req, out := c.GetFederationTokenRequest(input) | ||||
| 	return out, req.Send() | ||||
|  | @ -925,7 +957,7 @@ const opGetSessionToken = "GetSessionToken" | |||
| 
 | ||||
| // GetSessionTokenRequest generates a "aws/request.Request" representing the
 | ||||
| // client's request for the GetSessionToken operation. The "output" return
 | ||||
| // value will be populated with the request's response once the request complets
 | ||||
| // value will be populated with the request's response once the request completes
 | ||||
| // successfuly.
 | ||||
| //
 | ||||
| // Use "Send" method on the returned Request to send the API call to the service.
 | ||||
|  | @ -946,7 +978,7 @@ const opGetSessionToken = "GetSessionToken" | |||
| //        fmt.Println(resp)
 | ||||
| //    }
 | ||||
| //
 | ||||
| // Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken
 | ||||
| // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken
 | ||||
| func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.Request, output *GetSessionTokenOutput) { | ||||
| 	op := &request.Operation{ | ||||
| 		Name:       opGetSessionToken, | ||||
|  | @ -1027,7 +1059,7 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request. | |||
| //   and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
 | ||||
| //   in the IAM User Guide.
 | ||||
| //
 | ||||
| // Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken
 | ||||
| // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken
 | ||||
| func (c *STS) GetSessionToken(input *GetSessionTokenInput) (*GetSessionTokenOutput, error) { | ||||
| 	req, out := c.GetSessionTokenRequest(input) | ||||
| 	return out, req.Send() | ||||
|  | @ -1049,20 +1081,27 @@ func (c *STS) GetSessionTokenWithContext(ctx aws.Context, input *GetSessionToken | |||
| 	return out, req.Send() | ||||
| } | ||||
| 
 | ||||
| // Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleRequest
 | ||||
| type AssumeRoleInput struct { | ||||
| 	_ struct{} `type:"structure"` | ||||
| 
 | ||||
| 	// The duration, in seconds, of the role session. The value can range from 900
 | ||||
| 	// seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set
 | ||||
| 	// to 3600 seconds.
 | ||||
| 	// seconds (15 minutes) up to the maximum session duration setting for the role.
 | ||||
| 	// This setting can have a value from 1 hour to 12 hours. If you specify a value
 | ||||
| 	// higher than this setting, the operation fails. For example, if you specify
 | ||||
| 	// a session duration of 12 hours, but your administrator set the maximum session
 | ||||
| 	// duration to 6 hours, your operation fails. To learn how to view the maximum
 | ||||
| 	// value for your role, see View the Maximum Session Duration Setting for a
 | ||||
| 	// Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
 | ||||
| 	// in the IAM User Guide.
 | ||||
| 	//
 | ||||
| 	// This is separate from the duration of a console session that you might request
 | ||||
| 	// using the returned credentials. The request to the federation endpoint for
 | ||||
| 	// a console sign-in token takes a SessionDuration parameter that specifies
 | ||||
| 	// the maximum length of the console session, separately from the DurationSeconds
 | ||||
| 	// parameter on this API. For more information, see Creating a URL that Enables
 | ||||
| 	// Federated Users to Access the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
 | ||||
| 	// By default, the value is set to 3600 seconds.
 | ||||
| 	//
 | ||||
| 	// The DurationSeconds parameter is separate from the duration of a console
 | ||||
| 	// session that you might request using the returned credentials. The request
 | ||||
| 	// to the federation endpoint for a console sign-in token takes a SessionDuration
 | ||||
| 	// parameter that specifies the maximum length of the console session. For more
 | ||||
| 	// information, see Creating a URL that Enables Federated Users to Access the
 | ||||
| 	// AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
 | ||||
| 	// in the IAM User Guide.
 | ||||
| 	DurationSeconds *int64 `min:"900" type:"integer"` | ||||
| 
 | ||||
|  | @ -1241,7 +1280,6 @@ func (s *AssumeRoleInput) SetTokenCode(v string) *AssumeRoleInput { | |||
| 
 | ||||
| // Contains the response to a successful AssumeRole request, including temporary
 | ||||
| // AWS credentials that can be used to make AWS requests.
 | ||||
| // Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleResponse
 | ||||
| type AssumeRoleOutput struct { | ||||
| 	_ struct{} `type:"structure"` | ||||
| 
 | ||||
|  | @ -1295,22 +1333,30 @@ func (s *AssumeRoleOutput) SetPackedPolicySize(v int64) *AssumeRoleOutput { | |||
| 	return s | ||||
| } | ||||
| 
 | ||||
| // Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAMLRequest
 | ||||
| type AssumeRoleWithSAMLInput struct { | ||||
| 	_ struct{} `type:"structure"` | ||||
| 
 | ||||
| 	// The duration, in seconds, of the role session. The value can range from 900
 | ||||
| 	// seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set
 | ||||
| 	// to 3600 seconds. An expiration can also be specified in the SAML authentication
 | ||||
| 	// response's SessionNotOnOrAfter value. The actual expiration time is whichever
 | ||||
| 	// value is shorter.
 | ||||
| 	// The duration, in seconds, of the role session. Your role session lasts for
 | ||||
| 	// the duration that you specify for the DurationSeconds parameter, or until
 | ||||
| 	// the time specified in the SAML authentication response's SessionNotOnOrAfter
 | ||||
| 	// value, whichever is shorter. You can provide a DurationSeconds value from
 | ||||
| 	// 900 seconds (15 minutes) up to the maximum session duration setting for the
 | ||||
| 	// role. This setting can have a value from 1 hour to 12 hours. If you specify
 | ||||
| 	// a value higher than this setting, the operation fails. For example, if you
 | ||||
| 	// specify a session duration of 12 hours, but your administrator set the maximum
 | ||||
| 	// session duration to 6 hours, your operation fails. To learn how to view the
 | ||||
| 	// maximum value for your role, see View the Maximum Session Duration Setting
 | ||||
| 	// for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
 | ||||
| 	// in the IAM User Guide.
 | ||||
| 	//
 | ||||
| 	// This is separate from the duration of a console session that you might request
 | ||||
| 	// using the returned credentials. The request to the federation endpoint for
 | ||||
| 	// a console sign-in token takes a SessionDuration parameter that specifies
 | ||||
| 	// the maximum length of the console session, separately from the DurationSeconds
 | ||||
| 	// parameter on this API. For more information, see Enabling SAML 2.0 Federated
 | ||||
| 	// Users to Access the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-saml.html)
 | ||||
| 	// By default, the value is set to 3600 seconds.
 | ||||
| 	//
 | ||||
| 	// The DurationSeconds parameter is separate from the duration of a console
 | ||||
| 	// session that you might request using the returned credentials. The request
 | ||||
| 	// to the federation endpoint for a console sign-in token takes a SessionDuration
 | ||||
| 	// parameter that specifies the maximum length of the console session. For more
 | ||||
| 	// information, see Creating a URL that Enables Federated Users to Access the
 | ||||
| 	// AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
 | ||||
| 	// in the IAM User Guide.
 | ||||
| 	DurationSeconds *int64 `min:"900" type:"integer"` | ||||
| 
 | ||||
|  | @ -1436,7 +1482,6 @@ func (s *AssumeRoleWithSAMLInput) SetSAMLAssertion(v string) *AssumeRoleWithSAML | |||
| 
 | ||||
| // Contains the response to a successful AssumeRoleWithSAML request, including
 | ||||
| // temporary AWS credentials that can be used to make AWS requests.
 | ||||
| // Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAMLResponse
 | ||||
| type AssumeRoleWithSAMLOutput struct { | ||||
| 	_ struct{} `type:"structure"` | ||||
| 
 | ||||
|  | @ -1548,20 +1593,27 @@ func (s *AssumeRoleWithSAMLOutput) SetSubjectType(v string) *AssumeRoleWithSAMLO | |||
| 	return s | ||||
| } | ||||
| 
 | ||||
| // Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentityRequest
 | ||||
| type AssumeRoleWithWebIdentityInput struct { | ||||
| 	_ struct{} `type:"structure"` | ||||
| 
 | ||||
| 	// The duration, in seconds, of the role session. The value can range from 900
 | ||||
| 	// seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set
 | ||||
| 	// to 3600 seconds.
 | ||||
| 	// seconds (15 minutes) up to the maximum session duration setting for the role.
 | ||||
| 	// This setting can have a value from 1 hour to 12 hours. If you specify a value
 | ||||
| 	// higher than this setting, the operation fails. For example, if you specify
 | ||||
| 	// a session duration of 12 hours, but your administrator set the maximum session
 | ||||
| 	// duration to 6 hours, your operation fails. To learn how to view the maximum
 | ||||
| 	// value for your role, see View the Maximum Session Duration Setting for a
 | ||||
| 	// Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
 | ||||
| 	// in the IAM User Guide.
 | ||||
| 	//
 | ||||
| 	// This is separate from the duration of a console session that you might request
 | ||||
| 	// using the returned credentials. The request to the federation endpoint for
 | ||||
| 	// a console sign-in token takes a SessionDuration parameter that specifies
 | ||||
| 	// the maximum length of the console session, separately from the DurationSeconds
 | ||||
| 	// parameter on this API. For more information, see Creating a URL that Enables
 | ||||
| 	// Federated Users to Access the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
 | ||||
| 	// By default, the value is set to 3600 seconds.
 | ||||
| 	//
 | ||||
| 	// The DurationSeconds parameter is separate from the duration of a console
 | ||||
| 	// session that you might request using the returned credentials. The request
 | ||||
| 	// to the federation endpoint for a console sign-in token takes a SessionDuration
 | ||||
| 	// parameter that specifies the maximum length of the console session. For more
 | ||||
| 	// information, see Creating a URL that Enables Federated Users to Access the
 | ||||
| 	// AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
 | ||||
| 	// in the IAM User Guide.
 | ||||
| 	DurationSeconds *int64 `min:"900" type:"integer"` | ||||
| 
 | ||||
|  | @ -1711,7 +1763,6 @@ func (s *AssumeRoleWithWebIdentityInput) SetWebIdentityToken(v string) *AssumeRo | |||
| 
 | ||||
| // Contains the response to a successful AssumeRoleWithWebIdentity request,
 | ||||
| // including temporary AWS credentials that can be used to make AWS requests.
 | ||||
| // Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentityResponse
 | ||||
| type AssumeRoleWithWebIdentityOutput struct { | ||||
| 	_ struct{} `type:"structure"` | ||||
| 
 | ||||
|  | @ -1804,7 +1855,6 @@ func (s *AssumeRoleWithWebIdentityOutput) SetSubjectFromWebIdentityToken(v strin | |||
| 
 | ||||
| // The identifiers for the temporary security credentials that the operation
 | ||||
| // returns.
 | ||||
| // Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumedRoleUser
 | ||||
| type AssumedRoleUser struct { | ||||
| 	_ struct{} `type:"structure"` | ||||
| 
 | ||||
|  | @ -1847,7 +1897,6 @@ func (s *AssumedRoleUser) SetAssumedRoleId(v string) *AssumedRoleUser { | |||
| } | ||||
| 
 | ||||
| // AWS credentials for API authentication.
 | ||||
| // Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/Credentials
 | ||||
| type Credentials struct { | ||||
| 	_ struct{} `type:"structure"` | ||||
| 
 | ||||
|  | @ -1906,7 +1955,6 @@ func (s *Credentials) SetSessionToken(v string) *Credentials { | |||
| 	return s | ||||
| } | ||||
| 
 | ||||
| // Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessageRequest
 | ||||
| type DecodeAuthorizationMessageInput struct { | ||||
| 	_ struct{} `type:"structure"` | ||||
| 
 | ||||
|  | @ -1951,7 +1999,6 @@ func (s *DecodeAuthorizationMessageInput) SetEncodedMessage(v string) *DecodeAut | |||
| // A document that contains additional information about the authorization status
 | ||||
| // of a request from an encoded message that is returned in response to an AWS
 | ||||
| // request.
 | ||||
| // Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessageResponse
 | ||||
| type DecodeAuthorizationMessageOutput struct { | ||||
| 	_ struct{} `type:"structure"` | ||||
| 
 | ||||
|  | @ -1976,7 +2023,6 @@ func (s *DecodeAuthorizationMessageOutput) SetDecodedMessage(v string) *DecodeAu | |||
| } | ||||
| 
 | ||||
| // Identifiers for the federated user that is associated with the credentials.
 | ||||
| // Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/FederatedUser
 | ||||
| type FederatedUser struct { | ||||
| 	_ struct{} `type:"structure"` | ||||
| 
 | ||||
|  | @ -2017,7 +2063,6 @@ func (s *FederatedUser) SetFederatedUserId(v string) *FederatedUser { | |||
| 	return s | ||||
| } | ||||
| 
 | ||||
| // Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentityRequest
 | ||||
| type GetCallerIdentityInput struct { | ||||
| 	_ struct{} `type:"structure"` | ||||
| } | ||||
|  | @ -2034,7 +2079,6 @@ func (s GetCallerIdentityInput) GoString() string { | |||
| 
 | ||||
| // Contains the response to a successful GetCallerIdentity request, including
 | ||||
| // information about the entity making the request.
 | ||||
| // Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentityResponse
 | ||||
| type GetCallerIdentityOutput struct { | ||||
| 	_ struct{} `type:"structure"` | ||||
| 
 | ||||
|  | @ -2080,7 +2124,6 @@ func (s *GetCallerIdentityOutput) SetUserId(v string) *GetCallerIdentityOutput { | |||
| 	return s | ||||
| } | ||||
| 
 | ||||
| // Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationTokenRequest
 | ||||
| type GetFederationTokenInput struct { | ||||
| 	_ struct{} `type:"structure"` | ||||
| 
 | ||||
|  | @ -2189,7 +2232,6 @@ func (s *GetFederationTokenInput) SetPolicy(v string) *GetFederationTokenInput { | |||
| 
 | ||||
| // Contains the response to a successful GetFederationToken request, including
 | ||||
| // temporary AWS credentials that can be used to make AWS requests.
 | ||||
| // Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationTokenResponse
 | ||||
| type GetFederationTokenOutput struct { | ||||
| 	_ struct{} `type:"structure"` | ||||
| 
 | ||||
|  | @ -2242,7 +2284,6 @@ func (s *GetFederationTokenOutput) SetPackedPolicySize(v int64) *GetFederationTo | |||
| 	return s | ||||
| } | ||||
| 
 | ||||
| // Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionTokenRequest
 | ||||
| type GetSessionTokenInput struct { | ||||
| 	_ struct{} `type:"structure"` | ||||
| 
 | ||||
|  | @ -2327,7 +2368,6 @@ func (s *GetSessionTokenInput) SetTokenCode(v string) *GetSessionTokenInput { | |||
| 
 | ||||
| // Contains the response to a successful GetSessionToken request, including
 | ||||
| // temporary AWS credentials that can be used to make AWS requests.
 | ||||
| // Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionTokenResponse
 | ||||
| type GetSessionTokenOutput struct { | ||||
| 	_ struct{} `type:"structure"` | ||||
| 
 | ||||
|  |  | |||
|  | @ -56,7 +56,7 @@ | |||
| //
 | ||||
| // Using the Client
 | ||||
| //
 | ||||
| // To AWS Security Token Service with the SDK use the New function to create
 | ||||
| // To contact AWS Security Token Service with the SDK use the New function to create
 | ||||
| // a new service client. With that client you can make API requests to the service.
 | ||||
| // These clients are safe to use concurrently.
 | ||||
| //
 | ||||
|  |  | |||
|  | @ -29,8 +29,9 @@ var initRequest func(*request.Request) | |||
| 
 | ||||
| // Service information constants
 | ||||
| const ( | ||||
| 	ServiceName = "sts"       // Service endpoint prefix API calls made to.
 | ||||
| 	EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata.
 | ||||
| 	ServiceName = "sts"       // Name of service.
 | ||||
| 	EndpointsID = ServiceName // ID to lookup a service endpoint with.
 | ||||
| 	ServiceID   = "STS"       // ServiceID is a unique identifer of a specific service.
 | ||||
| ) | ||||
| 
 | ||||
| // New creates a new instance of the STS client with a session.
 | ||||
|  | @ -55,6 +56,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio | |||
| 			cfg, | ||||
| 			metadata.ClientInfo{ | ||||
| 				ServiceName:   ServiceName, | ||||
| 				ServiceID:     ServiceID, | ||||
| 				SigningName:   signingName, | ||||
| 				SigningRegion: signingRegion, | ||||
| 				Endpoint:      endpoint, | ||||
|  |  | |||
|  | @ -31,7 +31,10 @@ import ( | |||
| ) | ||||
| 
 | ||||
| type NvidiaManager struct { | ||||
| 	sync.RWMutex | ||||
| 	sync.Mutex | ||||
| 
 | ||||
| 	// true if there are NVIDIA devices present on the node
 | ||||
| 	devicesPresent bool | ||||
| 
 | ||||
| 	// true if the NVML library (libnvidia-ml.so.1) was loaded successfully
 | ||||
| 	nvmlInitialized bool | ||||
|  | @ -51,20 +54,9 @@ func (nm *NvidiaManager) Setup() { | |||
| 		return | ||||
| 	} | ||||
| 
 | ||||
| 	nm.initializeNVML() | ||||
| 	if nm.nvmlInitialized { | ||||
| 		return | ||||
| 	} | ||||
| 	go func() { | ||||
| 		glog.V(2).Info("Starting goroutine to initialize NVML") | ||||
| 		// TODO: use globalHousekeepingInterval
 | ||||
| 		for range time.Tick(time.Minute) { | ||||
| 			nm.initializeNVML() | ||||
| 			if nm.nvmlInitialized { | ||||
| 				return | ||||
| 			} | ||||
| 		} | ||||
| 	}() | ||||
| 	nm.devicesPresent = true | ||||
| 
 | ||||
| 	initializeNVML(nm) | ||||
| } | ||||
| 
 | ||||
| // detectDevices returns true if a device with given pci id is present on the node.
 | ||||
|  | @ -91,20 +83,18 @@ func detectDevices(vendorId string) bool { | |||
| } | ||||
| 
 | ||||
| // initializeNVML initializes the NVML library and sets up the nvmlDevices map.
 | ||||
| func (nm *NvidiaManager) initializeNVML() { | ||||
| // This is defined as a variable to help in testing.
 | ||||
| var initializeNVML = func(nm *NvidiaManager) { | ||||
| 	if err := gonvml.Initialize(); err != nil { | ||||
| 		// This is under a logging level because otherwise we may cause
 | ||||
| 		// log spam if the drivers/nvml is not installed on the system.
 | ||||
| 		glog.V(4).Infof("Could not initialize NVML: %v", err) | ||||
| 		return | ||||
| 	} | ||||
| 	nm.nvmlInitialized = true | ||||
| 	numDevices, err := gonvml.DeviceCount() | ||||
| 	if err != nil { | ||||
| 		glog.Warningf("GPU metrics would not be available. Failed to get the number of nvidia devices: %v", err) | ||||
| 		nm.Lock() | ||||
| 		// Even though we won't have GPU metrics, the library was initialized and should be shutdown when exiting.
 | ||||
| 		nm.nvmlInitialized = true | ||||
| 		nm.Unlock() | ||||
| 		return | ||||
| 	} | ||||
| 	glog.V(1).Infof("NVML initialized. Number of nvidia devices: %v", numDevices) | ||||
|  | @ -122,10 +112,6 @@ func (nm *NvidiaManager) initializeNVML() { | |||
| 		} | ||||
| 		nm.nvidiaDevices[int(minorNumber)] = device | ||||
| 	} | ||||
| 	nm.Lock() | ||||
| 	// Doing this at the end to avoid race in accessing nvidiaDevices in GetCollector.
 | ||||
| 	nm.nvmlInitialized = true | ||||
| 	nm.Unlock() | ||||
| } | ||||
| 
 | ||||
| // Destroy shuts down NVML.
 | ||||
|  | @ -139,12 +125,21 @@ func (nm *NvidiaManager) Destroy() { | |||
| // present in the devices.list file in the given devicesCgroupPath.
 | ||||
| func (nm *NvidiaManager) GetCollector(devicesCgroupPath string) (AcceleratorCollector, error) { | ||||
| 	nc := &NvidiaCollector{} | ||||
| 	nm.RLock() | ||||
| 	if !nm.nvmlInitialized || len(nm.nvidiaDevices) == 0 { | ||||
| 		nm.RUnlock() | ||||
| 
 | ||||
| 	if !nm.devicesPresent { | ||||
| 		return nc, nil | ||||
| 	} | ||||
| 	nm.RUnlock() | ||||
| 	// Makes sure that we don't call initializeNVML() concurrently and
 | ||||
| 	// that we only call initializeNVML() when it's not initialized.
 | ||||
| 	nm.Lock() | ||||
| 	if !nm.nvmlInitialized { | ||||
| 		initializeNVML(nm) | ||||
| 	} | ||||
| 	if !nm.nvmlInitialized || len(nm.nvidiaDevices) == 0 { | ||||
| 		nm.Unlock() | ||||
| 		return nc, nil | ||||
| 	} | ||||
| 	nm.Unlock() | ||||
| 	nvidiaMinorNumbers, err := parseDevicesCgroup(devicesCgroupPath) | ||||
| 	if err != nil { | ||||
| 		return nc, err | ||||
|  |  | |||
|  | @ -1,250 +0,0 @@ | |||
| // Copyright 2014 Google Inc. All Rights Reserved.
 | ||||
| //
 | ||||
| // Licensed under the Apache License, Version 2.0 (the "License");
 | ||||
| // you may not use this file except in compliance with the License.
 | ||||
| // You may obtain a copy of the License at
 | ||||
| //
 | ||||
| //     http://www.apache.org/licenses/LICENSE-2.0
 | ||||
| //
 | ||||
| // Unless required by applicable law or agreed to in writing, software
 | ||||
| // distributed under the License is distributed on an "AS IS" BASIS,
 | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | ||||
| // See the License for the specific language governing permissions and
 | ||||
| // limitations under the License.
 | ||||
| 
 | ||||
| // Package api provides a handler for /api/
 | ||||
| package api | ||||
| 
 | ||||
| import ( | ||||
| 	"encoding/json" | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 	"net/http" | ||||
| 	"path" | ||||
| 	"regexp" | ||||
| 	"sort" | ||||
| 	"strconv" | ||||
| 	"strings" | ||||
| 	"time" | ||||
| 
 | ||||
| 	"github.com/google/cadvisor/events" | ||||
| 	httpmux "github.com/google/cadvisor/http/mux" | ||||
| 	info "github.com/google/cadvisor/info/v1" | ||||
| 	"github.com/google/cadvisor/manager" | ||||
| 
 | ||||
| 	"github.com/golang/glog" | ||||
| ) | ||||
| 
 | ||||
| const ( | ||||
| 	apiResource = "/api/" | ||||
| ) | ||||
| 
 | ||||
| func RegisterHandlers(mux httpmux.Mux, m manager.Manager) error { | ||||
| 	apiVersions := getApiVersions() | ||||
| 	supportedApiVersions := make(map[string]ApiVersion, len(apiVersions)) | ||||
| 	for _, v := range apiVersions { | ||||
| 		supportedApiVersions[v.Version()] = v | ||||
| 	} | ||||
| 
 | ||||
| 	mux.HandleFunc(apiResource, func(w http.ResponseWriter, r *http.Request) { | ||||
| 		err := handleRequest(supportedApiVersions, m, w, r) | ||||
| 		if err != nil { | ||||
| 			http.Error(w, err.Error(), 500) | ||||
| 		} | ||||
| 	}) | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| // Captures the API version, requestType [optional], and remaining request [optional].
 | ||||
| var apiRegexp = regexp.MustCompile(`/api/([^/]+)/?([^/]+)?(.*)`) | ||||
| 
 | ||||
| const ( | ||||
| 	apiVersion = iota + 1 | ||||
| 	apiRequestType | ||||
| 	apiRequestArgs | ||||
| ) | ||||
| 
 | ||||
| func handleRequest(supportedApiVersions map[string]ApiVersion, m manager.Manager, w http.ResponseWriter, r *http.Request) error { | ||||
| 	start := time.Now() | ||||
| 	defer func() { | ||||
| 		glog.V(4).Infof("Request took %s", time.Since(start)) | ||||
| 	}() | ||||
| 
 | ||||
| 	request := r.URL.Path | ||||
| 
 | ||||
| 	const apiPrefix = "/api" | ||||
| 	if !strings.HasPrefix(request, apiPrefix) { | ||||
| 		return fmt.Errorf("incomplete API request %q", request) | ||||
| 	} | ||||
| 
 | ||||
| 	// If the request doesn't have an API version, list those.
 | ||||
| 	if request == apiPrefix || request == apiResource { | ||||
| 		versions := make([]string, 0, len(supportedApiVersions)) | ||||
| 		for v := range supportedApiVersions { | ||||
| 			versions = append(versions, v) | ||||
| 		} | ||||
| 		sort.Strings(versions) | ||||
| 		http.Error(w, fmt.Sprintf("Supported API versions: %s", strings.Join(versions, ",")), http.StatusBadRequest) | ||||
| 		return nil | ||||
| 	} | ||||
| 
 | ||||
| 	// Verify that we have all the elements we expect:
 | ||||
| 	// /<version>/<request type>[/<args...>]
 | ||||
| 	requestElements := apiRegexp.FindStringSubmatch(request) | ||||
| 	if len(requestElements) == 0 { | ||||
| 		return fmt.Errorf("malformed request %q", request) | ||||
| 	} | ||||
| 	version := requestElements[apiVersion] | ||||
| 	requestType := requestElements[apiRequestType] | ||||
| 	requestArgs := strings.Split(requestElements[apiRequestArgs], "/") | ||||
| 
 | ||||
| 	// Check supported versions.
 | ||||
| 	versionHandler, ok := supportedApiVersions[version] | ||||
| 	if !ok { | ||||
| 		return fmt.Errorf("unsupported API version %q", version) | ||||
| 	} | ||||
| 
 | ||||
| 	// If no request type, list possible request types.
 | ||||
| 	if requestType == "" { | ||||
| 		requestTypes := versionHandler.SupportedRequestTypes() | ||||
| 		sort.Strings(requestTypes) | ||||
| 		http.Error(w, fmt.Sprintf("Supported request types: %q", strings.Join(requestTypes, ",")), http.StatusBadRequest) | ||||
| 		return nil | ||||
| 	} | ||||
| 
 | ||||
| 	// Trim the first empty element from the request.
 | ||||
| 	if len(requestArgs) > 0 && requestArgs[0] == "" { | ||||
| 		requestArgs = requestArgs[1:] | ||||
| 	} | ||||
| 
 | ||||
| 	return versionHandler.HandleRequest(requestType, requestArgs, m, w, r) | ||||
| 
 | ||||
| } | ||||
| 
 | ||||
| func writeResult(res interface{}, w http.ResponseWriter) error { | ||||
| 	out, err := json.Marshal(res) | ||||
| 	if err != nil { | ||||
| 		return fmt.Errorf("failed to marshall response %+v with error: %s", res, err) | ||||
| 	} | ||||
| 
 | ||||
| 	w.Header().Set("Content-Type", "application/json") | ||||
| 	w.Write(out) | ||||
| 	return nil | ||||
| 
 | ||||
| } | ||||
| 
 | ||||
| func streamResults(eventChannel *events.EventChannel, w http.ResponseWriter, r *http.Request, m manager.Manager) error { | ||||
| 	cn, ok := w.(http.CloseNotifier) | ||||
| 	if !ok { | ||||
| 		return errors.New("could not access http.CloseNotifier") | ||||
| 	} | ||||
| 	flusher, ok := w.(http.Flusher) | ||||
| 	if !ok { | ||||
| 		return errors.New("could not access http.Flusher") | ||||
| 	} | ||||
| 
 | ||||
| 	w.Header().Set("Transfer-Encoding", "chunked") | ||||
| 	w.WriteHeader(http.StatusOK) | ||||
| 	flusher.Flush() | ||||
| 
 | ||||
| 	enc := json.NewEncoder(w) | ||||
| 	for { | ||||
| 		select { | ||||
| 		case <-cn.CloseNotify(): | ||||
| 			m.CloseEventChannel(eventChannel.GetWatchId()) | ||||
| 			return nil | ||||
| 		case ev := <-eventChannel.GetChannel(): | ||||
| 			err := enc.Encode(ev) | ||||
| 			if err != nil { | ||||
| 				glog.Errorf("error encoding message %+v for result stream: %v", ev, err) | ||||
| 			} | ||||
| 			flusher.Flush() | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func getContainerInfoRequest(body io.ReadCloser) (*info.ContainerInfoRequest, error) { | ||||
| 	query := info.DefaultContainerInfoRequest() | ||||
| 	decoder := json.NewDecoder(body) | ||||
| 	err := decoder.Decode(&query) | ||||
| 	if err != nil && err != io.EOF { | ||||
| 		return nil, fmt.Errorf("unable to decode the json value: %s", err) | ||||
| 	} | ||||
| 
 | ||||
| 	return &query, nil | ||||
| } | ||||
| 
 | ||||
| // The user can set any or none of the following arguments in any order
 | ||||
| // with any twice defined arguments being assigned the first value.
 | ||||
| // If the value type for the argument is wrong the field will be assumed to be
 | ||||
| // unassigned
 | ||||
| // bools: stream, subcontainers, oom_events, creation_events, deletion_events
 | ||||
| // ints: max_events, start_time (unix timestamp), end_time (unix timestamp)
 | ||||
| // example r.URL: http://localhost:8080/api/v1.3/events?oom_events=true&stream=true
 | ||||
| func getEventRequest(r *http.Request) (*events.Request, bool, error) { | ||||
| 	query := events.NewRequest() | ||||
| 	stream := false | ||||
| 
 | ||||
| 	urlMap := r.URL.Query() | ||||
| 
 | ||||
| 	if val, ok := urlMap["stream"]; ok { | ||||
| 		newBool, err := strconv.ParseBool(val[0]) | ||||
| 		if err == nil { | ||||
| 			stream = newBool | ||||
| 		} | ||||
| 	} | ||||
| 	if val, ok := urlMap["subcontainers"]; ok { | ||||
| 		newBool, err := strconv.ParseBool(val[0]) | ||||
| 		if err == nil { | ||||
| 			query.IncludeSubcontainers = newBool | ||||
| 		} | ||||
| 	} | ||||
| 	eventTypes := map[string]info.EventType{ | ||||
| 		"oom_events":      info.EventOom, | ||||
| 		"oom_kill_events": info.EventOomKill, | ||||
| 		"creation_events": info.EventContainerCreation, | ||||
| 		"deletion_events": info.EventContainerDeletion, | ||||
| 	} | ||||
| 	allEventTypes := false | ||||
| 	if val, ok := urlMap["all_events"]; ok { | ||||
| 		newBool, err := strconv.ParseBool(val[0]) | ||||
| 		if err == nil { | ||||
| 			allEventTypes = newBool | ||||
| 		} | ||||
| 	} | ||||
| 	for opt, eventType := range eventTypes { | ||||
| 		if allEventTypes { | ||||
| 			query.EventType[eventType] = true | ||||
| 		} else if val, ok := urlMap[opt]; ok { | ||||
| 			newBool, err := strconv.ParseBool(val[0]) | ||||
| 			if err == nil { | ||||
| 				query.EventType[eventType] = newBool | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	if val, ok := urlMap["max_events"]; ok { | ||||
| 		newInt, err := strconv.Atoi(val[0]) | ||||
| 		if err == nil { | ||||
| 			query.MaxEventsReturned = int(newInt) | ||||
| 		} | ||||
| 	} | ||||
| 	if val, ok := urlMap["start_time"]; ok { | ||||
| 		newTime, err := time.Parse(time.RFC3339, val[0]) | ||||
| 		if err == nil { | ||||
| 			query.StartTime = newTime | ||||
| 		} | ||||
| 	} | ||||
| 	if val, ok := urlMap["end_time"]; ok { | ||||
| 		newTime, err := time.Parse(time.RFC3339, val[0]) | ||||
| 		if err == nil { | ||||
| 			query.EndTime = newTime | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	return query, stream, nil | ||||
| } | ||||
| 
 | ||||
| func getContainerName(request []string) string { | ||||
| 	return path.Join("/", strings.Join(request, "/")) | ||||
| } | ||||
|  | @ -1,559 +0,0 @@ | |||
| // Copyright 2015 Google Inc. All Rights Reserved.
 | ||||
| //
 | ||||
| // Licensed under the Apache License, Version 2.0 (the "License");
 | ||||
| // you may not use this file except in compliance with the License.
 | ||||
| // You may obtain a copy of the License at
 | ||||
| //
 | ||||
| //     http://www.apache.org/licenses/LICENSE-2.0
 | ||||
| //
 | ||||
| // Unless required by applicable law or agreed to in writing, software
 | ||||
| // distributed under the License is distributed on an "AS IS" BASIS,
 | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | ||||
| // See the License for the specific language governing permissions and
 | ||||
| // limitations under the License.
 | ||||
| 
 | ||||
| package api | ||||
| 
 | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"net/http" | ||||
| 	"path" | ||||
| 	"strconv" | ||||
| 
 | ||||
| 	info "github.com/google/cadvisor/info/v1" | ||||
| 	"github.com/google/cadvisor/info/v2" | ||||
| 	"github.com/google/cadvisor/manager" | ||||
| 
 | ||||
| 	"github.com/golang/glog" | ||||
| ) | ||||
| 
 | ||||
| const ( | ||||
| 	containersApi    = "containers" | ||||
| 	subcontainersApi = "subcontainers" | ||||
| 	machineApi       = "machine" | ||||
| 	machineStatsApi  = "machinestats" | ||||
| 	dockerApi        = "docker" | ||||
| 	summaryApi       = "summary" | ||||
| 	statsApi         = "stats" | ||||
| 	specApi          = "spec" | ||||
| 	eventsApi        = "events" | ||||
| 	storageApi       = "storage" | ||||
| 	attributesApi    = "attributes" | ||||
| 	versionApi       = "version" | ||||
| 	psApi            = "ps" | ||||
| 	customMetricsApi = "appmetrics" | ||||
| ) | ||||
| 
 | ||||
| // Interface for a cAdvisor API version
 | ||||
| type ApiVersion interface { | ||||
| 	// Returns the version string.
 | ||||
| 	Version() string | ||||
| 
 | ||||
| 	// List of supported API endpoints.
 | ||||
| 	SupportedRequestTypes() []string | ||||
| 
 | ||||
| 	// Handles a request. The second argument is the parameters after /api/<version>/<endpoint>
 | ||||
| 	HandleRequest(requestType string, request []string, m manager.Manager, w http.ResponseWriter, r *http.Request) error | ||||
| } | ||||
| 
 | ||||
| // Gets all supported API versions.
 | ||||
| func getApiVersions() []ApiVersion { | ||||
| 	v1_0 := &version1_0{} | ||||
| 	v1_1 := newVersion1_1(v1_0) | ||||
| 	v1_2 := newVersion1_2(v1_1) | ||||
| 	v1_3 := newVersion1_3(v1_2) | ||||
| 	v2_0 := newVersion2_0() | ||||
| 	v2_1 := newVersion2_1(v2_0) | ||||
| 
 | ||||
| 	return []ApiVersion{v1_0, v1_1, v1_2, v1_3, v2_0, v2_1} | ||||
| 
 | ||||
| } | ||||
| 
 | ||||
| // API v1.0
 | ||||
| 
 | ||||
| type version1_0 struct { | ||||
| } | ||||
| 
 | ||||
| func (self *version1_0) Version() string { | ||||
| 	return "v1.0" | ||||
| } | ||||
| 
 | ||||
| func (self *version1_0) SupportedRequestTypes() []string { | ||||
| 	return []string{containersApi, machineApi} | ||||
| } | ||||
| 
 | ||||
| func (self *version1_0) HandleRequest(requestType string, request []string, m manager.Manager, w http.ResponseWriter, r *http.Request) error { | ||||
| 	switch requestType { | ||||
| 	case machineApi: | ||||
| 		glog.V(4).Infof("Api - Machine") | ||||
| 
 | ||||
| 		// Get the MachineInfo
 | ||||
| 		machineInfo, err := m.GetMachineInfo() | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 
 | ||||
| 		err = writeResult(machineInfo, w) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 	case containersApi: | ||||
| 		containerName := getContainerName(request) | ||||
| 		glog.V(4).Infof("Api - Container(%s)", containerName) | ||||
| 
 | ||||
| 		// Get the query request.
 | ||||
| 		query, err := getContainerInfoRequest(r.Body) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 
 | ||||
| 		// Get the container.
 | ||||
| 		cont, err := m.GetContainerInfo(containerName, query) | ||||
| 		if err != nil { | ||||
| 			return fmt.Errorf("failed to get container %q with error: %s", containerName, err) | ||||
| 		} | ||||
| 
 | ||||
| 		// Only output the container as JSON.
 | ||||
| 		err = writeResult(cont, w) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 	default: | ||||
| 		return fmt.Errorf("unknown request type %q", requestType) | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| // API v1.1
 | ||||
| 
 | ||||
| type version1_1 struct { | ||||
| 	baseVersion *version1_0 | ||||
| } | ||||
| 
 | ||||
| // v1.1 builds on v1.0.
 | ||||
| func newVersion1_1(v *version1_0) *version1_1 { | ||||
| 	return &version1_1{ | ||||
| 		baseVersion: v, | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func (self *version1_1) Version() string { | ||||
| 	return "v1.1" | ||||
| } | ||||
| 
 | ||||
| func (self *version1_1) SupportedRequestTypes() []string { | ||||
| 	return append(self.baseVersion.SupportedRequestTypes(), subcontainersApi) | ||||
| } | ||||
| 
 | ||||
| func (self *version1_1) HandleRequest(requestType string, request []string, m manager.Manager, w http.ResponseWriter, r *http.Request) error { | ||||
| 	switch requestType { | ||||
| 	case subcontainersApi: | ||||
| 		containerName := getContainerName(request) | ||||
| 		glog.V(4).Infof("Api - Subcontainers(%s)", containerName) | ||||
| 
 | ||||
| 		// Get the query request.
 | ||||
| 		query, err := getContainerInfoRequest(r.Body) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 
 | ||||
| 		// Get the subcontainers.
 | ||||
| 		containers, err := m.SubcontainersInfo(containerName, query) | ||||
| 		if err != nil { | ||||
| 			return fmt.Errorf("failed to get subcontainers for container %q with error: %s", containerName, err) | ||||
| 		} | ||||
| 
 | ||||
| 		// Only output the containers as JSON.
 | ||||
| 		err = writeResult(containers, w) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 		return nil | ||||
| 	default: | ||||
| 		return self.baseVersion.HandleRequest(requestType, request, m, w, r) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // API v1.2
 | ||||
| 
 | ||||
| type version1_2 struct { | ||||
| 	baseVersion *version1_1 | ||||
| } | ||||
| 
 | ||||
| // v1.2 builds on v1.1.
 | ||||
| func newVersion1_2(v *version1_1) *version1_2 { | ||||
| 	return &version1_2{ | ||||
| 		baseVersion: v, | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func (self *version1_2) Version() string { | ||||
| 	return "v1.2" | ||||
| } | ||||
| 
 | ||||
| func (self *version1_2) SupportedRequestTypes() []string { | ||||
| 	return append(self.baseVersion.SupportedRequestTypes(), dockerApi) | ||||
| } | ||||
| 
 | ||||
| func (self *version1_2) HandleRequest(requestType string, request []string, m manager.Manager, w http.ResponseWriter, r *http.Request) error { | ||||
| 	switch requestType { | ||||
| 	case dockerApi: | ||||
| 		glog.V(4).Infof("Api - Docker(%v)", request) | ||||
| 
 | ||||
| 		// Get the query request.
 | ||||
| 		query, err := getContainerInfoRequest(r.Body) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 
 | ||||
| 		var containers map[string]info.ContainerInfo | ||||
| 		// map requests for "docker/" to "docker"
 | ||||
| 		if len(request) == 1 && len(request[0]) == 0 { | ||||
| 			request = request[:0] | ||||
| 		} | ||||
| 		switch len(request) { | ||||
| 		case 0: | ||||
| 			// Get all Docker containers.
 | ||||
| 			containers, err = m.AllDockerContainers(query) | ||||
| 			if err != nil { | ||||
| 				return fmt.Errorf("failed to get all Docker containers with error: %v", err) | ||||
| 			} | ||||
| 		case 1: | ||||
| 			// Get one Docker container.
 | ||||
| 			var cont info.ContainerInfo | ||||
| 			cont, err = m.DockerContainer(request[0], query) | ||||
| 			if err != nil { | ||||
| 				return fmt.Errorf("failed to get Docker container %q with error: %v", request[0], err) | ||||
| 			} | ||||
| 			containers = map[string]info.ContainerInfo{ | ||||
| 				cont.Name: cont, | ||||
| 			} | ||||
| 		default: | ||||
| 			return fmt.Errorf("unknown request for Docker container %v", request) | ||||
| 		} | ||||
| 
 | ||||
| 		// Only output the containers as JSON.
 | ||||
| 		err = writeResult(containers, w) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 		return nil | ||||
| 	default: | ||||
| 		return self.baseVersion.HandleRequest(requestType, request, m, w, r) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
// API v1.3

// version1_3 serves the v1.3 API. It adds the events endpoint on top of
// v1.2 and delegates every other request type to baseVersion.
type version1_3 struct {
	baseVersion *version1_2
}
| 
 | ||||
| // v1.3 builds on v1.2.
 | ||||
| func newVersion1_3(v *version1_2) *version1_3 { | ||||
| 	return &version1_3{ | ||||
| 		baseVersion: v, | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func (self *version1_3) Version() string { | ||||
| 	return "v1.3" | ||||
| } | ||||
| 
 | ||||
| func (self *version1_3) SupportedRequestTypes() []string { | ||||
| 	return append(self.baseVersion.SupportedRequestTypes(), eventsApi) | ||||
| } | ||||
| 
 | ||||
| func (self *version1_3) HandleRequest(requestType string, request []string, m manager.Manager, w http.ResponseWriter, r *http.Request) error { | ||||
| 	switch requestType { | ||||
| 	case eventsApi: | ||||
| 		return handleEventRequest(request, m, w, r) | ||||
| 	default: | ||||
| 		return self.baseVersion.HandleRequest(requestType, request, m, w, r) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func handleEventRequest(request []string, m manager.Manager, w http.ResponseWriter, r *http.Request) error { | ||||
| 	query, stream, err := getEventRequest(r) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 	query.ContainerName = path.Join("/", getContainerName(request)) | ||||
| 	glog.V(4).Infof("Api - Events(%v)", query) | ||||
| 	if !stream { | ||||
| 		pastEvents, err := m.GetPastEvents(query) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 		return writeResult(pastEvents, w) | ||||
| 	} | ||||
| 	eventChannel, err := m.WatchForEvents(query) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 	return streamResults(eventChannel, w, r, m) | ||||
| 
 | ||||
| } | ||||
| 
 | ||||
// API v2.0

// version2_0 is the root of the v2 API chain; it carries no state and
// delegates to no base version.
type version2_0 struct{}
| 
 | ||||
| func newVersion2_0() *version2_0 { | ||||
| 	return &version2_0{} | ||||
| } | ||||
| 
 | ||||
| func (self *version2_0) Version() string { | ||||
| 	return "v2.0" | ||||
| } | ||||
| 
 | ||||
| func (self *version2_0) SupportedRequestTypes() []string { | ||||
| 	return []string{versionApi, attributesApi, eventsApi, machineApi, summaryApi, statsApi, specApi, storageApi, psApi, customMetricsApi} | ||||
| } | ||||
| 
 | ||||
| func (self *version2_0) HandleRequest(requestType string, request []string, m manager.Manager, w http.ResponseWriter, r *http.Request) error { | ||||
| 	opt, err := getRequestOptions(r) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 	switch requestType { | ||||
| 	case versionApi: | ||||
| 		glog.V(4).Infof("Api - Version") | ||||
| 		versionInfo, err := m.GetVersionInfo() | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 		return writeResult(versionInfo.CadvisorVersion, w) | ||||
| 	case attributesApi: | ||||
| 		glog.V(4).Info("Api - Attributes") | ||||
| 
 | ||||
| 		machineInfo, err := m.GetMachineInfo() | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 		versionInfo, err := m.GetVersionInfo() | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 		info := v2.GetAttributes(machineInfo, versionInfo) | ||||
| 		return writeResult(info, w) | ||||
| 	case machineApi: | ||||
| 		glog.V(4).Info("Api - Machine") | ||||
| 
 | ||||
| 		// TODO(rjnagal): Move machineInfo from v1.
 | ||||
| 		machineInfo, err := m.GetMachineInfo() | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 		return writeResult(machineInfo, w) | ||||
| 	case summaryApi: | ||||
| 		containerName := getContainerName(request) | ||||
| 		glog.V(4).Infof("Api - Summary for container %q, options %+v", containerName, opt) | ||||
| 
 | ||||
| 		stats, err := m.GetDerivedStats(containerName, opt) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 		return writeResult(stats, w) | ||||
| 	case statsApi: | ||||
| 		name := getContainerName(request) | ||||
| 		glog.V(4).Infof("Api - Stats: Looking for stats for container %q, options %+v", name, opt) | ||||
| 		infos, err := m.GetRequestedContainersInfo(name, opt) | ||||
| 		if err != nil { | ||||
| 			if len(infos) == 0 { | ||||
| 				return err | ||||
| 			} | ||||
| 			glog.Errorf("Error calling GetRequestedContainersInfo: %v", err) | ||||
| 		} | ||||
| 		contStats := make(map[string][]v2.DeprecatedContainerStats, 0) | ||||
| 		for name, cinfo := range infos { | ||||
| 			contStats[name] = v2.DeprecatedStatsFromV1(cinfo) | ||||
| 		} | ||||
| 		return writeResult(contStats, w) | ||||
| 	case customMetricsApi: | ||||
| 		containerName := getContainerName(request) | ||||
| 		glog.V(4).Infof("Api - Custom Metrics: Looking for metrics for container %q, options %+v", containerName, opt) | ||||
| 		infos, err := m.GetContainerInfoV2(containerName, opt) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 		contMetrics := make(map[string]map[string]map[string][]info.MetricValBasic, 0) | ||||
| 		for _, cinfo := range infos { | ||||
| 			metrics := make(map[string]map[string][]info.MetricValBasic, 0) | ||||
| 			for _, contStat := range cinfo.Stats { | ||||
| 				if len(contStat.CustomMetrics) == 0 { | ||||
| 					continue | ||||
| 				} | ||||
| 				for name, allLabels := range contStat.CustomMetrics { | ||||
| 					metricLabels := make(map[string][]info.MetricValBasic, 0) | ||||
| 					for _, metric := range allLabels { | ||||
| 						if !metric.Timestamp.IsZero() { | ||||
| 							metVal := info.MetricValBasic{ | ||||
| 								Timestamp:  metric.Timestamp, | ||||
| 								IntValue:   metric.IntValue, | ||||
| 								FloatValue: metric.FloatValue, | ||||
| 							} | ||||
| 							labels := metrics[name] | ||||
| 							if labels != nil { | ||||
| 								values := labels[metric.Label] | ||||
| 								values = append(values, metVal) | ||||
| 								labels[metric.Label] = values | ||||
| 								metrics[name] = labels | ||||
| 							} else { | ||||
| 								metricLabels[metric.Label] = []info.MetricValBasic{metVal} | ||||
| 								metrics[name] = metricLabels | ||||
| 							} | ||||
| 						} | ||||
| 					} | ||||
| 				} | ||||
| 			} | ||||
| 			contMetrics[containerName] = metrics | ||||
| 		} | ||||
| 		return writeResult(contMetrics, w) | ||||
| 	case specApi: | ||||
| 		containerName := getContainerName(request) | ||||
| 		glog.V(4).Infof("Api - Spec for container %q, options %+v", containerName, opt) | ||||
| 		specs, err := m.GetContainerSpec(containerName, opt) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 		return writeResult(specs, w) | ||||
| 	case storageApi: | ||||
| 		label := r.URL.Query().Get("label") | ||||
| 		uuid := r.URL.Query().Get("uuid") | ||||
| 		switch { | ||||
| 		case uuid != "": | ||||
| 			fi, err := m.GetFsInfoByFsUUID(uuid) | ||||
| 			if err != nil { | ||||
| 				return err | ||||
| 			} | ||||
| 			return writeResult(fi, w) | ||||
| 		case label != "": | ||||
| 			// Get a specific label.
 | ||||
| 			fi, err := m.GetFsInfo(label) | ||||
| 			if err != nil { | ||||
| 				return err | ||||
| 			} | ||||
| 			return writeResult(fi, w) | ||||
| 		default: | ||||
| 			// Get all global filesystems info.
 | ||||
| 			fi, err := m.GetFsInfo("") | ||||
| 			if err != nil { | ||||
| 				return err | ||||
| 			} | ||||
| 			return writeResult(fi, w) | ||||
| 		} | ||||
| 	case eventsApi: | ||||
| 		return handleEventRequest(request, m, w, r) | ||||
| 	case psApi: | ||||
| 		// reuse container type from request.
 | ||||
| 		// ignore recursive.
 | ||||
| 		// TODO(rjnagal): consider count to limit ps output.
 | ||||
| 		name := getContainerName(request) | ||||
| 		glog.V(4).Infof("Api - Spec for container %q, options %+v", name, opt) | ||||
| 		ps, err := m.GetProcessList(name, opt) | ||||
| 		if err != nil { | ||||
| 			return fmt.Errorf("process listing failed: %v", err) | ||||
| 		} | ||||
| 		return writeResult(ps, w) | ||||
| 	default: | ||||
| 		return fmt.Errorf("unknown request type %q", requestType) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
// version2_1 serves the v2.1 API: v2.0 plus the machineStats endpoint
// and a richer per-container stats format. Unhandled request types are
// delegated to baseVersion.
type version2_1 struct {
	baseVersion *version2_0
}
| 
 | ||||
| func newVersion2_1(v *version2_0) *version2_1 { | ||||
| 	return &version2_1{ | ||||
| 		baseVersion: v, | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func (self *version2_1) Version() string { | ||||
| 	return "v2.1" | ||||
| } | ||||
| 
 | ||||
| func (self *version2_1) SupportedRequestTypes() []string { | ||||
| 	return append([]string{machineStatsApi}, self.baseVersion.SupportedRequestTypes()...) | ||||
| } | ||||
| 
 | ||||
| func (self *version2_1) HandleRequest(requestType string, request []string, m manager.Manager, w http.ResponseWriter, r *http.Request) error { | ||||
| 	// Get the query request.
 | ||||
| 	opt, err := getRequestOptions(r) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	switch requestType { | ||||
| 	case machineStatsApi: | ||||
| 		glog.V(4).Infof("Api - MachineStats(%v)", request) | ||||
| 		cont, err := m.GetRequestedContainersInfo("/", opt) | ||||
| 		if err != nil { | ||||
| 			if len(cont) == 0 { | ||||
| 				return err | ||||
| 			} | ||||
| 			glog.Errorf("Error calling GetRequestedContainersInfo: %v", err) | ||||
| 		} | ||||
| 		return writeResult(v2.MachineStatsFromV1(cont["/"]), w) | ||||
| 	case statsApi: | ||||
| 		name := getContainerName(request) | ||||
| 		glog.V(4).Infof("Api - Stats: Looking for stats for container %q, options %+v", name, opt) | ||||
| 		conts, err := m.GetRequestedContainersInfo(name, opt) | ||||
| 		if err != nil { | ||||
| 			if len(conts) == 0 { | ||||
| 				return err | ||||
| 			} | ||||
| 			glog.Errorf("Error calling GetRequestedContainersInfo: %v", err) | ||||
| 		} | ||||
| 		contStats := make(map[string]v2.ContainerInfo, len(conts)) | ||||
| 		for name, cont := range conts { | ||||
| 			if name == "/" { | ||||
| 				// Root cgroup stats should be exposed as machine stats
 | ||||
| 				continue | ||||
| 			} | ||||
| 			contStats[name] = v2.ContainerInfo{ | ||||
| 				Spec:  v2.ContainerSpecFromV1(&cont.Spec, cont.Aliases, cont.Namespace), | ||||
| 				Stats: v2.ContainerStatsFromV1(name, &cont.Spec, cont.Stats), | ||||
| 			} | ||||
| 		} | ||||
| 		return writeResult(contStats, w) | ||||
| 	default: | ||||
| 		return self.baseVersion.HandleRequest(requestType, request, m, w, r) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func getRequestOptions(r *http.Request) (v2.RequestOptions, error) { | ||||
| 	supportedTypes := map[string]bool{ | ||||
| 		v2.TypeName:   true, | ||||
| 		v2.TypeDocker: true, | ||||
| 	} | ||||
| 	// fill in the defaults.
 | ||||
| 	opt := v2.RequestOptions{ | ||||
| 		IdType:    v2.TypeName, | ||||
| 		Count:     64, | ||||
| 		Recursive: false, | ||||
| 	} | ||||
| 	idType := r.URL.Query().Get("type") | ||||
| 	if len(idType) != 0 { | ||||
| 		if !supportedTypes[idType] { | ||||
| 			return opt, fmt.Errorf("unknown 'type' %q", idType) | ||||
| 		} | ||||
| 		opt.IdType = idType | ||||
| 	} | ||||
| 	count := r.URL.Query().Get("count") | ||||
| 	if len(count) != 0 { | ||||
| 		n, err := strconv.ParseUint(count, 10, 32) | ||||
| 		if err != nil { | ||||
| 			return opt, fmt.Errorf("failed to parse 'count' option: %v", count) | ||||
| 		} | ||||
| 		opt.Count = int(n) | ||||
| 	} | ||||
| 	recursive := r.URL.Query().Get("recursive") | ||||
| 	if recursive == "true" { | ||||
| 		opt.Recursive = true | ||||
| 	} | ||||
| 	return opt, nil | ||||
| } | ||||
Some files were not shown because too many files have changed in this diff Show More
		Loading…
	
		Reference in New Issue