Merge pull request #4719 from AliyunContainerService/aliyun-vfs-impl

Implement vfs with Alibaba Cloud OSS
commit fcd010044b
k8s-ci-robot 2018-04-02 08:40:02 -07:00, committed by GitHub
GPG Key ID: 4AEE18F83AFDEB23 (no known key found for this signature in database)
29 changed files with 5887 additions and 3 deletions
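For context, a minimal usage sketch (not part of the diff) of how the new oss:// scheme is consumed through the kops vfs package. The bucket and key are placeholders, and the client credentials come from the environment variables introduced in osscontext.go below.

package main

import (
	"fmt"
	"log"

	"k8s.io/kops/util/pkg/vfs"
)

func main() {
	// Requires OSS_REGION, ALIYUN_ACCESS_KEY_ID and ALIYUN_ACCESS_KEY_SECRET to be set.
	// BuildVfsPath dispatches on the "oss://" prefix and builds an *OSSPath.
	p, err := vfs.Context.BuildVfsPath("oss://example-bucket/cluster.spec") // placeholder bucket/key
	if err != nil {
		log.Fatalf("building path: %v", err)
	}

	// ReadFile fetches the object, retrying with ossReadBackoff; a missing key surfaces as os.ErrNotExist.
	data, err := p.ReadFile()
	if err != nil {
		log.Fatalf("reading %s: %v", p, err)
	}
	fmt.Printf("read %d bytes\n", len(data))
}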

Gopkg.lock generated

@@ -188,6 +188,16 @@
packages = ["."]
revision = "511bcaf42ccd42c38aba7427b6673277bf19e2a1"
[[projects]]
branch = "master"
name = "github.com/denverdino/aliyungo"
packages = [
"common",
"oss",
"util"
]
revision = "2581e433b270014481c9ec66a91368661533febb"
[[projects]]
name = "github.com/dgrijalva/jwt-go"
packages = ["."]
@@ -414,6 +424,7 @@
"openstack/identity/v3/tokens",
"openstack/networking/v2/extensions/security/groups",
"openstack/networking/v2/extensions/security/rules",
"openstack/networking/v2/networks",
"openstack/objectstorage/v1/accounts",
"openstack/objectstorage/v1/containers",
"openstack/objectstorage/v1/objects",
@@ -1546,6 +1557,6 @@
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "bd858ef77d1e30c2883d12e8783afccb43334885c756b5910b04524d24d6515d"
inputs-digest = "a0c1b19300e701d8723bb85312e6578003ed0de92dcdd0ac2c3990574af69571"
solver-name = "gps-cdcl"
solver-version = 1

@@ -9,6 +9,8 @@ go_library(
"k8scontext.go",
"k8sfs.go",
"memfs.go",
"osscontext.go",
"ossfs.go",
"s3context.go",
"s3fs.go",
"sshfs.go",
@@ -28,6 +30,7 @@ go_library(
"//vendor/github.com/aws/aws-sdk-go/aws/session:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/service/ec2:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/service/s3:go_default_library",
"//vendor/github.com/denverdino/aliyungo/oss:go_default_library",
"//vendor/github.com/go-ini/ini:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/gophercloud/gophercloud:go_default_library",

@@ -26,6 +26,7 @@ import (
"sync"
"time"
"github.com/denverdino/aliyungo/oss"
"github.com/golang/glog"
"github.com/gophercloud/gophercloud"
"golang.org/x/net/context"
@@ -46,6 +47,8 @@ type VFSContext struct {
gcsClient *storage.Service
// swiftClient is the openstack swift client
swiftClient *gophercloud.ServiceClient
// ossClient is the Aliyun Object Storage Service (OSS) client
ossClient *oss.Client
}
var Context = VFSContext{
@@ -129,6 +132,10 @@ func (c *VFSContext) BuildVfsPath(p string) (Path, error) {
return c.buildOpenstackSwiftPath(p)
}
if strings.HasPrefix(p, "oss://") {
return c.buildOSSPath(p)
}
return nil, fmt.Errorf("unknown / unhandled path type: %q", p)
}
@@ -370,3 +377,29 @@ func (c *VFSContext) buildOpenstackSwiftPath(p string) (*SwiftPath, error) {
return NewSwiftPath(c.swiftClient, bucket, u.Path)
}
func (c *VFSContext) buildOSSPath(p string) (*OSSPath, error) {
u, err := url.Parse(p)
if err != nil {
return nil, fmt.Errorf("invalid aliyun oss path: %q", p)
}
if u.Scheme != "oss" {
return nil, fmt.Errorf("invalid aliyun oss path: %q", p)
}
bucket := strings.TrimSuffix(u.Host, "/")
if bucket == "" {
return nil, fmt.Errorf("invalid aliyun oss path: %q", p)
}
if c.ossClient == nil {
ossClient, err := NewAliOSSClient()
if err != nil {
return nil, err
}
c.ossClient = ossClient
}
return NewOSSPath(c.ossClient, bucket, u.Path)
}

@@ -0,0 +1,78 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vfs
import (
"fmt"
"os"
"strings"
"github.com/denverdino/aliyungo/oss"
)
type aliyunOSSConfig struct {
region oss.Region
internal bool
accessKeyId string
accessKeySecret string
secure bool
}
func NewOSSPath(client *oss.Client, bucket string, key string) (*OSSPath, error) {
bucket = strings.TrimSuffix(bucket, "/")
key = strings.TrimPrefix(key, "/")
return &OSSPath{
client: client,
bucket: bucket,
key: key,
}, nil
}
func NewAliOSSClient() (*oss.Client, error) {
c := &aliyunOSSConfig{}
err := c.loadConfig()
if err != nil {
return nil, fmt.Errorf("error building aliyun oss client: %v", err)
}
return oss.NewOSSClient(c.region, c.internal, c.accessKeyId, c.accessKeySecret, c.secure), nil
}
func (c *aliyunOSSConfig) loadConfig() error {
c.region = oss.Region(os.Getenv("OSS_REGION"))
if c.region == "" {
// TODO: can we use default region?
return fmt.Errorf("OSS_REGION cannot be empty")
}
c.accessKeyId = os.Getenv("ALIYUN_ACCESS_KEY_ID")
if c.accessKeyId == "" {
return fmt.Errorf("ALIYUN_ACCESS_KEY_ID cannot be empty")
}
c.accessKeySecret = os.Getenv("ALIYUN_ACCESS_KEY_SECRET")
if c.accessKeySecret == "" {
return fmt.Errorf("ALIYUN_ACCESS_KEY_SECRET cannot be empty")
}
ossInternal := os.Getenv("ALIYUN_OSS_INTERNAL")
if ossInternal != "" {
c.internal = true
} else {
c.internal = false
}
c.secure = true
return nil
}
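For reference, a sketch of the construction path that buildOSSPath in vfscontext.go follows, under the assumption that the same environment variables are available; the region, credentials, bucket and key below are placeholders.

package main

import (
	"log"
	"os"

	"k8s.io/kops/util/pkg/vfs"
)

func main() {
	// loadConfig reads these variables; OSS_REGION has no default and must be set.
	os.Setenv("OSS_REGION", "oss-cn-hangzhou")           // placeholder region
	os.Setenv("ALIYUN_ACCESS_KEY_ID", "<access-key-id>") // placeholder credentials
	os.Setenv("ALIYUN_ACCESS_KEY_SECRET", "<secret>")
	// Leaving ALIYUN_OSS_INTERNAL unset keeps internal=false; secure is always true.

	client, err := vfs.NewAliOSSClient()
	if err != nil {
		log.Fatalf("building OSS client: %v", err)
	}

	// NewOSSPath trims a trailing "/" from the bucket and a leading "/" from the key.
	p, err := vfs.NewOSSPath(client, "example-bucket", "/cluster/config")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("path: %s", p.Path()) // oss://example-bucket/cluster/config
}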

util/pkg/vfs/ossfs.go Normal file

@@ -0,0 +1,356 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vfs
import (
"bytes"
"encoding/hex"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"path"
"strings"
"sync"
"time"
"github.com/denverdino/aliyungo/oss"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kops/util/pkg/hashing"
)
// OSSPath is a vfs path for the Aliyun Object Storage Service (OSS)
type OSSPath struct {
client *oss.Client
bucket string
hash string
key string
}
var _ Path = &OSSPath{}
var _ HasHash = &OSSPath{}
// ossReadBackoff is the backoff strategy for Aliyun OSS read retries.
var ossReadBackoff = wait.Backoff{
Duration: time.Second,
Factor: 1.5,
Jitter: 0.1,
Steps: 4,
}
// ossWriteBackoff is the backoff strategy for Aliyun OSS write retries
var ossWriteBackoff = wait.Backoff{
Duration: time.Second,
Factor: 1.5,
Jitter: 0.1,
Steps: 5,
}
type listOption struct {
prefix string
delim string
marker string
max int
}
// WriteTo implements io.WriterTo
func (p *OSSPath) WriteTo(out io.Writer) (int64, error) {
glog.V(4).Infof("Reading file %q", p)
b := p.client.Bucket(p.bucket)
headers := http.Header{}
response, err := b.GetResponseWithHeaders(p.key, headers)
if err != nil {
if isOSSNotFound(err) {
return 0, os.ErrNotExist
}
return 0, fmt.Errorf("error fetching %s: %v", p, err)
}
defer response.Body.Close()
n, err := io.Copy(out, response.Body)
if err != nil {
return n, fmt.Errorf("error reading %s: %v", p, err)
}
return n, nil
}
func (p *OSSPath) Join(relativePath ...string) Path {
args := []string{p.key}
args = append(args, relativePath...)
joined := path.Join(args...)
return &OSSPath{
client: p.client,
bucket: p.bucket,
key: joined,
}
}
func (p *OSSPath) ReadFile() ([]byte, error) {
var b bytes.Buffer
done, err := RetryWithBackoff(ossReadBackoff, func() (bool, error) {
b.Reset()
_, err := p.WriteTo(&b)
if err != nil {
if os.IsNotExist(err) {
// Not recoverable
return true, err
}
return false, err
}
// Success!
return true, nil
})
if err != nil {
return nil, err
} else if done {
return b.Bytes(), nil
} else {
// Shouldn't happen - we always return a non-nil error with false
return nil, wait.ErrWaitTimeout
}
}
func (p *OSSPath) WriteFile(data io.ReadSeeker, acl ACL) error {
b := p.client.Bucket(p.bucket)
done, err := RetryWithBackoff(ossWriteBackoff, func() (bool, error) {
glog.V(4).Infof("Writing file %q", p)
var perm oss.ACL
var ok bool
if acl != nil {
perm, ok = acl.(oss.ACL)
if !ok {
return true, fmt.Errorf("write to %s with ACL of unexpected type %T", p, acl)
}
} else {
// Private currently is the default ACL
perm = oss.Private
}
if _, err := data.Seek(0, 0); err != nil {
return false, fmt.Errorf("error seeking to start of data stream for write to %s: %v", p, err)
}
bytes, err := ioutil.ReadAll(data)
if err != nil {
return false, fmt.Errorf("error reading from data stream: %v", err)
}
contType := "application/octet-stream"
err = b.Put(p.key, bytes, contType, perm, oss.Options{})
if err != nil {
return false, fmt.Errorf("error writing %s: %v", p, err)
}
return true, nil
})
if err != nil {
return err
} else if done {
return nil
} else {
// Shouldn't happen - we always return a non-nil error with false
return wait.ErrWaitTimeout
}
}
// To prevent concurrent creates on the same file while maintaining atomicity of writes,
// we take a process-wide lock during the operation.
// Not a great approach, but fine for a single process (with low concurrency)
// TODO: should we enable versioning?
var createFileLockOSS sync.Mutex
func (p *OSSPath) CreateFile(data io.ReadSeeker, acl ACL) error {
createFileLockOSS.Lock()
defer createFileLockOSS.Unlock()
// Check if exists
b := p.client.Bucket(p.bucket)
exist, err := b.Exists(p.key)
if err != nil {
return err
}
if exist {
return os.ErrExist
}
return p.WriteFile(data, acl)
}
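A short sketch of the create-versus-write semantics above (placeholder bucket and key; passing a nil ACL lets WriteFile fall back to oss.Private).

package main

import (
	"bytes"
	"log"
	"os"

	"k8s.io/kops/util/pkg/vfs"
)

func main() {
	client, err := vfs.NewAliOSSClient()
	if err != nil {
		log.Fatal(err)
	}
	p, err := vfs.NewOSSPath(client, "example-bucket", "cluster/config")
	if err != nil {
		log.Fatal(err)
	}

	data := bytes.NewReader([]byte("spec"))

	// CreateFile refuses to overwrite and returns os.ErrExist if the key is already present...
	err = p.CreateFile(data, nil)
	if os.IsExist(err) {
		// ...while WriteFile overwrites unconditionally (it re-seeks the reader itself).
		err = p.WriteFile(data, nil)
	}
	if err != nil {
		log.Fatalf("writing %s: %v", p, err)
	}
}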
func (p *OSSPath) Remove() error {
b := p.client.Bucket(p.bucket)
done, err := RetryWithBackoff(ossWriteBackoff, func() (bool, error) {
glog.V(8).Infof("removing file %s", p)
err := b.Del(p.key)
if err != nil {
return false, fmt.Errorf("error deleting %s: %v", p, err)
}
return true, nil
})
if err != nil {
return err
} else if done {
return nil
} else {
// Shouldn't happen - we always return a non-nil error with false
return wait.ErrWaitTimeout
}
}
func (p *OSSPath) Base() string {
return path.Base(p.key)
}
func (p *OSSPath) Path() string {
return "oss://" + p.bucket + "/" + p.key
}
func (p *OSSPath) ReadDir() ([]Path, error) {
prefix := p.key
if !strings.HasSuffix(prefix, "/") {
prefix += "/"
}
// OSS can return at most 1000 paths (keys + common prefixes) at a time
opt := listOption{
prefix: prefix,
delim: "/",
marker: "",
max: 1000,
}
return p.listPath(opt)
}
func (p *OSSPath) ReadTree() ([]Path, error) {
prefix := p.key
if !strings.HasSuffix(prefix, "/") {
prefix += "/"
}
// OSS can return at most 1000 paths (keys + common prefixes) at a time
opt := listOption{
prefix: prefix,
// No delimiter for recursive search
delim: "",
marker: "",
max: 1000,
}
return p.listPath(opt)
}
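To illustrate the difference between the two listings (placeholder bucket and prefix): ReadDir passes "/" as the delimiter and returns only the immediate children, while ReadTree passes no delimiter and returns every key under the prefix; both page through results 1000 keys at a time via listPath.

package main

import (
	"fmt"
	"log"

	"k8s.io/kops/util/pkg/vfs"
)

func main() {
	client, err := vfs.NewAliOSSClient()
	if err != nil {
		log.Fatal(err)
	}
	dir, err := vfs.NewOSSPath(client, "example-bucket", "cluster")
	if err != nil {
		log.Fatal(err)
	}

	children, err := dir.ReadDir() // files plus common prefixes directly under cluster/
	if err != nil {
		log.Fatal(err)
	}
	all, err := dir.ReadTree() // every key under cluster/, recursively
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d direct children, %d keys in total\n", len(children), len(all))
}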
func (p *OSSPath) PreferredHash() (*hashing.Hash, error) {
return p.Hash(hashing.HashAlgorithmMD5)
}
func (p *OSSPath) Hash(a hashing.HashAlgorithm) (*hashing.Hash, error) {
if a != hashing.HashAlgorithmMD5 {
return nil, nil
}
md5 := p.hash
if md5 == "" {
return nil, nil
}
md5Bytes, err := hex.DecodeString(md5)
if err != nil {
return nil, fmt.Errorf("Etag was not a valid MD5 sum: %q", md5)
}
return &hashing.Hash{Algorithm: hashing.HashAlgorithmMD5, HashValue: md5Bytes}, nil
}
func (p *OSSPath) listPath(opt listOption) ([]Path, error) {
var ret []Path
b := p.client.Bucket(p.bucket)
done, err := RetryWithBackoff(ossReadBackoff, func() (bool, error) {
var paths []Path
for {
// OSS can return at most 1000 paths (keys + common prefixes) at a time
resp, err := b.List(opt.prefix, opt.delim, opt.marker, opt.max)
if err != nil {
if isOSSNotFound(err) {
return true, os.ErrNotExist
}
return false, fmt.Errorf("error listing %s: %v", p, err)
}
if len(resp.Contents) != 0 || len(resp.CommonPrefixes) != 0 {
// Contents represent files
for _, k := range resp.Contents {
child := &OSSPath{
client: p.client,
bucket: p.bucket,
key: k.Key,
}
paths = append(paths, child)
}
if len(resp.Contents) != 0 {
// start with the last key in next iteration of listing.
opt.marker = resp.Contents[len(resp.Contents)-1].Key
}
// CommonPrefixes represent directories
for _, d := range resp.CommonPrefixes {
child := &OSSPath{
client: p.client,
bucket: p.bucket,
key: d,
}
paths = append(paths, child)
}
if len(resp.CommonPrefixes) != 0 {
lastComPref := resp.CommonPrefixes[len(resp.CommonPrefixes)-1]
if strings.Compare(lastComPref, opt.marker) == 1 {
opt.marker = lastComPref
}
}
} else {
// no more files or directories
break
}
}
glog.V(8).Infof("Listed files in %v: %v", p, paths)
ret = paths
return true, nil
})
if err != nil {
return nil, err
} else if done {
return ret, nil
} else {
// Shouldn't happen - we always return a non-nil error with false
return nil, wait.ErrWaitTimeout
}
}
func isOSSNotFound(err error) bool {
if err == nil {
return false
}
ossErr, ok := err.(*oss.Error)
return ok && ossErr.StatusCode == 404
}

@@ -102,7 +102,7 @@ func IsClusterReadable(p Path) bool {
}
switch p.(type) {
case *S3Path, *GSPath, *SwiftPath:
case *S3Path, *GSPath, *SwiftPath, *OSSPath:
return true
case *KubernetesPath:

vendor/github.com/denverdino/aliyungo/LICENSE.txt generated vendored Normal file

@@ -0,0 +1,191 @@
Apache License
Version 2.0, January 2004
https://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2015-2015 Li Yi (denverdino@gmail.com).
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

@@ -0,0 +1,16 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"client.go",
"endpoint.go",
"regions.go",
"request.go",
"types.go",
"version.go",
],
importpath = "github.com/denverdino/aliyungo/common",
visibility = ["//visibility:public"],
deps = ["//vendor/github.com/denverdino/aliyungo/util:go_default_library"],
)

vendor/github.com/denverdino/aliyungo/common/client.go generated vendored Normal file

@@ -0,0 +1,502 @@
package common
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
"strconv"
"strings"
"time"
"github.com/denverdino/aliyungo/util"
)
// FlattenArray is serialized with an index appended to each array item, e.g.
// RemovalPolicy=["a", "b"] => RemovalPolicy.1="a" RemovalPolicy.2="b"
type FlattenArray []string
// UnderlineString is a string whose underscores are replaced with dots, e.g.
// SystemDisk_Category => SystemDisk.Category
type UnderlineString string
// A Client represents a client of ECS services
type Client struct {
AccessKeyId string //Access Key Id
AccessKeySecret string //Access Key Secret
securityToken string
debug bool
httpClient *http.Client
endpoint string
version string
serviceCode string
regionID Region
businessInfo string
userAgent string
}
// Initialize properties of a client instance
func (client *Client) Init(endpoint, version, accessKeyId, accessKeySecret string) {
client.AccessKeyId = accessKeyId
ak := accessKeySecret
if !strings.HasSuffix(ak, "&") {
ak += "&"
}
client.AccessKeySecret = ak
client.debug = false
handshakeTimeout, err := strconv.Atoi(os.Getenv("TLSHandshakeTimeout"))
if err != nil {
handshakeTimeout = 0
}
if handshakeTimeout == 0 {
client.httpClient = &http.Client{}
} else {
t := &http.Transport{
TLSHandshakeTimeout: time.Duration(handshakeTimeout) * time.Second}
client.httpClient = &http.Client{Transport: t}
}
client.endpoint = endpoint
client.version = version
}
// Initialize properties of a client instance including regionID
func (client *Client) NewInit(endpoint, version, accessKeyId, accessKeySecret, serviceCode string, regionID Region) {
client.Init(endpoint, version, accessKeyId, accessKeySecret)
client.serviceCode = serviceCode
client.regionID = regionID
client.setEndpointByLocation(regionID, serviceCode, accessKeyId, accessKeySecret, client.securityToken)
}
// Initialize client object when all properties are ready
func (client *Client) InitClient() *Client {
client.debug = false
handshakeTimeout, err := strconv.Atoi(os.Getenv("TLSHandshakeTimeout"))
if err != nil {
handshakeTimeout = 0
}
if handshakeTimeout == 0 {
client.httpClient = &http.Client{}
} else {
t := &http.Transport{
TLSHandshakeTimeout: time.Duration(handshakeTimeout) * time.Second}
client.httpClient = &http.Client{Transport: t}
}
client.setEndpointByLocation(client.regionID, client.serviceCode, client.AccessKeyId, client.AccessKeySecret, client.securityToken)
return client
}
func (client *Client) NewInitForAssumeRole(endpoint, version, accessKeyId, accessKeySecret, serviceCode string, regionID Region, securityToken string) {
client.NewInit(endpoint, version, accessKeyId, accessKeySecret, serviceCode, regionID)
client.securityToken = securityToken
}
// setEndpointByLocation resolves the service endpoint via the location service
func (client *Client) setEndpointByLocation(region Region, serviceCode, accessKeyId, accessKeySecret, securityToken string) {
locationClient := NewLocationClient(accessKeyId, accessKeySecret, securityToken)
ep := locationClient.DescribeOpenAPIEndpoint(region, serviceCode)
if ep == "" {
ep = loadEndpointFromFile(region, serviceCode)
}
if ep != "" {
client.endpoint = ep
}
}
// Ensure all necessary properties are valid
func (client *Client) ensureProperties() error {
var msg string
if client.endpoint == "" {
msg = fmt.Sprintf("endpoint cannot be empty!")
} else if client.version == "" {
msg = fmt.Sprintf("version cannot be empty!")
} else if client.AccessKeyId == "" {
msg = fmt.Sprintf("AccessKeyId cannot be empty!")
} else if client.AccessKeySecret == "" {
msg = fmt.Sprintf("AccessKeySecret cannot be empty!")
}
if msg != "" {
return errors.New(msg)
}
return nil
}
// ----------------------------------------------------
// WithXXX methods
// ----------------------------------------------------
// WithEndpoint sets custom endpoint
func (client *Client) WithEndpoint(endpoint string) *Client {
client.SetEndpoint(endpoint)
return client
}
// WithVersion sets custom version
func (client *Client) WithVersion(version string) *Client {
client.SetVersion(version)
return client
}
// WithRegionID sets Region ID
func (client *Client) WithRegionID(regionID Region) *Client {
client.SetRegionID(regionID)
return client
}
//WithServiceCode sets serviceCode
func (client *Client) WithServiceCode(serviceCode string) *Client {
client.SetServiceCode(serviceCode)
return client
}
// WithAccessKeyId sets new AccessKeyId
func (client *Client) WithAccessKeyId(id string) *Client {
client.SetAccessKeyId(id)
return client
}
// WithAccessKeySecret sets new AccessKeySecret
func (client *Client) WithAccessKeySecret(secret string) *Client {
client.SetAccessKeySecret(secret)
return client
}
// WithSecurityToken sets securityToken
func (client *Client) WithSecurityToken(securityToken string) *Client {
client.SetSecurityToken(securityToken)
return client
}
// WithDebug sets debug mode to log the request/response message
func (client *Client) WithDebug(debug bool) *Client {
client.SetDebug(debug)
return client
}
// WithBusinessInfo sets business info to log the request/response message
func (client *Client) WithBusinessInfo(businessInfo string) *Client {
client.SetBusinessInfo(businessInfo)
return client
}
// WithUserAgent sets user agent to the request/response message
func (client *Client) WithUserAgent(userAgent string) *Client {
client.SetUserAgent(userAgent)
return client
}
// ----------------------------------------------------
// SetXXX methods
// ----------------------------------------------------
// SetEndpoint sets custom endpoint
func (client *Client) SetEndpoint(endpoint string) {
client.endpoint = endpoint
}
// SetVersion sets custom version
func (client *Client) SetVersion(version string) {
client.version = version
}
// SetRegionID sets Region ID
func (client *Client) SetRegionID(regionID Region) {
client.regionID = regionID
}
//SetServiceCode sets serviceCode
func (client *Client) SetServiceCode(serviceCode string) {
client.serviceCode = serviceCode
}
// SetAccessKeyId sets new AccessKeyId
func (client *Client) SetAccessKeyId(id string) {
client.AccessKeyId = id
}
// SetAccessKeySecret sets new AccessKeySecret
func (client *Client) SetAccessKeySecret(secret string) {
client.AccessKeySecret = secret + "&"
}
// SetDebug sets debug mode to log the request/response message
func (client *Client) SetDebug(debug bool) {
client.debug = debug
}
// SetBusinessInfo sets business info to log the request/response message
func (client *Client) SetBusinessInfo(businessInfo string) {
if strings.HasPrefix(businessInfo, "/") {
client.businessInfo = businessInfo
} else if businessInfo != "" {
client.businessInfo = "/" + businessInfo
}
}
// SetUserAgent sets user agent to the request/response message
func (client *Client) SetUserAgent(userAgent string) {
client.userAgent = userAgent
}
// SetSecurityToken sets the security token
func (client *Client) SetSecurityToken(securityToken string) {
client.securityToken = securityToken
}
// Invoke sends the raw HTTP request for ECS services
func (client *Client) Invoke(action string, args interface{}, response interface{}) error {
if err := client.ensureProperties(); err != nil {
return err
}
request := Request{}
request.init(client.version, action, client.AccessKeyId, client.securityToken, client.regionID)
query := util.ConvertToQueryValues(request)
util.SetQueryValues(args, &query)
// Sign request
signature := util.CreateSignatureForRequest(ECSRequestMethod, &query, client.AccessKeySecret)
// Generate the request URL
requestURL := client.endpoint + "?" + query.Encode() + "&Signature=" + url.QueryEscape(signature)
httpReq, err := http.NewRequest(ECSRequestMethod, requestURL, nil)
if err != nil {
return GetClientError(err)
}
// TODO move to util and add build val flag
httpReq.Header.Set("X-SDK-Client", `AliyunGO/`+Version+client.businessInfo)
httpReq.Header.Set("User-Agent", httpReq.UserAgent()+" "+client.userAgent)
t0 := time.Now()
httpResp, err := client.httpClient.Do(httpReq)
t1 := time.Now()
if err != nil {
return GetClientError(err)
}
statusCode := httpResp.StatusCode
if client.debug {
log.Printf("Invoke %s %s %d (%v)", ECSRequestMethod, requestURL, statusCode, t1.Sub(t0))
}
defer httpResp.Body.Close()
body, err := ioutil.ReadAll(httpResp.Body)
if err != nil {
return GetClientError(err)
}
if client.debug {
var prettyJSON bytes.Buffer
err = json.Indent(&prettyJSON, body, "", " ")
log.Println(string(prettyJSON.Bytes()))
}
if statusCode >= 400 && statusCode <= 599 {
errorResponse := ErrorResponse{}
err = json.Unmarshal(body, &errorResponse)
ecsError := &Error{
ErrorResponse: errorResponse,
StatusCode: statusCode,
}
return ecsError
}
err = json.Unmarshal(body, response)
//log.Printf("%++v", response)
if err != nil {
return GetClientError(err)
}
return nil
}
// InvokeByFlattenMethod sends the raw HTTP request for ECS services, flattening array arguments into indexed query parameters
func (client *Client) InvokeByFlattenMethod(action string, args interface{}, response interface{}) error {
if err := client.ensureProperties(); err != nil {
return err
}
request := Request{}
request.init(client.version, action, client.AccessKeyId, client.securityToken, client.regionID)
query := util.ConvertToQueryValues(request)
util.SetQueryValueByFlattenMethod(args, &query)
// Sign request
signature := util.CreateSignatureForRequest(ECSRequestMethod, &query, client.AccessKeySecret)
// Generate the request URL
requestURL := client.endpoint + "?" + query.Encode() + "&Signature=" + url.QueryEscape(signature)
httpReq, err := http.NewRequest(ECSRequestMethod, requestURL, nil)
if err != nil {
return GetClientError(err)
}
// TODO move to util and add build val flag
httpReq.Header.Set("X-SDK-Client", `AliyunGO/`+Version+client.businessInfo)
httpReq.Header.Set("User-Agent", httpReq.UserAgent()+" "+client.userAgent)
t0 := time.Now()
httpResp, err := client.httpClient.Do(httpReq)
t1 := time.Now()
if err != nil {
return GetClientError(err)
}
statusCode := httpResp.StatusCode
if client.debug {
log.Printf("Invoke %s %s %d (%v)", ECSRequestMethod, requestURL, statusCode, t1.Sub(t0))
}
defer httpResp.Body.Close()
body, err := ioutil.ReadAll(httpResp.Body)
if err != nil {
return GetClientError(err)
}
if client.debug {
var prettyJSON bytes.Buffer
err = json.Indent(&prettyJSON, body, "", " ")
log.Println(string(prettyJSON.Bytes()))
}
if statusCode >= 400 && statusCode <= 599 {
errorResponse := ErrorResponse{}
err = json.Unmarshal(body, &errorResponse)
ecsError := &Error{
ErrorResponse: errorResponse,
StatusCode: statusCode,
}
return ecsError
}
err = json.Unmarshal(body, response)
//log.Printf("%++v", response)
if err != nil {
return GetClientError(err)
}
return nil
}
// InvokeByAnyMethod sends the raw HTTP request for ECS services using an arbitrary HTTP method.
// Improved variant of the method above that supports any HTTP method.
// 2017.1.30: added a path parameter to extend the request URL.
func (client *Client) InvokeByAnyMethod(method, action, path string, args interface{}, response interface{}) error {
if err := client.ensureProperties(); err != nil {
return err
}
request := Request{}
request.init(client.version, action, client.AccessKeyId, client.securityToken, client.regionID)
data := util.ConvertToQueryValues(request)
util.SetQueryValues(args, &data)
// Sign request
signature := util.CreateSignatureForRequest(method, &data, client.AccessKeySecret)
data.Add("Signature", signature)
// Generate the request URL
var (
httpReq *http.Request
err error
)
if method == http.MethodGet {
requestURL := client.endpoint + path + "?" + data.Encode()
//fmt.Println(requestURL)
httpReq, err = http.NewRequest(method, requestURL, nil)
} else {
//fmt.Println(client.endpoint + path)
httpReq, err = http.NewRequest(method, client.endpoint+path, strings.NewReader(data.Encode()))
httpReq.Header.Set("Content-Type", "application/x-www-form-urlencoded")
}
if err != nil {
return GetClientError(err)
}
// TODO move to util and add build val flag
httpReq.Header.Set("X-SDK-Client", `AliyunGO/`+Version+client.businessInfo)
httpReq.Header.Set("User-Agent", httpReq.Header.Get("User-Agent")+" "+client.userAgent)
t0 := time.Now()
httpResp, err := client.httpClient.Do(httpReq)
t1 := time.Now()
if err != nil {
return GetClientError(err)
}
statusCode := httpResp.StatusCode
if client.debug {
log.Printf("Invoke %s %s %d (%v) %v", ECSRequestMethod, client.endpoint, statusCode, t1.Sub(t0), data.Encode())
}
defer httpResp.Body.Close()
body, err := ioutil.ReadAll(httpResp.Body)
if err != nil {
return GetClientError(err)
}
if client.debug {
var prettyJSON bytes.Buffer
err = json.Indent(&prettyJSON, body, "", " ")
log.Println(string(prettyJSON.Bytes()))
}
if statusCode >= 400 && statusCode <= 599 {
errorResponse := ErrorResponse{}
err = json.Unmarshal(body, &errorResponse)
ecsError := &Error{
ErrorResponse: errorResponse,
StatusCode: statusCode,
}
return ecsError
}
err = json.Unmarshal(body, response)
//log.Printf("%++v", response)
if err != nil {
return GetClientError(err)
}
return nil
}
// GenerateClientToken generates the Client Token with random string
func (client *Client) GenerateClientToken() string {
return util.CreateRandomString()
}
func GetClientErrorFromString(str string) error {
return &Error{
ErrorResponse: ErrorResponse{
Code: "AliyunGoClientFailure",
Message: str,
},
StatusCode: -1,
}
}
func GetClientError(err error) error {
return GetClientErrorFromString(err.Error())
}

@@ -0,0 +1,191 @@
package common
import (
"encoding/xml"
"fmt"
"io/ioutil"
"os"
"strings"
)
const (
// locationDefaultEndpoint is the default API endpoint of Location services
locationDefaultEndpoint = "https://location.aliyuncs.com"
locationAPIVersion = "2015-06-12"
HTTP_PROTOCOL = "http"
HTTPS_PROTOCOL = "https"
)
var (
endpoints = make(map[Region]map[string]string)
SpecailEnpoints = map[Region]map[string]string{
APNorthEast1: {
"ecs": "https://ecs.ap-northeast-1.aliyuncs.com",
"slb": "https://slb.ap-northeast-1.aliyuncs.com",
"rds": "https://rds.ap-northeast-1.aliyuncs.com",
"vpc": "https://vpc.ap-northeast-1.aliyuncs.com",
},
APSouthEast2: {
"ecs": "https://ecs.ap-southeast-2.aliyuncs.com",
"slb": "https://slb.ap-southeast-2.aliyuncs.com",
"rds": "https://rds.ap-southeast-2.aliyuncs.com",
"vpc": "https://vpc.ap-southeast-2.aliyuncs.com",
},
APSouthEast3: {
"ecs": "https://ecs.ap-southeast-3.aliyuncs.com",
"slb": "https://slb.ap-southeast-3.aliyuncs.com",
"rds": "https://rds.ap-southeast-3.aliyuncs.com",
"vpc": "https://vpc.ap-southeast-3.aliyuncs.com",
},
MEEast1: {
"ecs": "https://ecs.me-east-1.aliyuncs.com",
"slb": "https://slb.me-east-1.aliyuncs.com",
"rds": "https://rds.me-east-1.aliyuncs.com",
"vpc": "https://vpc.me-east-1.aliyuncs.com",
},
EUCentral1: {
"ecs": "https://ecs.eu-central-1.aliyuncs.com",
"slb": "https://slb.eu-central-1.aliyuncs.com",
"rds": "https://rds.eu-central-1.aliyuncs.com",
"vpc": "https://vpc.eu-central-1.aliyuncs.com",
},
Zhangjiakou: {
"ecs": "https://ecs.cn-zhangjiakou.aliyuncs.com",
"slb": "https://slb.cn-zhangjiakou.aliyuncs.com",
"rds": "https://rds.cn-zhangjiakou.aliyuncs.com",
"vpc": "https://vpc.cn-zhangjiakou.aliyuncs.com",
},
Huhehaote: {
"ecs": "https://ecs.cn-huhehaote.aliyuncs.com",
"slb": "https://slb.cn-huhehaote.aliyuncs.com",
"rds": "https://rds.cn-huhehaote.aliyuncs.com",
"vpc": "https://vpc.cn-huhehaote.aliyuncs.com",
},
}
)
//init endpoints from file
func init() {
}
type LocationClient struct {
Client
}
func NewLocationClient(accessKeyId, accessKeySecret, securityToken string) *LocationClient {
endpoint := os.Getenv("LOCATION_ENDPOINT")
if endpoint == "" {
endpoint = locationDefaultEndpoint
}
client := &LocationClient{}
client.Init(endpoint, locationAPIVersion, accessKeyId, accessKeySecret)
client.securityToken = securityToken
return client
}
func NewLocationClientWithSecurityToken(accessKeyId, accessKeySecret, securityToken string) *LocationClient {
endpoint := os.Getenv("LOCATION_ENDPOINT")
if endpoint == "" {
endpoint = locationDefaultEndpoint
}
client := &LocationClient{}
client.WithEndpoint(endpoint).
WithVersion(locationAPIVersion).
WithAccessKeyId(accessKeyId).
WithAccessKeySecret(accessKeySecret).
WithSecurityToken(securityToken).
InitClient()
return client
}
func (client *LocationClient) DescribeEndpoint(args *DescribeEndpointArgs) (*DescribeEndpointResponse, error) {
response := &DescribeEndpointResponse{}
err := client.Invoke("DescribeEndpoint", args, response)
if err != nil {
return nil, err
}
return response, err
}
func (client *LocationClient) DescribeEndpoints(args *DescribeEndpointsArgs) (*DescribeEndpointsResponse, error) {
response := &DescribeEndpointsResponse{}
err := client.Invoke("DescribeEndpoints", args, response)
if err != nil {
return nil, err
}
return response, err
}
func getProductRegionEndpoint(region Region, serviceCode string) string {
if sp, ok := endpoints[region]; ok {
if endpoint, ok := sp[serviceCode]; ok {
return endpoint
}
}
return ""
}
func setProductRegionEndpoint(region Region, serviceCode string, endpoint string) {
endpoints[region] = map[string]string{
serviceCode: endpoint,
}
}
func (client *LocationClient) DescribeOpenAPIEndpoint(region Region, serviceCode string) string {
if endpoint := getProductRegionEndpoint(region, serviceCode); endpoint != "" {
return endpoint
}
defaultProtocols := HTTP_PROTOCOL
args := &DescribeEndpointsArgs{
Id: region,
ServiceCode: serviceCode,
Type: "openAPI",
}
endpoint, err := client.DescribeEndpoints(args)
if err != nil || len(endpoint.Endpoints.Endpoint) <= 0 {
return ""
}
for _, protocol := range endpoint.Endpoints.Endpoint[0].Protocols.Protocols {
if strings.ToLower(protocol) == HTTPS_PROTOCOL {
defaultProtocols = HTTPS_PROTOCOL
break
}
}
ep := fmt.Sprintf("%s://%s", defaultProtocols, endpoint.Endpoints.Endpoint[0].Endpoint)
//setProductRegionEndpoint(region, serviceCode, ep)
return ep
}
func loadEndpointFromFile(region Region, serviceCode string) string {
data, err := ioutil.ReadFile("./endpoints.xml")
if err != nil {
return ""
}
var endpoints Endpoints
err = xml.Unmarshal(data, &endpoints)
if err != nil {
return ""
}
for _, endpoint := range endpoints.Endpoint {
if endpoint.RegionIds.RegionId == string(region) {
for _, product := range endpoint.Products.Product {
if strings.ToLower(product.ProductName) == serviceCode {
return fmt.Sprintf("%s://%s", HTTPS_PROTOCOL, product.DomainName)
}
}
}
}
return ""
}

File diff suppressed because it is too large
@@ -0,0 +1,44 @@
package common
// Region represents ECS region
type Region string
// Constants of region definition
const (
Hangzhou = Region("cn-hangzhou")
Qingdao = Region("cn-qingdao")
Beijing = Region("cn-beijing")
Hongkong = Region("cn-hongkong")
Shenzhen = Region("cn-shenzhen")
Shanghai = Region("cn-shanghai")
Zhangjiakou = Region("cn-zhangjiakou")
Huhehaote = Region("cn-huhehaote")
APSouthEast1 = Region("ap-southeast-1")
APNorthEast1 = Region("ap-northeast-1")
APSouthEast2 = Region("ap-southeast-2")
APSouthEast3 = Region("ap-southeast-3")
APSouthEast5 = Region("ap-southeast-5")
APSouth1 = Region("ap-south-1")
USWest1 = Region("us-west-1")
USEast1 = Region("us-east-1")
MEEast1 = Region("me-east-1")
EUCentral1 = Region("eu-central-1")
ShenZhenFinance = Region("cn-shenzhen-finance-1")
ShanghaiFinance = Region("cn-shanghai-finance-1")
)
var ValidRegions = []Region{
Hangzhou, Qingdao, Beijing, Shenzhen, Hongkong, Shanghai, Zhangjiakou, Huhehaote,
USWest1, USEast1,
APNorthEast1, APSouthEast1, APSouthEast2, APSouthEast3, APSouthEast5,
APSouth1,
MEEast1,
EUCentral1,
ShenZhenFinance, ShanghaiFinance,
}

vendor/github.com/denverdino/aliyungo/common/request.go generated vendored Normal file

@@ -0,0 +1,105 @@
package common
import (
"fmt"
"log"
"time"
"github.com/denverdino/aliyungo/util"
)
// Constants for Aliyun API requests
const (
SignatureVersion = "1.0"
SignatureMethod = "HMAC-SHA1"
JSONResponseFormat = "JSON"
XMLResponseFormat = "XML"
ECSRequestMethod = "GET"
)
type Request struct {
Format string
Version string
RegionId Region
AccessKeyId string
SecurityToken string
Signature string
SignatureMethod string
Timestamp util.ISO6801Time
SignatureVersion string
SignatureNonce string
ResourceOwnerAccount string
Action string
}
func (request *Request) init(version string, action string, AccessKeyId string, securityToken string, regionId Region) {
request.Format = JSONResponseFormat
request.Timestamp = util.NewISO6801Time(time.Now().UTC())
request.Version = version
request.SignatureVersion = SignatureVersion
request.SignatureMethod = SignatureMethod
request.SignatureNonce = util.CreateRandomString()
request.Action = action
request.AccessKeyId = AccessKeyId
request.SecurityToken = securityToken
request.RegionId = regionId
}
type Response struct {
RequestId string
}
type ErrorResponse struct {
Response
HostId string
Code string
Message string
}
// An Error represents a custom error for Aliyun API failure response
type Error struct {
ErrorResponse
StatusCode int //Status Code of HTTP Response
}
func (e *Error) Error() string {
return fmt.Sprintf("Aliyun API Error: RequestId: %s Status Code: %d Code: %s Message: %s", e.RequestId, e.StatusCode, e.Code, e.Message)
}
type Pagination struct {
PageNumber int
PageSize int
}
func (p *Pagination) SetPageSize(size int) {
p.PageSize = size
}
func (p *Pagination) Validate() {
if p.PageNumber < 0 {
log.Printf("Invalid PageNumber: %d", p.PageNumber)
p.PageNumber = 1
}
if p.PageSize < 0 {
log.Printf("Invalid PageSize: %d", p.PageSize)
p.PageSize = 10
} else if p.PageSize > 50 {
log.Printf("Invalid PageSize: %d", p.PageSize)
p.PageSize = 50
}
}
// A PaginationResult represents a response with pagination information
type PaginationResult struct {
TotalCount int
PageNumber int
PageSize int
}
// NextPage gets the next page of the result set
func (r *PaginationResult) NextPage() *Pagination {
if r.PageNumber*r.PageSize >= r.TotalCount {
return nil
}
return &Pagination{PageNumber: r.PageNumber + 1, PageSize: r.PageSize}
}

vendor/github.com/denverdino/aliyungo/common/types.go generated vendored Normal file

@@ -0,0 +1,107 @@
package common
type InternetChargeType string
const (
PayByBandwidth = InternetChargeType("PayByBandwidth")
PayByTraffic = InternetChargeType("PayByTraffic")
)
type InstanceChargeType string
const (
PrePaid = InstanceChargeType("PrePaid")
PostPaid = InstanceChargeType("PostPaid")
)
type DescribeEndpointArgs struct {
Id Region
ServiceCode string
Type string
}
type EndpointItem struct {
Protocols struct {
Protocols []string
}
Type string
Namespace string
Id Region
SerivceCode string
Endpoint string
}
type DescribeEndpointResponse struct {
Response
EndpointItem
}
type DescribeEndpointsArgs struct {
Id Region
ServiceCode string
Type string
}
type DescribeEndpointsResponse struct {
Response
Endpoints APIEndpoints
RequestId string
Success bool
}
type APIEndpoints struct {
Endpoint []EndpointItem
}
type NetType string
const (
Internet = NetType("Internet")
Intranet = NetType("Intranet")
)
type TimeType string
const (
Hour = TimeType("Hour")
Day = TimeType("Day")
Week = TimeType("Week")
Month = TimeType("Month")
Year = TimeType("Year")
)
type NetworkType string
const (
Classic = NetworkType("Classic")
VPC = NetworkType("VPC")
)
type BusinessInfo struct {
Pack string `json:"pack,omitempty"`
ActivityId string `json:"activityId,omitempty"`
}
//xml
type Endpoints struct {
Endpoint []Endpoint `xml:"Endpoint"`
}
type Endpoint struct {
Name string `xml:"name,attr"`
RegionIds RegionIds `xml:"RegionIds"`
Products Products `xml:"Products"`
}
type RegionIds struct {
RegionId string `xml:"RegionId"`
}
type Products struct {
Product []Product `xml:"Product"`
}
type Product struct {
ProductName string `xml:"ProductName"`
DomainName string `xml:"DomainName"`
}

@@ -0,0 +1,3 @@
package common
const Version = "0.1"

vendor/github.com/denverdino/aliyungo/oss/BUILD.bazel generated vendored Normal file

@@ -0,0 +1,19 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"authenticate_callback.go",
"client.go",
"export.go",
"multi.go",
"regions.go",
"signature.go",
],
importpath = "github.com/denverdino/aliyungo/oss",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/denverdino/aliyungo/common:go_default_library",
"//vendor/github.com/denverdino/aliyungo/util:go_default_library",
],
)

@@ -0,0 +1,88 @@
package oss
import (
"crypto"
"crypto/md5"
"crypto/rsa"
"crypto/x509"
"encoding/base64"
"encoding/pem"
"errors"
"io/ioutil"
"net/http"
"regexp"
"strings"
"sync"
)
type authenticationType struct {
lock *sync.RWMutex
certificate map[string]*rsa.PublicKey
}
var (
authentication = authenticationType{lock: &sync.RWMutex{}, certificate: map[string]*rsa.PublicKey{}}
urlReg = regexp.MustCompile(`^http(|s)://gosspublic.alicdn.com/[0-9a-zA-Z]`)
)
// AuthenticateCallBack verifies the callback request that OSS sends to the business server.
// This method is safe for concurrent use.
// pubKeyUrl is the value of the [x-oss-pub-key-url] callback request header, Base64 encoded
// reqUrl is the URL of the request sent by OSS, composed of path+query
// reqBody is the body of the request sent by OSS
// authorization is the signature carried in the callback header
func AuthenticateCallBack(pubKeyUrl, reqUrl, reqBody, authorization string) error {
// fetch the certificate URL
keyURL, err := base64.URLEncoding.DecodeString(pubKeyUrl)
if err != nil {
return err
}
url := string(keyURL)
// check that the certificate comes from Alibaba Cloud
if !urlReg.Match(keyURL) {
return errors.New("certificate address error")
}
// extract the file name
rs := []rune(url)
filename := string(rs[strings.LastIndex(url, "/") : len(rs)-1])
authentication.lock.RLock()
certificate := authentication.certificate[filename]
authentication.lock.RUnlock()
// certificate not in memory, download it
if certificate == nil {
authentication.lock.Lock()
res, err := http.Get(url)
if err != nil {
return err
}
defer res.Body.Close()
body, err := ioutil.ReadAll(res.Body)
if err != nil {
return err
}
block, _ := pem.Decode(body)
if block == nil {
return errors.New("certificate error")
}
pubKey, err := x509.ParsePKIXPublicKey(block.Bytes)
if err != nil {
return err
}
certificate = pubKey.(*rsa.PublicKey)
authentication.certificate[filename] = certificate
authentication.lock.Unlock()
}
// certificate is ready, start verification
// decode the signature
signature, err := base64.StdEncoding.DecodeString(authorization)
if err != nil {
return err
}
hashed := md5.New()
hashed.Write([]byte(reqUrl + "\n" + reqBody))
if err := rsa.VerifyPKCS1v15(certificate, crypto.MD5, hashed.Sum(nil), signature); err != nil {
return err
}
// verification passed
return nil
}

vendor/github.com/denverdino/aliyungo/oss/client.go generated vendored Normal file
File diff suppressed because it is too large

vendor/github.com/denverdino/aliyungo/oss/export.go generated vendored Normal file

@@ -0,0 +1,23 @@
package oss
import (
"github.com/denverdino/aliyungo/util"
)
var originalStrategy = attempts
func SetAttemptStrategy(s *util.AttemptStrategy) {
if s == nil {
attempts = originalStrategy
} else {
attempts = *s
}
}
func SetListPartsMax(n int) {
listPartsMax = n
}
func SetListMultiMax(n int) {
listMultiMax = n
}

vendor/github.com/denverdino/aliyungo/oss/multi.go generated vendored Normal file

@@ -0,0 +1,489 @@
package oss
import (
"bytes"
"crypto/md5"
"encoding/base64"
"encoding/hex"
"encoding/xml"
"errors"
"io"
"time"
//"log"
"net/http"
"net/url"
"sort"
"strconv"
"strings"
)
// Multi represents an unfinished multipart upload.
//
// Multipart uploads allow sending big objects in smaller chunks.
// After all parts have been sent, the upload must be explicitly
// completed by calling Complete with the list of parts.
type Multi struct {
Bucket *Bucket
Key string
UploadId string
}
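For illustration only (not part of multi.go): a sketch of the multipart flow described in the comment above, using the APIs visible in this diff. Complete is referenced in the comment but not shown in this hunk, so its signature is assumed; region, credentials, bucket and key are placeholders.

package main

import (
	"bytes"
	"log"

	"github.com/denverdino/aliyungo/oss"
)

func main() {
	client := oss.NewOSSClient(oss.Region("oss-cn-hangzhou"), false, "<access-key-id>", "<secret>", true)
	b := client.Bucket("example-bucket")

	// InitMulti starts a new multipart upload for the key (Multi would resume an existing one).
	m, err := b.InitMulti("big/object", "application/octet-stream", oss.Private, oss.Options{})
	if err != nil {
		log.Fatal(err)
	}

	// Every part except the last must be at least 5MB; a single small part is fine for illustration.
	part, err := m.PutPart(1, bytes.NewReader([]byte("payload")))
	if err != nil {
		log.Fatal(err)
	}

	// Complete finalizes the upload with the list of parts (signature assumed: Complete([]Part) error).
	if err := m.Complete([]oss.Part{part}); err != nil {
		log.Fatal(err)
	}
}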
// That's the default. Here just for testing.
var listMultiMax = 1000
type listMultiResp struct {
NextKeyMarker string
NextUploadIdMarker string
IsTruncated bool
Upload []Multi
CommonPrefixes []string `xml:"CommonPrefixes>Prefix"`
}
// ListMulti returns the list of unfinished multipart uploads in b.
//
// The prefix parameter limits the response to keys that begin with the
// specified prefix. You can use prefixes to separate a bucket into different
// groupings of keys (to get the feeling of folders, for example).
//
// The delim parameter causes the response to group all of the keys that
// share a common prefix up to the next delimiter in a single entry within
// the CommonPrefixes field. You can use delimiters to separate a bucket
// into different groupings of keys, similar to how folders would work.
//
func (b *Bucket) ListMulti(prefix, delim string) (multis []*Multi, prefixes []string, err error) {
params := make(url.Values)
params.Set("uploads", "")
params.Set("max-uploads", strconv.FormatInt(int64(listMultiMax), 10))
params.Set("prefix", prefix)
params.Set("delimiter", delim)
for attempt := attempts.Start(); attempt.Next(); {
req := &request{
method: "GET",
bucket: b.Name,
params: params,
}
var resp listMultiResp
err := b.Client.query(req, &resp)
if shouldRetry(err) && attempt.HasNext() {
continue
}
if err != nil {
return nil, nil, err
}
for i := range resp.Upload {
multi := &resp.Upload[i]
multi.Bucket = b
multis = append(multis, multi)
}
prefixes = append(prefixes, resp.CommonPrefixes...)
if !resp.IsTruncated {
return multis, prefixes, nil
}
params.Set("key-marker", resp.NextKeyMarker)
params.Set("upload-id-marker", resp.NextUploadIdMarker)
attempt = attempts.Start() // Last request worked.
}
panic("unreachable")
}
// Multi returns a multipart upload handler for the provided key
// inside b. If a multipart upload exists for key, it is returned,
// otherwise a new multipart upload is initiated with contType and perm.
func (b *Bucket) Multi(key, contType string, perm ACL, options Options) (*Multi, error) {
multis, _, err := b.ListMulti(key, "")
if err != nil && !hasCode(err, "NoSuchUpload") {
return nil, err
}
for _, m := range multis {
if m.Key == key {
return m, nil
}
}
return b.InitMulti(key, contType, perm, options)
}
// InitMulti initializes a new multipart upload at the provided
// key inside b and returns a value for manipulating it.
//
//
// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/multipart-upload&InitiateMultipartUpload
func (b *Bucket) InitMulti(key string, contType string, perm ACL, options Options) (*Multi, error) {
headers := make(http.Header)
headers.Set("Content-Length", "0")
headers.Set("Content-Type", contType)
headers.Set("x-oss-acl", string(perm))
options.addHeaders(headers)
params := make(url.Values)
params.Set("uploads", "")
req := &request{
method: "POST",
bucket: b.Name,
path: key,
headers: headers,
params: params,
}
var err error
var resp struct {
UploadId string `xml:"UploadId"`
}
for attempt := attempts.Start(); attempt.Next(); {
err = b.Client.query(req, &resp)
if !shouldRetry(err) {
break
}
}
if err != nil {
return nil, err
}
return &Multi{Bucket: b, Key: key, UploadId: resp.UploadId}, nil
}
func (m *Multi) PutPartCopy(n int, options CopyOptions, source string) (*CopyObjectResult, Part, error) {
return m.PutPartCopyWithContentLength(n, options, source, -1)
}
//
// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/multipart-upload&UploadPartCopy
func (m *Multi) PutPartCopyWithContentLength(n int, options CopyOptions, source string, contentLength int64) (*CopyObjectResult, Part, error) {
// TODO source format a /BUCKET/PATH/TO/OBJECT
// TODO not a good design. API could be changed to PutPartCopyWithinBucket(..., path) and PutPartCopyFromBucket(bucket, path)
headers := make(http.Header)
headers.Set("x-oss-copy-source", source)
options.addHeaders(headers)
params := make(url.Values)
params.Set("uploadId", m.UploadId)
params.Set("partNumber", strconv.FormatInt(int64(n), 10))
if contentLength < 0 {
sourceBucket := m.Bucket.Client.Bucket(strings.TrimRight(strings.Split(source, "/")[1], "/"))
//log.Println("source: ", source)
//log.Println("sourceBucket: ", sourceBucket.Name)
//log.Println("HEAD: ", strings.strings.SplitAfterN(source, "/", 3)[2])
// TODO SplitAfterN can be use in bucket name
sourceMeta, err := sourceBucket.Head(strings.SplitAfterN(source, "/", 3)[2], nil)
if err != nil {
return nil, Part{}, err
}
contentLength = sourceMeta.ContentLength
}
for attempt := attempts.Start(); attempt.Next(); {
req := &request{
method: "PUT",
bucket: m.Bucket.Name,
path: m.Key,
headers: headers,
params: params,
}
resp := &CopyObjectResult{}
err := m.Bucket.Client.query(req, resp)
if shouldRetry(err) && attempt.HasNext() {
continue
}
if err != nil {
return nil, Part{}, err
}
if resp.ETag == "" {
return nil, Part{}, errors.New("part upload succeeded with no ETag")
}
return resp, Part{n, resp.ETag, contentLength}, nil
}
panic("unreachable")
}
// PutPart sends part n of the multipart upload, reading all the content from r.
// Each part, except for the last one, must be at least 5MB in size.
//
//
// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/multipart-upload&UploadPart
func (m *Multi) PutPart(n int, r io.ReadSeeker) (Part, error) {
partSize, _, md5b64, err := seekerInfo(r)
if err != nil {
return Part{}, err
}
return m.putPart(n, r, partSize, md5b64, 0)
}
func (m *Multi) PutPartWithTimeout(n int, r io.ReadSeeker, timeout time.Duration) (Part, error) {
partSize, _, md5b64, err := seekerInfo(r)
if err != nil {
return Part{}, err
}
return m.putPart(n, r, partSize, md5b64, timeout)
}
func (m *Multi) putPart(n int, r io.ReadSeeker, partSize int64, md5b64 string, timeout time.Duration) (Part, error) {
headers := make(http.Header)
headers.Set("Content-Length", strconv.FormatInt(partSize, 10))
headers.Set("Content-MD5", md5b64)
params := make(url.Values)
params.Set("uploadId", m.UploadId)
params.Set("partNumber", strconv.FormatInt(int64(n), 10))
for attempt := attempts.Start(); attempt.Next(); {
_, err := r.Seek(0, 0)
if err != nil {
return Part{}, err
}
req := &request{
method: "PUT",
bucket: m.Bucket.Name,
path: m.Key,
headers: headers,
params: params,
payload: r,
timeout: timeout,
}
err = m.Bucket.Client.prepare(req)
if err != nil {
return Part{}, err
}
resp, err := m.Bucket.Client.run(req, nil)
if shouldRetry(err) && attempt.HasNext() {
continue
}
if err != nil {
return Part{}, err
}
etag := resp.Header.Get("ETag")
if etag == "" {
return Part{}, errors.New("part upload succeeded with no ETag")
}
return Part{n, etag, partSize}, nil
}
panic("unreachable")
}
func seekerInfo(r io.ReadSeeker) (size int64, md5hex string, md5b64 string, err error) {
_, err = r.Seek(0, 0)
if err != nil {
return 0, "", "", err
}
digest := md5.New()
size, err = io.Copy(digest, r)
if err != nil {
return 0, "", "", err
}
sum := digest.Sum(nil)
md5hex = hex.EncodeToString(sum)
md5b64 = base64.StdEncoding.EncodeToString(sum)
return size, md5hex, md5b64, nil
}
type Part struct {
N int `xml:"PartNumber"`
ETag string
Size int64
}
type partSlice []Part
func (s partSlice) Len() int { return len(s) }
func (s partSlice) Less(i, j int) bool { return s[i].N < s[j].N }
func (s partSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
type listPartsResp struct {
NextPartNumberMarker string
IsTruncated bool
Part []Part
}
// 1000 is the server-side default; this is a variable only so tests can override it.
var listPartsMax = 1000
// ListParts is kept for backwards compatibility. See the documentation for ListPartsFull.
func (m *Multi) ListParts() ([]Part, error) {
return m.ListPartsFull(0, listPartsMax)
}
// ListPartsFull returns the list of previously uploaded parts in m,
// ordered by part number. Only parts with part numbers greater than
// partNumberMarker are listed, and at most maxParts parts are returned.
func (m *Multi) ListPartsFull(partNumberMarker int, maxParts int) ([]Part, error) {
if maxParts > listPartsMax {
maxParts = listPartsMax
}
params := make(url.Values)
params.Set("uploadId", m.UploadId)
params.Set("max-parts", strconv.FormatInt(int64(maxParts), 10))
params.Set("part-number-marker", strconv.FormatInt(int64(partNumberMarker), 10))
var parts partSlice
for attempt := attempts.Start(); attempt.Next(); {
req := &request{
method: "GET",
bucket: m.Bucket.Name,
path: m.Key,
params: params,
}
var resp listPartsResp
err := m.Bucket.Client.query(req, &resp)
if shouldRetry(err) && attempt.HasNext() {
continue
}
if err != nil {
return nil, err
}
parts = append(parts, resp.Part...)
if !resp.IsTruncated {
sort.Sort(parts)
return parts, nil
}
params.Set("part-number-marker", resp.NextPartNumberMarker)
attempt = attempts.Start() // Last request worked.
}
panic("unreachable")
}
type ReaderAtSeeker interface {
io.ReaderAt
io.ReadSeeker
}
// PutAll sends all of r via a multipart upload with parts no larger
// than partSize bytes, which must be set to at least 5MB.
// Parts previously uploaded are either reused if their checksum
// and size match the new part, or otherwise overwritten with the
// new content.
// PutAll returns all the parts of m (reused or not).
func (m *Multi) PutAll(r ReaderAtSeeker, partSize int64) ([]Part, error) {
old, err := m.ListParts()
if err != nil && !hasCode(err, "NoSuchUpload") {
return nil, err
}
reuse := 0 // Index of next old part to consider reusing.
current := 1 // Part number of latest good part handled.
totalSize, err := r.Seek(0, 2)
if err != nil {
return nil, err
}
first := true // Must send at least one empty part if the file is empty.
var result []Part
NextSection:
for offset := int64(0); offset < totalSize || first; offset += partSize {
first = false
if offset+partSize > totalSize {
partSize = totalSize - offset
}
section := io.NewSectionReader(r, offset, partSize)
_, md5hex, md5b64, err := seekerInfo(section)
if err != nil {
return nil, err
}
for reuse < len(old) && old[reuse].N <= current {
// Looks like this part was already sent.
part := &old[reuse]
etag := `"` + md5hex + `"`
if part.N == current && part.Size == partSize && part.ETag == etag {
// Checksum matches. Reuse the old part.
result = append(result, *part)
current++
continue NextSection
}
reuse++
}
// Part wasn't found or doesn't match. Send it.
part, err := m.putPart(current, section, partSize, md5b64, 0)
if err != nil {
return nil, err
}
result = append(result, part)
current++
}
return result, nil
}
type completeUpload struct {
XMLName xml.Name `xml:"CompleteMultipartUpload"`
Parts completeParts `xml:"Part"`
}
type completePart struct {
PartNumber int
ETag string
}
type completeParts []completePart
func (p completeParts) Len() int { return len(p) }
func (p completeParts) Less(i, j int) bool { return p[i].PartNumber < p[j].PartNumber }
func (p completeParts) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
// Complete assembles the given previously uploaded parts into the
// final object. This operation may take several minutes.
//
func (m *Multi) Complete(parts []Part) error {
params := make(url.Values)
params.Set("uploadId", m.UploadId)
c := completeUpload{}
for _, p := range parts {
c.Parts = append(c.Parts, completePart{p.N, p.ETag})
}
sort.Sort(c.Parts)
data, err := xml.Marshal(&c)
if err != nil {
return err
}
for attempt := attempts.Start(); attempt.Next(); {
req := &request{
method: "POST",
bucket: m.Bucket.Name,
path: m.Key,
params: params,
payload: bytes.NewReader(data),
}
err := m.Bucket.Client.query(req, nil)
if shouldRetry(err) && attempt.HasNext() {
continue
}
return err
}
panic("unreachable")
}
// Abort deletes an unfinished multipart upload and any previously
// uploaded parts for it.
//
// After a multipart upload is aborted, no additional parts can be
// uploaded using it. However, if any part uploads are currently in
// progress, those part uploads might or might not succeed. As a result,
// it might be necessary to abort a given multipart upload multiple
// times in order to completely free all storage consumed by all parts.
//
// NOTE: If the described scenario happens to you, please report back to
// the goamz authors with details. In the future such retrying should be
// handled internally, but it's not clear what happens precisely (Is an
// error returned? Is the issue completely undetectable?).
//
// You can read the documentation at http://docs.aliyun.com/#/pub/oss/api-reference/multipart-upload&AbortMultipartUpload
func (m *Multi) Abort() error {
params := make(url.Values)
params.Set("uploadId", m.UploadId)
for attempt := attempts.Start(); attempt.Next(); {
req := &request{
method: "DELETE",
bucket: m.Bucket.Name,
path: m.Key,
params: params,
}
err := m.Bucket.Client.query(req, nil)
if shouldRetry(err) && attempt.HasNext() {
continue
}
return err
}
panic("unreachable")
}
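Taken together, the calls above implement the usual init/upload/complete multipart protocol. Below is a minimal usage sketch (not part of this change): it assumes the *oss.Multi handle comes from the bucket's multipart initiation helper defined earlier in this file, and that the caller supplies an already-opened *os.File.
package example
import (
	"os"

	"github.com/denverdino/aliyungo/oss"
)

// UploadLarge streams f to OSS in 5MB parts and assembles the final object.
// On failure it aborts the upload so no orphaned parts are left behind.
func UploadLarge(m *oss.Multi, f *os.File) error {
	parts, err := m.PutAll(f, 5*1024*1024) // every part except the last must be >= 5MB
	if err != nil {
		m.Abort() // best-effort cleanup of the already-uploaded parts
		return err
	}
	return m.Complete(parts)
}
PutAll reuses previously uploaded parts whose size and MD5 checksum still match, so re-running the same upload after a failure only transfers the missing pieces.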

79
vendor/github.com/denverdino/aliyungo/oss/regions.go generated vendored Normal file
View File

@ -0,0 +1,79 @@
package oss
import (
"fmt"
)
// Region represents OSS region
type Region string
// Constants of region definition
const (
Hangzhou = Region("oss-cn-hangzhou")
Qingdao = Region("oss-cn-qingdao")
Beijing = Region("oss-cn-beijing")
Hongkong = Region("oss-cn-hongkong")
Shenzhen = Region("oss-cn-shenzhen")
Shanghai = Region("oss-cn-shanghai")
Zhangjiakou = Region("oss-cn-zhangjiakou")
Huhehaote = Region("oss-cn-huhehaote")
USWest1 = Region("oss-us-west-1")
USEast1 = Region("oss-us-east-1")
APSouthEast1 = Region("oss-ap-southeast-1")
APNorthEast1 = Region("oss-ap-northeast-1")
APSouthEast2 = Region("oss-ap-southeast-2")
MEEast1 = Region("oss-me-east-1")
EUCentral1 = Region("oss-eu-central-1")
DefaultRegion = Hangzhou
)
// GetEndpoint returns endpoint of region
func (r Region) GetEndpoint(internal bool, bucket string, secure bool) string {
if internal {
return r.GetInternalEndpoint(bucket, secure)
}
return r.GetInternetEndpoint(bucket, secure)
}
func getProtocol(secure bool) string {
protocol := "http"
if secure {
protocol = "https"
}
return protocol
}
// GetInternetEndpoint returns internet endpoint of region
func (r Region) GetInternetEndpoint(bucket string, secure bool) string {
protocol := getProtocol(secure)
if bucket == "" {
return fmt.Sprintf("%s://oss.aliyuncs.com", protocol)
}
return fmt.Sprintf("%s://%s.%s.aliyuncs.com", protocol, bucket, string(r))
}
// GetInternalEndpoint returns internal endpoint of region
func (r Region) GetInternalEndpoint(bucket string, secure bool) string {
protocol := getProtocol(secure)
if bucket == "" {
return fmt.Sprintf("%s://oss-internal.aliyuncs.com", protocol)
}
return fmt.Sprintf("%s://%s.%s-internal.aliyuncs.com", protocol, bucket, string(r))
}
// GetVPCInternalEndpoint returns the VPC internal endpoint of the region
func (r Region) GetVPCInternalEndpoint(bucket string, secure bool) string {
protocol := getProtocol(secure)
if bucket == "" {
return fmt.Sprintf("%s://vpc100-oss-cn-hangzhou.aliyuncs.com", protocol)
}
if r == USEast1 {
return r.GetInternalEndpoint(bucket, secure)
} else {
return fmt.Sprintf("%s://%s.vpc100-%s.aliyuncs.com", protocol, bucket, string(r))
}
}
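A short sketch of how these endpoint helpers resolve a bucket URL; the bucket name is illustrative and only exported identifiers from this file are used.
package main

import (
	"fmt"

	"github.com/denverdino/aliyungo/oss"
)

func main() {
	// Public internet endpoint for a bucket in the Beijing region over HTTPS.
	fmt.Println(oss.Beijing.GetEndpoint(false, "my-bucket", true))
	// -> https://my-bucket.oss-cn-beijing.aliyuncs.com

	// Internal (classic network) endpoint for the same bucket.
	fmt.Println(oss.Beijing.GetInternalEndpoint("my-bucket", true))
	// -> https://my-bucket.oss-cn-beijing-internal.aliyuncs.com
}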

107
vendor/github.com/denverdino/aliyungo/oss/signature.go generated vendored Normal file
View File

@ -0,0 +1,107 @@
package oss
import (
"github.com/denverdino/aliyungo/util"
//"log"
"net/http"
"net/url"
"sort"
"strings"
)
const HeaderOSSPrefix = "x-oss-"
var ossParamsToSign = map[string]bool{
"acl": true,
"delete": true,
"location": true,
"logging": true,
"notification": true,
"partNumber": true,
"policy": true,
"requestPayment": true,
"torrent": true,
"uploadId": true,
"uploads": true,
"versionId": true,
"versioning": true,
"versions": true,
"response-content-type": true,
"response-content-language": true,
"response-expires": true,
"response-cache-control": true,
"response-content-disposition": true,
"response-content-encoding": true,
"bucketInfo": true,
}
func (client *Client) signRequest(request *request) {
query := request.params
urlSignature := query.Get("OSSAccessKeyId") != ""
headers := request.headers
contentMd5 := headers.Get("Content-Md5")
contentType := headers.Get("Content-Type")
date := ""
if urlSignature {
date = query.Get("Expires")
} else {
date = headers.Get("Date")
}
resource := request.path
if request.bucket != "" {
resource = "/" + request.bucket + request.path
}
params := make(url.Values)
for k, v := range query {
if ossParamsToSign[k] {
params[k] = v
}
}
if len(params) > 0 {
resource = resource + "?" + util.Encode(params)
}
canonicalizedResource := resource
_, canonicalizedHeader := canonicalizeHeader(headers)
stringToSign := request.method + "\n" + contentMd5 + "\n" + contentType + "\n" + date + "\n" + canonicalizedHeader + canonicalizedResource
//log.Println("stringToSign: ", stringToSign)
signature := util.CreateSignature(stringToSign, client.AccessKeySecret)
if query.Get("OSSAccessKeyId") != "" {
query.Set("Signature", signature)
} else {
headers.Set("Authorization", "OSS "+client.AccessKeyId+":"+signature)
}
}
// canonicalizeHeader has to break the http.Header abstraction in order to record the x-oss-* keys in lower case.
func canonicalizeHeader(headers http.Header) (newHeaders http.Header, result string) {
var canonicalizedHeaders []string
newHeaders = http.Header{}
for k, v := range headers {
if lower := strings.ToLower(k); strings.HasPrefix(lower, HeaderOSSPrefix) {
newHeaders[lower] = v
canonicalizedHeaders = append(canonicalizedHeaders, lower)
} else {
newHeaders[k] = v
}
}
sort.Strings(canonicalizedHeaders)
var canonicalizedHeader string
for _, k := range canonicalizedHeaders {
canonicalizedHeader += k + ":" + headers.Get(k) + "\n"
}
return newHeaders, canonicalizedHeader
}
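For header-based authentication, signRequest signs the string METHOD + "\n" + Content-MD5 + "\n" + Content-Type + "\n" + Date + "\n" + CanonicalizedOSSHeaders + CanonicalizedResource with HMAC-SHA1. A rough sketch of the equivalent computation for a plain GET request, with placeholder credentials and date (no Content-MD5, Content-Type or x-oss-* headers involved):
package main

import (
	"fmt"

	"github.com/denverdino/aliyungo/util"
)

func main() {
	// method, empty Content-MD5, empty Content-Type, date, canonicalized resource
	stringToSign := "GET" + "\n" + "" + "\n" + "" + "\n" +
		"Mon, 02 Apr 2018 08:40:02 GMT" + "\n" +
		"/my-bucket/path/to/object"
	signature := util.CreateSignature(stringToSign, "my-access-key-secret")
	fmt.Println("Authorization: OSS my-access-key-id:" + signature)
}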

14
vendor/github.com/denverdino/aliyungo/util/BUILD.bazel generated vendored Normal file
View File

@ -0,0 +1,14 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"attempt.go",
"encoding.go",
"iso6801.go",
"signature.go",
"util.go",
],
importpath = "github.com/denverdino/aliyungo/util",
visibility = ["//visibility:public"],
)

76
vendor/github.com/denverdino/aliyungo/util/attempt.go generated vendored Normal file
View File

@ -0,0 +1,76 @@
package util
import (
"time"
)
// AttemptStrategy is reused from the goamz package. It represents a
// strategy for waiting for an action to complete successfully. This is
// an internal type used by the implementation of other packages.
type AttemptStrategy struct {
Total time.Duration // total duration of attempt.
Delay time.Duration // interval between each try in the burst.
Min int // minimum number of retries; overrides Total
}
type Attempt struct {
strategy AttemptStrategy
last time.Time
end time.Time
force bool
count int
}
// Start begins a new sequence of attempts for the given strategy.
func (s AttemptStrategy) Start() *Attempt {
now := time.Now()
return &Attempt{
strategy: s,
last: now,
end: now.Add(s.Total),
force: true,
}
}
// Next waits until it is time to perform the next attempt or returns
// false if it is time to stop trying.
func (a *Attempt) Next() bool {
now := time.Now()
sleep := a.nextSleep(now)
if !a.force && !now.Add(sleep).Before(a.end) && a.strategy.Min <= a.count {
return false
}
a.force = false
if sleep > 0 && a.count > 0 {
time.Sleep(sleep)
now = time.Now()
}
a.count++
a.last = now
return true
}
func (a *Attempt) nextSleep(now time.Time) time.Duration {
sleep := a.strategy.Delay - now.Sub(a.last)
if sleep < 0 {
return 0
}
return sleep
}
// HasNext returns whether another attempt will be made if the current
// one fails. If it returns true, the following call to Next is
// guaranteed to return true.
func (a *Attempt) HasNext() bool {
if a.force || a.strategy.Min > a.count {
return true
}
now := time.Now()
if now.Add(a.nextSleep(now)).Before(a.end) {
a.force = true
return true
}
return false
}
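The same retry strategy that drives the OSS request loops above can be used directly. A minimal sketch, where doSomething stands in for any retriable call:
package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/denverdino/aliyungo/util"
)

func main() {
	strategy := util.AttemptStrategy{
		Total: 5 * time.Second,        // give up after roughly 5s of retrying
		Delay: 200 * time.Millisecond, // wait between attempts
	}

	var err error
	for attempt := strategy.Start(); attempt.Next(); {
		err = doSomething()
		if err == nil || !attempt.HasNext() {
			break
		}
	}
	fmt.Println("final result:", err)
}

// doSomething is a hypothetical stand-in for a retriable operation.
func doSomething() error {
	return errors.New("transient failure")
}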

331
vendor/github.com/denverdino/aliyungo/util/encoding.go generated vendored Normal file
View File

@ -0,0 +1,331 @@
package util
import (
"encoding/json"
"fmt"
"log"
"net/url"
"reflect"
"strconv"
"strings"
"time"
)
// FlattenFn changes instance=["a", "b"]
// into instance.1="a", instance.2="b"
func FlattenFn(fieldName string, field reflect.Value, values *url.Values) {
l := field.Len()
if l > 0 {
for i := 0; i < l; i++ {
str := field.Index(i).String()
values.Set(fieldName+"."+strconv.Itoa(i+1), str)
}
}
}
func Underline2Dot(name string) string {
return strings.Replace(name, "_", ".", -1)
}
//ConvertToQueryValues converts the struct to url.Values
func ConvertToQueryValues(ifc interface{}) url.Values {
values := url.Values{}
SetQueryValues(ifc, &values)
return values
}
//SetQueryValues sets the struct to existing url.Values following ECS encoding rules
func SetQueryValues(ifc interface{}, values *url.Values) {
setQueryValues(ifc, values, "")
}
func SetQueryValueByFlattenMethod(ifc interface{}, values *url.Values) {
setQueryValuesByFlattenMethod(ifc, values, "")
}
func setQueryValues(i interface{}, values *url.Values, prefix string) {
// add to support url.Values
mapValues, ok := i.(url.Values)
if ok {
for k, _ := range mapValues {
values.Set(k, mapValues.Get(k))
}
return
}
elem := reflect.ValueOf(i)
if elem.Kind() == reflect.Ptr {
elem = elem.Elem()
}
elemType := elem.Type()
for i := 0; i < elem.NumField(); i++ {
fieldName := elemType.Field(i).Name
anonymous := elemType.Field(i).Anonymous
tag := elemType.Field(i).Tag.Get("query")
argName := elemType.Field(i).Tag.Get("ArgName")
field := elem.Field(i)
// TODO Use Tag for validation
// tag := typ.Field(i).Tag.Get("tagname")
kind := field.Kind()
isPtr := false
if (kind == reflect.Ptr || kind == reflect.Array || kind == reflect.Slice || kind == reflect.Map || kind == reflect.Chan) && field.IsNil() {
continue
}
if kind == reflect.Ptr {
field = field.Elem()
kind = field.Kind()
isPtr = true
}
var value string
//switch field.Interface().(type) {
switch kind {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
i := field.Int()
if i != 0 || isPtr {
value = strconv.FormatInt(i, 10)
}
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
i := field.Uint()
if i != 0 || isPtr {
value = strconv.FormatUint(i, 10)
}
case reflect.Float32:
value = strconv.FormatFloat(field.Float(), 'f', 4, 32)
case reflect.Float64:
value = strconv.FormatFloat(field.Float(), 'f', 4, 64)
case reflect.Bool:
value = strconv.FormatBool(field.Bool())
case reflect.String:
value = field.String()
case reflect.Map:
ifc := field.Interface()
m := ifc.(map[string]string)
if m != nil {
j := 0
for k, v := range m {
j++
keyName := fmt.Sprintf("%s.%d.Key", fieldName, j)
values.Set(keyName, k)
valueName := fmt.Sprintf("%s.%d.Value", fieldName, j)
values.Set(valueName, v)
}
}
case reflect.Slice:
switch field.Type().Elem().Kind() {
case reflect.Uint8:
value = string(field.Bytes())
case reflect.String:
l := field.Len()
if l > 0 {
if tag == "list" {
name := argName
if argName == "" {
name = fieldName
}
for i := 0; i < l; i++ {
valueName := fmt.Sprintf("%s.%d", name, (i + 1))
values.Set(valueName, field.Index(i).String())
}
} else {
strArray := make([]string, l)
for i := 0; i < l; i++ {
strArray[i] = field.Index(i).String()
}
bytes, err := json.Marshal(strArray)
if err == nil {
value = string(bytes)
} else {
log.Printf("Failed to convert JSON: %v", err)
}
}
}
default:
l := field.Len()
for j := 0; j < l; j++ {
prefixName := fmt.Sprintf("%s.%d.", fieldName, (j + 1))
ifc := field.Index(j).Interface()
//log.Printf("%s : %v", prefixName, ifc)
if ifc != nil {
setQueryValues(ifc, values, prefixName)
}
}
continue
}
default:
switch field.Interface().(type) {
case ISO6801Time:
t := field.Interface().(ISO6801Time)
value = t.String()
case time.Time:
t := field.Interface().(time.Time)
value = GetISO8601TimeStamp(t)
default:
ifc := field.Interface()
if ifc != nil {
if anonymous {
SetQueryValues(ifc, values)
} else {
prefixName := fieldName + "."
setQueryValues(ifc, values, prefixName)
}
continue
}
}
}
if value != "" {
name := argName
if argName == "" {
name = fieldName
}
if prefix != "" {
name = prefix + name
}
values.Set(name, value)
}
}
}
func setQueryValuesByFlattenMethod(i interface{}, values *url.Values, prefix string) {
// add to support url.Values
mapValues, ok := i.(url.Values)
if ok {
for k, _ := range mapValues {
values.Set(k, mapValues.Get(k))
}
return
}
elem := reflect.ValueOf(i)
if elem.Kind() == reflect.Ptr {
elem = elem.Elem()
}
elemType := elem.Type()
for i := 0; i < elem.NumField(); i++ {
fieldName := elemType.Field(i).Name
anonymous := elemType.Field(i).Anonymous
field := elem.Field(i)
// TODO Use Tag for validation
// tag := typ.Field(i).Tag.Get("tagname")
kind := field.Kind()
isPtr := false
if (kind == reflect.Ptr || kind == reflect.Array || kind == reflect.Slice || kind == reflect.Map || kind == reflect.Chan) && field.IsNil() {
continue
}
if kind == reflect.Ptr {
field = field.Elem()
kind = field.Kind()
isPtr = true
}
var value string
//switch field.Interface().(type) {
switch kind {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
i := field.Int()
if i != 0 || isPtr {
value = strconv.FormatInt(i, 10)
}
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
i := field.Uint()
if i != 0 || isPtr {
value = strconv.FormatUint(i, 10)
}
case reflect.Float32:
value = strconv.FormatFloat(field.Float(), 'f', 4, 32)
case reflect.Float64:
value = strconv.FormatFloat(field.Float(), 'f', 4, 64)
case reflect.Bool:
value = strconv.FormatBool(field.Bool())
case reflect.String:
value = field.String()
case reflect.Map:
ifc := field.Interface()
m := ifc.(map[string]string)
if m != nil {
j := 0
for k, v := range m {
j++
keyName := fmt.Sprintf("%s.%d.Key", fieldName, j)
values.Set(keyName, k)
valueName := fmt.Sprintf("%s.%d.Value", fieldName, j)
values.Set(valueName, v)
}
}
case reflect.Slice:
if field.Type().Name() == "FlattenArray" {
FlattenFn(fieldName, field, values)
} else {
switch field.Type().Elem().Kind() {
case reflect.Uint8:
value = string(field.Bytes())
case reflect.String:
l := field.Len()
if l > 0 {
strArray := make([]string, l)
for i := 0; i < l; i++ {
strArray[i] = field.Index(i).String()
}
bytes, err := json.Marshal(strArray)
if err == nil {
value = string(bytes)
} else {
log.Printf("Failed to convert JSON: %v", err)
}
}
default:
l := field.Len()
for j := 0; j < l; j++ {
prefixName := fmt.Sprintf("%s.%d.", fieldName, (j + 1))
ifc := field.Index(j).Interface()
//log.Printf("%s : %v", prefixName, ifc)
if ifc != nil {
setQueryValuesByFlattenMethod(ifc, values, prefixName)
}
}
continue
}
}
default:
switch field.Interface().(type) {
case ISO6801Time:
t := field.Interface().(ISO6801Time)
value = t.String()
case time.Time:
t := field.Interface().(time.Time)
value = GetISO8601TimeStamp(t)
default:
ifc := field.Interface()
if ifc != nil {
if anonymous {
SetQueryValues(ifc, values)
} else {
prefixName := fieldName + "."
setQueryValuesByFlattenMethod(ifc, values, prefixName)
}
continue
}
}
}
if value != "" {
name := elemType.Field(i).Tag.Get("ArgName")
if name == "" {
name = fieldName
}
if prefix != "" {
name = prefix + name
}
// NOTE: when the field type is UnderlineString, convert the underline-style name into dot style
if field.Type().Name() == "UnderlineString" {
name = Underline2Dot(name)
}
values.Set(name, value)
}
}
}
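As a rough illustration of the encoding rules above, ConvertToQueryValues walks a struct and emits ECS-style query parameters; the struct, field names and values below are made up for the example.
package main

import (
	"fmt"

	"github.com/denverdino/aliyungo/util"
)

type describeArgs struct {
	RegionId   string
	PageSize   int
	InstanceId []string `query:"list"` // encoded as InstanceId.1, InstanceId.2, ...
}

func main() {
	args := describeArgs{
		RegionId:   "cn-beijing",
		PageSize:   10,
		InstanceId: []string{"i-abc", "i-def"},
	}
	values := util.ConvertToQueryValues(&args)
	fmt.Println(util.Encode(values))
	// -> InstanceId.1=i-abc&InstanceId.2=i-def&PageSize=10&RegionId=cn-beijing
}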

80
vendor/github.com/denverdino/aliyungo/util/iso6801.go generated vendored Normal file
View File

@ -0,0 +1,80 @@
package util
import (
"fmt"
"strconv"
"time"
)
// GetISO8601TimeStamp gets timestamp string in ISO8601 format
func GetISO8601TimeStamp(ts time.Time) string {
t := ts.UTC()
return fmt.Sprintf("%04d-%02d-%02dT%02d:%02d:%02dZ", t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second())
}
const formatISO8601 = "2006-01-02T15:04:05Z"
const jsonFormatISO8601 = `"` + formatISO8601 + `"`
const formatISO8601withoutSeconds = "2006-01-02T15:04Z"
const jsonFormatISO8601withoutSeconds = `"` + formatISO8601withoutSeconds + `"`
// An ISO6801Time represents a time in ISO8601 format
type ISO6801Time time.Time
// NewISO6801Time constructs a new ISO6801Time instance from an existing
// time.Time instance. The nanosecond field is set to 0 and the location
// is set to UTC.
func NewISO6801Time(t time.Time) ISO6801Time {
return ISO6801Time(time.Date(
t.Year(),
t.Month(),
t.Day(),
t.Hour(),
t.Minute(),
t.Second(),
0,
time.UTC,
))
}
// IsDefault checks if the time is default
func (it *ISO6801Time) IsDefault() bool {
return *it == ISO6801Time{}
}
// MarshalJSON serializes the ISO6801Time into JSON string
func (it ISO6801Time) MarshalJSON() ([]byte, error) {
return []byte(time.Time(it).Format(jsonFormatISO8601)), nil
}
// UnmarshalJSON deserializes the ISO6801Time from JSON string
func (it *ISO6801Time) UnmarshalJSON(data []byte) error {
str := string(data)
if str == "\"\"" || len(data) == 0 {
return nil
}
var t time.Time
var err error
if str[0] == '"' {
t, err = time.ParseInLocation(jsonFormatISO8601, str, time.UTC)
if err != nil {
t, err = time.ParseInLocation(jsonFormatISO8601withoutSeconds, str, time.UTC)
}
} else {
var i int64
i, err = strconv.ParseInt(str, 10, 64)
if err == nil {
t = time.Unix(i/1000, i%1000)
}
}
if err == nil {
*it = ISO6801Time(t)
}
return err
}
// String returns the time in ISO6801Time format
func (it ISO6801Time) String() string {
return time.Time(it).Format(formatISO8601)
}
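A short sketch of how ISO6801Time round-trips through JSON; the timestamp value is illustrative.
package main

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/denverdino/aliyungo/util"
)

func main() {
	t := util.NewISO6801Time(time.Date(2018, 4, 2, 8, 40, 2, 0, time.UTC))
	out, _ := json.Marshal(t)
	fmt.Println(string(out)) // -> "2018-04-02T08:40:02Z"

	var parsed util.ISO6801Time
	_ = json.Unmarshal([]byte(`"2018-04-02T08:40:02Z"`), &parsed)
	fmt.Println(parsed.String()) // -> 2018-04-02T08:40:02Z
}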

39
vendor/github.com/denverdino/aliyungo/util/signature.go generated vendored Normal file
View File

@ -0,0 +1,39 @@
package util
import (
"crypto/hmac"
"crypto/sha1"
"encoding/base64"
"net/url"
"strings"
)
// CreateSignature creates a signature for a string following Aliyun rules
func CreateSignature(stringToSignature, accessKeySecret string) string {
// Crypto by HMAC-SHA1
hmacSha1 := hmac.New(sha1.New, []byte(accessKeySecret))
hmacSha1.Write([]byte(stringToSignature))
sign := hmacSha1.Sum(nil)
// Encode to Base64
base64Sign := base64.StdEncoding.EncodeToString(sign)
return base64Sign
}
func percentReplace(str string) string {
str = strings.Replace(str, "+", "%20", -1)
str = strings.Replace(str, "*", "%2A", -1)
str = strings.Replace(str, "%7E", "~", -1)
return str
}
// CreateSignatureForRequest creates signature for query string values
func CreateSignatureForRequest(method string, values *url.Values, accessKeySecret string) string {
canonicalizedQueryString := percentReplace(values.Encode())
stringToSign := method + "&%2F&" + url.QueryEscape(canonicalizedQueryString)
return CreateSignature(stringToSign, accessKeySecret)
}
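A minimal sketch of signing a query-style (RPC) request with these helpers. CreateSignatureForRequest signs METHOD + "&%2F&" + the URL-escaped, sorted query string with HMAC-SHA1; the parameters and secret below are placeholders.
package main

import (
	"fmt"
	"net/url"
	"time"

	"github.com/denverdino/aliyungo/util"
)

func main() {
	values := url.Values{}
	values.Set("Action", "DescribeRegions")
	values.Set("Format", "JSON")
	values.Set("SignatureNonce", util.CreateRandomString())
	values.Set("Timestamp", util.GetISO8601TimeStamp(time.Now()))

	signature := util.CreateSignatureForRequest("GET", &values, "my-access-key-secret")
	fmt.Println("Signature:", signature)
}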

147
vendor/github.com/denverdino/aliyungo/util/util.go generated vendored Normal file
View File

@ -0,0 +1,147 @@
package util
import (
"bytes"
srand "crypto/rand"
"encoding/binary"
"math/rand"
"net/http"
"net/url"
"sort"
"time"
)
const dictionary = "_0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
// CreateRandomString creates a random string
func CreateRandomString() string {
b := make([]byte, 32)
l := len(dictionary)
_, err := srand.Read(b)
if err != nil {
// fall back to insecure rand
rand.Seed(time.Now().UnixNano())
for i := range b {
b[i] = dictionary[rand.Int()%l]
}
} else {
for i, v := range b {
b[i] = dictionary[v%byte(l)]
}
}
return string(b)
}
// Encode encodes the values into ``URL encoded'' form
// ("acl&bar=baz&foo=quux") sorted by key.
func Encode(v url.Values) string {
if v == nil {
return ""
}
var buf bytes.Buffer
keys := make([]string, 0, len(v))
for k := range v {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
vs := v[k]
prefix := url.QueryEscape(k)
for _, v := range vs {
if buf.Len() > 0 {
buf.WriteByte('&')
}
buf.WriteString(prefix)
if v != "" {
buf.WriteString("=")
buf.WriteString(url.QueryEscape(v))
}
}
}
return buf.String()
}
func GetGMTime() string {
return time.Now().UTC().Format(http.TimeFormat)
}
// randUint32 returns a random uint32, preferring crypto/rand and falling back to math/rand
func randUint32() uint32 {
return randUint32Slice(1)[0]
}
func randUint32Slice(c int) []uint32 {
b := make([]byte, c*4)
_, err := srand.Read(b)
if err != nil {
// fall back to insecure rand
rand.Seed(time.Now().UnixNano())
for i := range b {
b[i] = byte(rand.Int())
}
}
n := make([]uint32, c)
for i := range n {
n[i] = binary.BigEndian.Uint32(b[i*4 : i*4+4])
}
return n
}
func toByte(n uint32, st, ed byte) byte {
return byte(n%uint32(ed-st+1) + uint32(st))
}
func toDigit(n uint32) byte {
return toByte(n, '0', '9')
}
func toLowerLetter(n uint32) byte {
return toByte(n, 'a', 'z')
}
func toUpperLetter(n uint32) byte {
return toByte(n, 'A', 'Z')
}
type convFunc func(uint32) byte
var convFuncs = []convFunc{toDigit, toLowerLetter, toUpperLetter}
// GenerateRandomECSPassword generates a random ECS instance password:
// 8 to 30 characters, which MUST contain a digit, a lower-case letter and an upper-case letter.
// http://docs.aliyun.com/#/pub/ecs/open-api/instance&createinstance
func GenerateRandomECSPassword() string {
// [8, 30]
l := int(randUint32()%23 + 8)
n := randUint32Slice(l)
b := make([]byte, l)
b[0] = toDigit(n[0])
b[1] = toLowerLetter(n[1])
b[2] = toUpperLetter(n[2])
for i := 3; i < l; i++ {
b[i] = convFuncs[n[i]%3](n[i])
}
s := make([]byte, l)
perm := rand.Perm(l)
for i, v := range perm {
s[v] = b[i]
}
return string(s)
}
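Two quick usage sketches of the helpers above; the example values match the behaviour documented in the Encode comment.
package main

import (
	"fmt"
	"net/url"

	"github.com/denverdino/aliyungo/util"
)

func main() {
	// Encode sorts keys and leaves value-less keys bare.
	v := url.Values{}
	v.Set("foo", "quux")
	v.Set("bar", "baz")
	v.Set("acl", "")
	fmt.Println(util.Encode(v)) // -> acl&bar=baz&foo=quux

	// GenerateRandomECSPassword yields 8-30 characters with the required character mix.
	fmt.Println(util.GenerateRandomECSPassword())
}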

View File

@ -1 +0,0 @@
build/root/BUILD.root