mirror of https://github.com/kubernetes/kops.git
GCS paths; retry on error
The AWS SDK does this for us, I think, the GS SDK does not.
parent 991cd8c66a
commit 83300fc39f
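Note: the hunks below call a RetryWithBackoff helper that is defined elsewhere in this commit and does not appear in the hunks shown. Its contract, inferred from the call sites, seems to be: the callback returns (done, err); (true, err) stops immediately (success, or a definitive failure such as os.ErrNotExist), while (false, err) requests another attempt, with the last error surfaced once the backoff's steps are exhausted. A minimal sketch under exactly those assumptions, not the commit's actual code:

package vfs

import (
    "time"

    "k8s.io/apimachinery/pkg/util/wait"
)

// RetryWithBackoff runs callback up to backoff.Steps times, sleeping a
// jittered, exponentially growing duration between attempts. Hypothetical
// sketch; the real helper in this commit may differ.
func RetryWithBackoff(backoff wait.Backoff, callback func() (bool, error)) (bool, error) {
    sleep := backoff.Duration
    var lastErr error
    for i := 0; i < backoff.Steps; i++ {
        if i != 0 {
            d := sleep
            if backoff.Jitter > 0 {
                d = wait.Jitter(sleep, backoff.Jitter) // stretch by up to Jitter*sleep
            }
            time.Sleep(d)
            sleep = time.Duration(float64(sleep) * backoff.Factor)
        }
        done, err := callback()
        if done {
            return true, err // terminal: success, or a permanent error
        }
        lastErr = err // retryable; remember the most recent error
    }
    // Attempts exhausted: report done=false with the last error seen.
    return false, lastErr
}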
@@ -26,12 +26,14 @@ import (
 	"google.golang.org/api/googleapi"
 	storage "google.golang.org/api/storage/v1"
 	"io/ioutil"
+	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/kops/util/pkg/hashing"
 	"net/http"
 	"os"
 	"path"
 	"strings"
 	"sync"
+	"time"
 )
 
 // GSPath is a vfs path for Google Cloud Storage
@@ -45,6 +47,22 @@ type GSPath struct {
 var _ Path = &GSPath{}
 var _ HasHash = &GSPath{}
 
+// gcsReadBackoff is the backoff strategy for GCS read retries
+var gcsReadBackoff = wait.Backoff{
+	Duration: time.Second,
+	Factor:   1.5,
+	Jitter:   0.1,
+	Steps:    4,
+}
+
+// gcsWriteBackoff is the backoff strategy for GCS write retries
+var gcsWriteBackoff = wait.Backoff{
+	Duration: time.Second,
+	Factor:   1.5,
+	Jitter:   0.1,
+	Steps:    5,
+}
+
 func NewGSPath(client *storage.Service, bucket string, key string) *GSPath {
 	bucket = strings.TrimSuffix(bucket, "/")
 	key = strings.TrimPrefix(key, "/")
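For intuition about the two wait.Backoff values just added (commentary, not part of the commit): Duration is the initial delay, Factor multiplies it after each attempt, Jitter randomizes each sleep upward by up to that fraction, and Steps caps the number of attempts. Assuming the sketch above, the nominal schedule works out as follows:

package main

import (
    "fmt"
    "time"
)

// Prints the jitter-free retry schedule implied by gcsReadBackoff (Steps: 4):
// sleeps of 1s, 1.5s and 2.25s between attempts, about 4.75s of waiting in
// total. gcsWriteBackoff (Steps: 5) allows one more attempt after ~3.375s.
func main() {
    sleep := time.Second // Duration
    for attempt := 1; attempt <= 4; attempt++ {
        fmt.Printf("attempt %d\n", attempt)
        if attempt < 4 {
            fmt.Printf("  sleep %v (plus up to 10%% jitter)\n", sleep)
            sleep = time.Duration(float64(sleep) * 1.5) // Factor
        }
    }
}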
@@ -69,14 +87,24 @@ func (p *GSPath) String() string {
 }
 
 func (p *GSPath) Remove() error {
-	err := p.client.Objects.Delete(p.bucket, p.key).Do()
+	done, err := RetryWithBackoff(gcsWriteBackoff, func() (bool, error) {
+		err := p.client.Objects.Delete(p.bucket, p.key).Do()
+		if err != nil {
+			// TODO: Check for not-exists, return os.NotExist
+
+			return false, fmt.Errorf("error deleting %s: %v", p, err)
+		}
+
+		return true, nil
+	})
 	if err != nil {
-		// TODO: Check for not-exists, return os.NotExist
-
-		return fmt.Errorf("error deleting %s: %v", p, err)
+		return err
+	} else if done {
+		return nil
+	} else {
+		// Shouldn't happen - we always return a non-nil error with false
+		return wait.ErrWaitTimeout
 	}
-
-	return nil
 }
 
 func (p *GSPath) Join(relativePath ...string) Path {
@@ -91,24 +119,34 @@ func (p *GSPath) Join(relativePath ...string) Path {
 }
 
 func (p *GSPath) WriteFile(data []byte) error {
-	glog.V(4).Infof("Writing file %q", p)
+	done, err := RetryWithBackoff(gcsWriteBackoff, func() (bool, error) {
+		glog.V(4).Infof("Writing file %q", p)
 
-	md5Hash, err := hashing.HashAlgorithmMD5.Hash(bytes.NewReader(data))
-	if err != nil {
-		return err
-	}
+		md5Hash, err := hashing.HashAlgorithmMD5.Hash(bytes.NewReader(data))
+		if err != nil {
+			return false, err
+		}
 
-	obj := &storage.Object{
-		Name:    p.key,
-		Md5Hash: base64.StdEncoding.EncodeToString(md5Hash.HashValue),
-	}
-	r := bytes.NewReader(data)
-	_, err = p.client.Objects.Insert(p.bucket, obj).Media(r).Do()
-	if err != nil {
-		return fmt.Errorf("error writing %s: %v", p, err)
-	}
+		obj := &storage.Object{
+			Name:    p.key,
+			Md5Hash: base64.StdEncoding.EncodeToString(md5Hash.HashValue),
+		}
+		r := bytes.NewReader(data)
+		_, err = p.client.Objects.Insert(p.bucket, obj).Media(r).Do()
+		if err != nil {
+			return false, fmt.Errorf("error writing %s: %v", p, err)
+		}
 
-	return nil
+		return true, nil
+	})
+	if err != nil {
+		return err
+	} else if done {
+		return nil
+	} else {
+		// Shouldn't happen - we always return a non-nil error with false
+		return wait.ErrWaitTimeout
+	}
 }
 
 // To prevent concurrent creates on the same file while maintaining atomicity of writes,
@@ -134,88 +172,126 @@ func (p *GSPath) CreateFile(data []byte) error {
 	return p.WriteFile(data)
 }
 
+// ReadFile implements Path::ReadFile
 func (p *GSPath) ReadFile() ([]byte, error) {
-	glog.V(4).Infof("Reading file %q", p)
+	var ret []byte
+	done, err := RetryWithBackoff(gcsReadBackoff, func() (bool, error) {
+		glog.V(4).Infof("Reading file %q", p)
 
-	response, err := p.client.Objects.Get(p.bucket, p.key).Download()
-	if err != nil {
-		if isGCSNotFound(err) {
-			return nil, os.ErrNotExist
+		response, err := p.client.Objects.Get(p.bucket, p.key).Download()
+		if err != nil {
+			if isGCSNotFound(err) {
+				return true, os.ErrNotExist
+			}
+			return false, fmt.Errorf("error reading %s: %v", p, err)
 		}
-		return nil, fmt.Errorf("error reading %s: %v", p, err)
-	}
-	if response == nil {
-		return nil, fmt.Errorf("no response returned from reading %s", p)
-	}
-	defer response.Body.Close()
+		if response == nil {
+			return false, fmt.Errorf("no response returned from reading %s", p)
+		}
+		defer response.Body.Close()
 
-	d, err := ioutil.ReadAll(response.Body)
+		data, err := ioutil.ReadAll(response.Body)
+		if err != nil {
+			return false, fmt.Errorf("error reading %s: %v", p, err)
+		}
+		ret = data
+		return true, nil
+	})
 	if err != nil {
-		return nil, fmt.Errorf("error reading %s: %v", p, err)
+		return nil, err
+	} else if done {
+		return ret, nil
+	} else {
+		// Shouldn't happen - we always return a non-nil error with false
+		return nil, wait.ErrWaitTimeout
 	}
-	return d, nil
 }
 
+// ReadDir implements Path::ReadDir
 func (p *GSPath) ReadDir() ([]Path, error) {
-	prefix := p.key
-	if !strings.HasSuffix(prefix, "/") {
-		prefix += "/"
-	}
+	var ret []Path
+	done, err := RetryWithBackoff(gcsReadBackoff, func() (bool, error) {
+		prefix := p.key
+		if !strings.HasSuffix(prefix, "/") {
+			prefix += "/"
+		}
 
-	ctx := context.Background()
-	var paths []Path
-	err := p.client.Objects.List(p.bucket).Delimiter("/").Prefix(prefix).Pages(ctx, func(page *storage.Objects) error {
-		for _, o := range page.Items {
-			child := &GSPath{
-				client:  p.client,
-				bucket:  p.bucket,
-				key:     o.Name,
-				md5Hash: o.Md5Hash,
+		ctx := context.Background()
+		var paths []Path
+		err := p.client.Objects.List(p.bucket).Delimiter("/").Prefix(prefix).Pages(ctx, func(page *storage.Objects) error {
+			for _, o := range page.Items {
+				child := &GSPath{
+					client:  p.client,
+					bucket:  p.bucket,
+					key:     o.Name,
+					md5Hash: o.Md5Hash,
+				}
+				paths = append(paths, child)
 			}
-			paths = append(paths, child)
+			return nil
+		})
+		if err != nil {
+			if isGCSNotFound(err) {
+				return true, os.ErrNotExist
+			}
+			return false, fmt.Errorf("error listing %s: %v", p, err)
 		}
-		return nil
+		glog.V(8).Infof("Listed files in %v: %v", p, paths)
+		ret = paths
+		return true, nil
 	})
 	if err != nil {
-		if isGCSNotFound(err) {
-			return nil, os.ErrNotExist
-		}
-		return nil, fmt.Errorf("error listing %s: %v", p, err)
+		return nil, err
+	} else if done {
+		return ret, nil
+	} else {
+		// Shouldn't happen - we always return a non-nil error with false
+		return nil, wait.ErrWaitTimeout
 	}
-	glog.V(8).Infof("Listed files in %v: %v", p, paths)
-	return paths, nil
 }
 
+// ReadTree implements Path::ReadTree
 func (p *GSPath) ReadTree() ([]Path, error) {
-	// No delimiter for recursive search
-	prefix := p.key
-	if prefix != "" && !strings.HasSuffix(prefix, "/") {
-		prefix += "/"
-	}
+	var ret []Path
+	done, err := RetryWithBackoff(gcsReadBackoff, func() (bool, error) {
+		// No delimiter for recursive search
+		prefix := p.key
+		if prefix != "" && !strings.HasSuffix(prefix, "/") {
+			prefix += "/"
+		}
 
-	ctx := context.Background()
-	var paths []Path
-	err := p.client.Objects.List(p.bucket).Prefix(prefix).Pages(ctx, func(page *storage.Objects) error {
-		for _, o := range page.Items {
-			key := o.Name
-			child := &GSPath{
-				client:  p.client,
-				bucket:  p.bucket,
-				key:     key,
-				md5Hash: o.Md5Hash,
+		ctx := context.Background()
+		var paths []Path
+		err := p.client.Objects.List(p.bucket).Prefix(prefix).Pages(ctx, func(page *storage.Objects) error {
+			for _, o := range page.Items {
+				key := o.Name
+				child := &GSPath{
+					client:  p.client,
+					bucket:  p.bucket,
+					key:     key,
+					md5Hash: o.Md5Hash,
+				}
+				paths = append(paths, child)
 			}
-			paths = append(paths, child)
+			return nil
+		})
+		if err != nil {
+			if isGCSNotFound(err) {
+				return true, os.ErrNotExist
+			}
+			return false, fmt.Errorf("error listing tree %s: %v", p, err)
 		}
-		return nil
+		ret = paths
+		return true, nil
 	})
 	if err != nil {
-		if isGCSNotFound(err) {
-			return nil, os.ErrNotExist
-		}
-		return nil, fmt.Errorf("error listing tree %s: %v", p, err)
+		return nil, err
+	} else if done {
+		return ret, nil
+	} else {
+		// Shouldn't happen - we always return a non-nil error with false
+		return nil, wait.ErrWaitTimeout
 	}
-	return paths, nil
 }
 
 func (p *GSPath) Base() string {
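A design point worth noting (commentary, not part of the diff): not-found results are returned as (true, os.ErrNotExist), so a missing object is treated as a definitive answer and never retried; only other errors consume the backoff's attempts. A hypothetical caller therefore sees the usual vfs contract, with retries already folded in:

package example

import (
    "fmt"
    "os"

    "k8s.io/kops/util/pkg/vfs"
)

// readState is illustrative only (this function is not part of the commit):
// os.ErrNotExist surfaces immediately without burning retries, while any
// other error has already survived all of gcsReadBackoff's attempts.
func readState(p vfs.Path) ([]byte, error) {
    data, err := p.ReadFile()
    if os.IsNotExist(err) {
        return nil, fmt.Errorf("%v does not exist (not retried): %v", p, err)
    }
    if err != nil {
        return nil, fmt.Errorf("reading %v failed after retries: %v", p, err)
    }
    return data, nil
}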