mirror of https://github.com/kubernetes/kops.git
VFS: WriteFile takes an io.ReadSeeker
This means we no longer have to buffer big files in memory; in combination with WriteTo for reading, file contents can be streamed end to end.
parent 3434d6d999
commit 412cf377c2
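For callers, the practical effect of the new signature is that anything satisfying io.ReadSeeker can be handed to WriteFile directly. Below is a minimal, illustrative sketch (not part of the commit): the helper names and package are made up, the k8s.io/kops/util/pkg/vfs import path and the WriteFile/WriteTo signatures are taken from the diff, and an *os.File is used as the ReadSeeker so nothing is buffered in memory.

    package vfsutil // hypothetical helper package, for illustration only

    import (
        "os"

        "k8s.io/kops/util/pkg/vfs"
    )

    // copyLocalFileToVFS streams a local file into any vfs.Path.
    // *os.File satisfies io.ReadSeeker, so the contents are never
    // loaded into a []byte first.
    func copyLocalFileToVFS(localPath string, dest vfs.Path, acl vfs.ACL) error {
        f, err := os.Open(localPath)
        if err != nil {
            return err
        }
        defer f.Close()
        return dest.WriteFile(f, acl)
    }

    // readVFSFileToLocal is the streaming counterpart for reads:
    // WriteTo copies the remote contents straight into a local writer.
    func readVFSFileToLocal(src vfs.Path, localPath string) error {
        out, err := os.Create(localPath)
        if err != nil {
            return err
        }
        defer out.Close()
        _, err = src.WriteTo(out)
        return err
    }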
@@ -149,7 +149,7 @@ func RunToolboxBundle(context Factory, out io.Writer, options *ToolboxBundleOpti
 		}
 		p := root.Join("etc", "kubernetes", "bootstrap", file.Header.Name)
 		glog.Infof("writing %s", p)
-		if err := p.WriteFile(file.Data, sshAcl); err != nil {
+		if err := p.WriteFile(bytes.NewReader(file.Data), sshAcl); err != nil {
 			return fmt.Errorf("error writing file %q: %v", file.Header.Name, err)
 		}
 	}
@@ -17,6 +17,7 @@ limitations under the License.
 package registry

 import (
+	"bytes"
 	"fmt"
 	"os"
 	"strings"
@@ -81,10 +82,11 @@ func WriteConfigDeprecated(cluster *kops.Cluster, configPath vfs.Path, config in
 		return err
 	}

+	rs := bytes.NewReader(data)
 	if create {
-		err = configPath.CreateFile(data, acl)
+		err = configPath.CreateFile(rs, acl)
 	} else {
-		err = configPath.WriteFile(data, acl)
+		err = configPath.WriteFile(rs, acl)
 	}
 	if err != nil {
 		return fmt.Errorf("error writing configuration file %s: %v", configPath, err)
@@ -162,10 +162,11 @@ func (c *commonVFS) writeConfig(cluster *kops.Cluster, configPath vfs.Path, o ru
 		return err
 	}

+	rs := bytes.NewReader(data)
 	if create {
-		err = configPath.CreateFile(data, acl)
+		err = configPath.CreateFile(rs, acl)
 	} else {
-		err = configPath.WriteFile(data, acl)
+		err = configPath.WriteFile(rs, acl)
 	}
 	if err != nil {
 		if create && os.IsExist(err) {
@@ -17,6 +17,7 @@ limitations under the License.
 package components

 import (
+	"bytes"
 	"testing"

 	"k8s.io/kops/pkg/apis/kops"
@@ -97,7 +98,7 @@ func TestImage(t *testing.T) {
 			t.Errorf("error building vfs path for %s: %v", k, err)
 			continue
 		}
-		if err := p.WriteFile([]byte(v), nil); err != nil {
+		if err := p.WriteFile(bytes.NewReader([]byte(v)), nil); err != nil {
 			t.Errorf("error writing vfs path %s: %v", k, err)
 			continue
 		}
@@ -49,11 +49,11 @@ func (p *AssetPath) Join(relativePath ...string) vfs.Path {
 	return &AssetPath{location: joined}
 }

-func (p *AssetPath) WriteFile(data []byte, acl vfs.ACL) error {
+func (p *AssetPath) WriteFile(data io.ReadSeeker, acl vfs.ACL) error {
 	return ReadOnlyError
 }

-func (p *AssetPath) CreateFile(data []byte, acl vfs.ACL) error {
+func (p *AssetPath) CreateFile(data io.ReadSeeker, acl vfs.ACL) error {
 	return ReadOnlyError
 }
@@ -183,7 +183,7 @@ func writeFile(c *fi.Context, p vfs.Path, data []byte) error {
 		return err
 	}

-	if err = p.WriteFile(data, acl); err != nil {
+	if err = p.WriteFile(bytes.NewReader(data), acl); err != nil {
 		return fmt.Errorf("error writing path %v: %v", p, err)
 	}
@@ -17,6 +17,7 @@ limitations under the License.
 package fitasks

 import (
+	"bytes"
 	"fmt"
 	"os"
@@ -96,7 +97,7 @@ func (_ *ManagedFile) Render(c *fi.Context, a, e, changes *ManagedFile) error {
 		return err
 	}

-	err = p.WriteFile(data, acl)
+	err = p.WriteFile(bytes.NewReader(data), acl)
 	if err != nil {
 		return fmt.Errorf("error creating ManagedFile %q: %v", location, err)
 	}
@@ -17,6 +17,7 @@ limitations under the License.
 package secrets

 import (
+	"bytes"
 	"encoding/json"
 	"fmt"
 	"strings"
@@ -89,7 +90,7 @@ func (c *ClientsetSecretStore) MirrorTo(basedir vfs.Path) error {
 			return err
 		}

-		if err := p.WriteFile(data, acl); err != nil {
+		if err := p.WriteFile(bytes.NewReader(data), acl); err != nil {
 			return fmt.Errorf("error writing secret to %q: %v", p, err)
 		}
 	}
@@ -17,6 +17,7 @@ limitations under the License.
 package secrets

 import (
+	"bytes"
 	"encoding/json"
 	"fmt"
 	"os"
@@ -188,8 +189,9 @@ func (c *VFSSecretStore) createSecret(s *fi.Secret, p vfs.Path, acl vfs.ACL, rep
 		return fmt.Errorf("error serializing secret: %v", err)
 	}

+	rs := bytes.NewReader(data)
 	if replace {
-		return p.WriteFile(data, acl)
+		return p.WriteFile(rs, acl)
 	}
-	return p.CreateFile(data, acl)
+	return p.CreateFile(rs, acl)
 }
@@ -306,7 +306,7 @@ func (c *VFSCAStore) writeKeysetBundle(p vfs.Path, name string, keyset *keyset,
 	if err != nil {
 		return err
 	}
-	return p.WriteFile(objectData, acl)
+	return p.WriteFile(bytes.NewReader(objectData), acl)
 }

 // serializeKeysetBundle converts a keyset bundle to yaml, for writing to VFS
@@ -660,7 +660,7 @@ func mirrorKeyset(cluster *kops.Cluster, basedir vfs.Path, keyset *kops.Keyset)
 			return err
 		}

-		err = p.WriteFile(data, acl)
+		err = p.WriteFile(bytes.NewReader(data), acl)
 		if err != nil {
 			return fmt.Errorf("error writing %q: %v", p, err)
 		}
@@ -677,7 +677,7 @@ func mirrorKeyset(cluster *kops.Cluster, basedir vfs.Path, keyset *kops.Keyset)
 			return err
 		}

-		err = p.WriteFile(data, acl)
+		err = p.WriteFile(bytes.NewReader(data), acl)
 		if err != nil {
 			return fmt.Errorf("error writing %q: %v", p, err)
 		}
@@ -703,7 +703,7 @@ func mirrorSSHCredential(cluster *kops.Cluster, basedir vfs.Path, sshCredential
 		return err
 	}

-	err = p.WriteFile([]byte(sshCredential.Spec.PublicKey), acl)
+	err = p.WriteFile(bytes.NewReader([]byte(sshCredential.Spec.PublicKey)), acl)
 	if err != nil {
 		return fmt.Errorf("error writing %q: %v", p, err)
 	}
@@ -970,7 +970,7 @@ func (c *VFSCAStore) storePrivateKey(name string, ki *keysetItem) error {
 		if err != nil {
 			return err
 		}
-		return p.WriteFile(data.Bytes(), acl)
+		return p.WriteFile(bytes.NewReader(data.Bytes()), acl)
 	}
 }
@@ -1012,7 +1012,7 @@ func (c *VFSCAStore) storeCertificate(name string, ki *keysetItem) error {
 		if err != nil {
 			return err
 		}
-		return p.WriteFile(data.Bytes(), acl)
+		return p.WriteFile(bytes.NewReader(data.Bytes()), acl)
 	}
 }
@@ -1094,7 +1094,7 @@ func (c *VFSCAStore) AddSSHPublicKey(name string, pubkey []byte) error {
 		return err
 	}

-	return p.WriteFile(pubkey, acl)
+	return p.WriteFile(bytes.NewReader(pubkey), acl)
 }

 func (c *VFSCAStore) buildSSHPublicKeyPath(name string, id string) vfs.Path {
@@ -46,7 +46,7 @@ func (p *FSPath) Join(relativePath ...string) Path {
 	return &FSPath{location: joined}
 }

-func (p *FSPath) WriteFile(data []byte, acl ACL) error {
+func (p *FSPath) WriteFile(data io.ReadSeeker, acl ACL) error {
 	dir := path.Dir(p.location)
 	err := os.MkdirAll(dir, 0755)
 	if err != nil {
@@ -61,10 +61,7 @@ func (p *FSPath) WriteFile(data []byte, acl ACL) error {
 	// Note from here on in we have to close f and delete or rename the temp file
 	tempfile := f.Name()

-	n, err := f.Write(data)
-	if err == nil && n < len(data) {
-		err = io.ErrShortWrite
-	}
+	_, err = io.Copy(f, data)

 	if closeErr := f.Close(); err == nil {
 		err = closeErr
@@ -95,7 +92,7 @@ func (p *FSPath) WriteFile(data []byte, acl ACL) error {
 // TODO: should we take a file lock or equivalent here? Can we use RENAME_NOREPLACE ?
 var createFileLock sync.Mutex

-func (p *FSPath) CreateFile(data []byte, acl ACL) error {
+func (p *FSPath) CreateFile(data io.ReadSeeker, acl ACL) error {
 	createFileLock.Lock()
 	defer createFileLock.Unlock()
@@ -135,11 +135,11 @@ func (p *GSPath) Join(relativePath ...string) Path {
 	}
 }

-func (p *GSPath) WriteFile(data []byte, acl ACL) error {
+func (p *GSPath) WriteFile(data io.ReadSeeker, acl ACL) error {
 	done, err := RetryWithBackoff(gcsWriteBackoff, func() (bool, error) {
 		glog.V(4).Infof("Writing file %q", p)

-		md5Hash, err := hashing.HashAlgorithmMD5.Hash(bytes.NewReader(data))
+		md5Hash, err := hashing.HashAlgorithmMD5.Hash(data)
 		if err != nil {
 			return false, err
 		}
@@ -157,8 +157,11 @@ func (p *GSPath) WriteFile(data []byte, acl ACL) error {
 			obj.Acl = gsAcl.Acl
 		}

-		r := bytes.NewReader(data)
-		_, err = p.client.Objects.Insert(p.bucket, obj).Media(r).Do()
+		if _, err := data.Seek(0, 0); err != nil {
+			return false, fmt.Errorf("error seeking to start of data stream for write to %s: %v", p, err)
+		}
+
+		_, err = p.client.Objects.Insert(p.bucket, obj).Media(data).Do()
 		if err != nil {
 			return false, fmt.Errorf("error writing %s: %v", p, err)
 		}
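A note on the Seek(0, 0) introduced above (the Swift backend further down gets the same treatment): inside this function the stream is consumed more than once. hashing.HashAlgorithmMD5.Hash reads it to compute the upload hash, and the whole write runs under RetryWithBackoff, so a failed attempt can leave the reader at an arbitrary offset. Rewinding immediately before the upload keeps both the hash and any retried attempt reading the full contents. A generic sketch of the pattern, with hypothetical names (writeWithRetry, upload) standing in for the GCS-specific calls:

    package main

    import (
        "fmt"
        "io"
    )

    // writeWithRetry sketches the rewind-before-use pattern: anything that
    // consumes the stream (hashing, a failed upload) leaves it mid-way, so
    // every attempt seeks back to the start before reading again.
    func writeWithRetry(data io.ReadSeeker, attempts int, upload func(io.Reader) error) error {
        var lastErr error
        for i := 0; i < attempts; i++ {
            if _, err := data.Seek(0, io.SeekStart); err != nil {
                return fmt.Errorf("error seeking to start of data stream: %v", err)
            }
            if lastErr = upload(data); lastErr == nil {
                return nil
            }
        }
        return lastErr
    }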
@@ -181,7 +184,7 @@ func (p *GSPath) WriteFile(data []byte, acl ACL) error {
 // TODO: should we enable versioning?
 var createFileLockGCS sync.Mutex

-func (p *GSPath) CreateFile(data []byte, acl ACL) error {
+func (p *GSPath) CreateFile(data io.ReadSeeker, acl ACL) error {
 	createFileLockGCS.Lock()
 	defer createFileLockGCS.Unlock()
@@ -79,11 +79,11 @@ func (p *KubernetesPath) Join(relativePath ...string) Path {
 	}
 }

-func (p *KubernetesPath) WriteFile(data []byte, acl ACL) error {
+func (p *KubernetesPath) WriteFile(data io.ReadSeeker, acl ACL) error {
 	return fmt.Errorf("KubernetesPath::WriteFile not supported")
 }

-func (p *KubernetesPath) CreateFile(data []byte, acl ACL) error {
+func (p *KubernetesPath) CreateFile(data io.ReadSeeker, acl ACL) error {
 	return fmt.Errorf("KubernetesPath::CreateFile not supported")
 }
@@ -17,7 +17,9 @@ limitations under the License.
 package vfs

 import (
 	"fmt"
+	"io"
+	"io/ioutil"
 	"os"
 	"path"
 	"strings"
@@ -92,12 +94,16 @@ func (p *MemFSPath) Join(relativePath ...string) Path {
 	return current
 }

-func (p *MemFSPath) WriteFile(data []byte, acl ACL) error {
+func (p *MemFSPath) WriteFile(r io.ReadSeeker, acl ACL) error {
+	data, err := ioutil.ReadAll(r)
+	if err != nil {
+		return fmt.Errorf("error reading data: %v", err)
+	}
 	p.contents = data
 	return nil
 }

-func (p *MemFSPath) CreateFile(data []byte, acl ACL) error {
+func (p *MemFSPath) CreateFile(data io.ReadSeeker, acl ACL) error {
 	// Check if exists
 	if p.contents != nil {
 		return os.ErrExist
@@ -109,7 +109,7 @@ func (p *S3Path) Join(relativePath ...string) Path {
 	}
 }

-func (p *S3Path) WriteFile(data []byte, aclObj ACL) error {
+func (p *S3Path) WriteFile(data io.ReadSeeker, aclObj ACL) error {
 	client, err := p.client()
 	if err != nil {
 		return err
@@ -121,7 +121,7 @@ func (p *S3Path) WriteFile(data []byte, aclObj ACL) error {
 	sse := "AES256"

 	request := &s3.PutObjectInput{}
-	request.Body = bytes.NewReader(data)
+	request.Body = data
 	request.Bucket = aws.String(p.bucket)
 	request.Key = aws.String(p.key)
 	request.ServerSideEncryption = aws.String(sse)
@@ -141,7 +141,7 @@ func (p *S3Path) WriteFile(data []byte, aclObj ACL) error {

 	// We don't need Content-MD5: https://github.com/aws/aws-sdk-go/issues/208

-	glog.V(8).Infof("Calling S3 PutObject Bucket=%q Key=%q SSE=%q ACL=%q BodyLen=%d", p.bucket, p.key, sse, acl, len(data))
+	glog.V(8).Infof("Calling S3 PutObject Bucket=%q Key=%q SSE=%q ACL=%q", p.bucket, p.key, sse, acl)

 	_, err = client.PutObject(request)
 	if err != nil {
@@ -161,7 +161,7 @@ func (p *S3Path) WriteFile(data []byte, aclObj ACL) error {
 // TODO: should we enable versioning?
 var createFileLockS3 sync.Mutex

-func (p *S3Path) CreateFile(data []byte, acl ACL) error {
+func (p *S3Path) CreateFile(data io.ReadSeeker, acl ACL) error {
 	createFileLockS3.Lock()
 	defer createFileLockS3.Unlock()
@@ -147,7 +147,7 @@ func mkdirAll(sftpClient *sftp.Client, dir string) error {
 	return nil
 }

-func (p *SSHPath) WriteFile(data []byte, acl ACL) error {
+func (p *SSHPath) WriteFile(data io.ReadSeeker, acl ACL) error {
 	sftpClient, err := p.newClient()
 	if err != nil {
 		return err
@@ -169,10 +169,7 @@ func (p *SSHPath) WriteFile(data []byte, acl ACL) error {

 	// Note from here on in we have to close f and delete or rename the temp file

-	n, err := f.Write(data)
-	if err == nil && n < len(data) {
-		err = io.ErrShortWrite
-	}
+	_, err = io.Copy(f, data)

 	if closeErr := f.Close(); err == nil {
 		err = closeErr
@@ -230,7 +227,7 @@ func (p *SSHPath) WriteFile(data []byte, acl ACL) error {
 // Not a great approach, but fine for a single process (with low concurrency)
 var createFileLockSSH sync.Mutex

-func (p *SSHPath) CreateFile(data []byte, acl ACL) error {
+func (p *SSHPath) CreateFile(data io.ReadSeeker, acl ACL) error {
 	createFileLockSSH.Lock()
 	defer createFileLockSSH.Unlock()
@@ -245,10 +245,14 @@ func (p *SwiftPath) Join(relativePath ...string) Path {
 	}
 }

-func (p *SwiftPath) WriteFile(data []byte, acl ACL) error {
+func (p *SwiftPath) WriteFile(data io.ReadSeeker, acl ACL) error {
 	done, err := RetryWithBackoff(swiftWriteBackoff, func() (bool, error) {
 		glog.V(4).Infof("Writing file %q", p)
-		createOpts := swiftobject.CreateOpts{Content: bytes.NewReader(data)}
+		if _, err := data.Seek(0, 0); err != nil {
+			return false, fmt.Errorf("error seeking to start of data stream for %s: %v", p, err)
+		}
+
+		createOpts := swiftobject.CreateOpts{Content: data}
 		_, err := swiftobject.Create(p.client, p.bucket, p.key, createOpts).Extract()
 		if err != nil {
 			return false, fmt.Errorf("error writing %s: %v", p, err)
@@ -272,7 +276,7 @@ func (p *SwiftPath) WriteFile(data []byte, acl ACL) error {
 // TODO: should we enable versioning?
 var createFileLockSwift sync.Mutex

-func (p *SwiftPath) CreateFile(data []byte, acl ACL) error {
+func (p *SwiftPath) CreateFile(data io.ReadSeeker, acl ACL) error {
 	createFileLockSwift.Lock()
 	defer createFileLockSwift.Unlock()
@@ -52,9 +52,9 @@ type Path interface {
 	// As this reads the entire file into memory, consider using WriteTo for bigger files
 	ReadFile() ([]byte, error)

-	WriteFile(data []byte, acl ACL) error
+	WriteFile(data io.ReadSeeker, acl ACL) error
 	// CreateFile writes the file contents, but only if the file does not already exist
-	CreateFile(data []byte, acl ACL) error
+	CreateFile(data io.ReadSeeker, acl ACL) error

 	// Remove deletes the file
 	Remove() error
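The interface asks for an io.ReadSeeker rather than a plain io.Reader because several backends have to rewind the stream: the GCS and Swift implementations hash and retry the upload, and the S3 SDK's PutObjectInput.Body field is itself an io.ReadSeeker. Callers that already hold a []byte simply wrap it, which is exactly the pattern repeated across the call-site hunks above. A minimal sketch (the helper name and package are illustrative; the import path is assumed):

    package vfsutil // hypothetical helper package, for illustration only

    import (
        "bytes"

        "k8s.io/kops/util/pkg/vfs"
    )

    // writeBytes shows the caller-side cost of the new signature when the
    // data is already in memory: a bytes.Reader implements io.ReadSeeker,
    // so wrapping the slice is all that is needed.
    func writeBytes(p vfs.Path, data []byte, acl vfs.ACL) error {
        return p.WriteFile(bytes.NewReader(data), acl)
    }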
@@ -17,15 +17,15 @@ limitations under the License.
 package vfs

 import (
-	"bytes"
 	"fmt"
+	"io/ioutil"
 	"os"

 	"github.com/golang/glog"
 	"k8s.io/kops/util/pkg/hashing"
 )

-// VFSScan scans a source Path for changes files
+// VFSScan scans a source Path for changed files
 type VFSScan struct {
 	Base Path
 	hashes map[string]*hashing.Hash
@@ -148,25 +148,40 @@ func SyncDir(src *VFSScan, destBase Path) error {
 			continue
 		}

-		srcData, err := f.ReadFile()
-		if err != nil {
-			return fmt.Errorf("error reading source file %q: %v", f, err)
+		if err := CopyFile(f, destFile, nil); err != nil {
+			return err
 		}
 	}

-		destData, err := destFile.ReadFile()
-		if err != nil {
-			if !os.IsNotExist(err) {
-				return fmt.Errorf("error reading dest file %q: %v", f, err)
-			}
-		}
+	return nil
+}

-		if destData == nil || !bytes.Equal(srcData, destData) {
-			glog.V(2).Infof("Copying data from %s to %s", f, destFile)
-			err = destFile.WriteFile(srcData, nil)
-			if err != nil {
-				return fmt.Errorf("error writing dest file %q: %v", f, err)
-			}
+// CopyFile copies the file at src to dest. It uses a TempFile, rather than buffering in memory.
+func CopyFile(src, dest Path, acl ACL) error {
+	tempFile, err := ioutil.TempFile("", "")
+	if err != nil {
+		return fmt.Errorf("error creating temp file: %v", err)
+	}
+
+	defer func() {
+		if err := os.Remove(tempFile.Name()); err != nil {
+			glog.Warningf("error removing temp file %q: %v", tempFile.Name(), err)
+		}
+	}()
+	defer tempFile.Close()
+
+	if _, err := src.WriteTo(tempFile); err != nil {
+		return fmt.Errorf("error reading source file %q: %v", src, err)
+	}
+
+	if _, err := tempFile.Seek(0, 0); err != nil {
+		return fmt.Errorf("error seeking in temp file during copy: %v", err)
+	}
+
+	glog.V(2).Infof("Copying data from %s to %s", src, dest)
+	err = dest.WriteFile(tempFile, acl)
+	if err != nil {
+		return fmt.Errorf("error writing dest file %q: %v", dest, err)
+	}
+
+	return nil
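The new CopyFile above ties the two halves of the change together: WriteTo streams the source into a local temp file, the temp file is rewound with Seek(0, 0), and the resulting *os.File (an io.ReadSeeker) is passed to WriteFile on the destination, so neither side holds the whole object in memory. A small usage sketch, assuming two already-constructed vfs.Path values; the nil ACL mirrors what SyncDir passes, and the wrapper name is made up:

    package vfsutil // hypothetical helper package, for illustration only

    import (
        "k8s.io/kops/util/pkg/vfs"
    )

    // mirrorOne copies a single object between two VFS locations via the new
    // vfs.CopyFile helper; the only local buffering is a temporary file on disk.
    func mirrorOne(src, dest vfs.Path) error {
        // nil ACL: let the destination backend apply its defaults, as SyncDir does.
        return vfs.CopyFile(src, dest, nil)
    }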
@@ -258,30 +273,12 @@ func CopyTree(src Path, dest Path, aclOracle ACLOracle) error {
 		destFile = dest.Join(relativePath)

-		srcData, err := srcFile.ReadFile()
+		acl, err := aclOracle(destFile)
 		if err != nil {
-			return fmt.Errorf("error reading source file %q: %v", srcFile, err)
+			return err
 		}

-		// We do still read the dest file ... unknown if we should if the destFile supported hash
-		destData, err := destFile.ReadFile()
-		if err != nil {
-			if !os.IsNotExist(err) {
-				return fmt.Errorf("error reading dest file %q: %v", destFile, err)
-			}
-		}
-
-		if destData == nil || !bytes.Equal(srcData, destData) {
-			acl, err := aclOracle(destFile)
-			if err != nil {
-				return err
-			}
-
-			glog.V(2).Infof("Copying data from %s to %s", srcFile, destFile)
-			err = destFile.WriteFile(srcData, acl)
-			if err != nil {
-				return fmt.Errorf("error writing dest file %q: %v", destFile, err)
-			}
+		if err := CopyFile(srcFile, destFile, acl); err != nil {
+			return err
 		}
 	}