misc: various nits in doc blocks

Signed-off-by: Hidde Beydals <hidde@hhh.computer>
Hidde Beydals 2023-03-28 23:41:01 +02:00
parent f65e26173e
commit 1023315cd2
GPG Key ID: 979F380FC2341744
2 changed files with 42 additions and 43 deletions

View File

@@ -53,6 +53,7 @@ import (
 	rreconcile "github.com/fluxcd/pkg/runtime/reconcile"
 	"github.com/fluxcd/pkg/sourceignore"

 	sourcev1 "github.com/fluxcd/source-controller/api/v1"
 	serror "github.com/fluxcd/source-controller/internal/error"
 	"github.com/fluxcd/source-controller/internal/features"
@@ -381,8 +382,8 @@ func (r *GitRepositoryReconciler) shouldNotify(oldObj, newObj *sourcev1.GitRepos
 // it is removed from the object.
 // If the object does not have an Artifact in its Status, a Reconciling
 // condition is added.
-// The hostname of any URL in the Status of the object are updated, to ensure
-// they match the Storage server hostname of current runtime.
+// The hostname of the Artifact in the Status of the object is updated, to
+// ensure it matches the Storage server hostname of current runtime.
 func (r *GitRepositoryReconciler) reconcileStorage(ctx context.Context, sp *patch.SerialPatcher,
 	obj *sourcev1.GitRepository, _ *git.Commit, _ *artifactSet, _ string) (sreconcile.Result, error) {
 	// Garbage collect previous advertised artifact(s) from storage
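As a rough illustration of the hostname behaviour the reworded doc block describes, a hypothetical helper (not part of this change) might look like the sketch below. It only assumes the SetArtifactURL helper that appears later in this diff and the GetArtifact accessor of the GitRepository API.

// ensureArtifactHostname is a hypothetical sketch of the behaviour described
// above: re-derive the advertised Artifact URL so it always points at the
// Storage hostname of the current runtime.
func ensureArtifactHostname(storage *Storage, obj *sourcev1.GitRepository) {
	if artifact := obj.GetArtifact(); artifact != nil {
		storage.SetArtifactURL(artifact)
	}
}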
@@ -606,8 +607,7 @@ func (r *GitRepositoryReconciler) reconcileSource(ctx context.Context, sp *patch
 // Source ignore patterns are loaded, and the given directory is archived while
 // taking these patterns into account.
 // On a successful archive, the Artifact, Includes, observed ignore, recurse
-// submodules and observed include in the Status of the object are set, and the
-// symlink in the Storage is updated to its path.
+// submodules and observed include in the Status of the object are set.
 func (r *GitRepositoryReconciler) reconcileArtifact(ctx context.Context, sp *patch.SerialPatcher,
 	obj *sourcev1.GitRepository, commit *git.Commit, includes *artifactSet, dir string) (sreconcile.Result, error) {

View File

@@ -39,7 +39,7 @@ import (
 	"github.com/fluxcd/pkg/sourceignore"
 	"github.com/fluxcd/pkg/untar"

-	sourcev1 "github.com/fluxcd/source-controller/api/v1"
+	"github.com/fluxcd/source-controller/api/v1"
 	intdigest "github.com/fluxcd/source-controller/internal/digest"
 	sourcefs "github.com/fluxcd/source-controller/internal/fs"
 )
@@ -47,9 +47,9 @@ import (
 const GarbageCountLimit = 1000

 const (
-	// defaultFileMode is the permission mode applied to all files inside of an artifact archive.
+	// defaultFileMode is the permission mode applied to all files inside an artifact archive.
 	defaultFileMode int64 = 0o644
-	// defaultDirMode is the permission mode applied to all directories inside of an artifact archive.
+	// defaultDirMode is the permission mode applied to all directories inside an artifact archive.
 	defaultDirMode int64 = 0o755
 )
@@ -83,10 +83,10 @@ func NewStorage(basePath string, hostname string, artifactRetentionTTL time.Dura
 	}, nil
 }

-// NewArtifactFor returns a new v1beta1.Artifact.
-func (s *Storage) NewArtifactFor(kind string, metadata metav1.Object, revision, fileName string) sourcev1.Artifact {
-	path := sourcev1.ArtifactPath(kind, metadata.GetNamespace(), metadata.GetName(), fileName)
-	artifact := sourcev1.Artifact{
+// NewArtifactFor returns a new v1.Artifact.
+func (s *Storage) NewArtifactFor(kind string, metadata metav1.Object, revision, fileName string) v1.Artifact {
+	path := v1.ArtifactPath(kind, metadata.GetNamespace(), metadata.GetName(), fileName)
+	artifact := v1.Artifact{
 		Path:     path,
 		Revision: revision,
 	}
@@ -94,8 +94,8 @@ func (s *Storage) NewArtifactFor(kind string, metadata metav1.Object, revision,
 	return artifact
 }

-// SetArtifactURL sets the URL on the given v1beta1.Artifact.
-func (s Storage) SetArtifactURL(artifact *sourcev1.Artifact) {
+// SetArtifactURL sets the URL on the given v1.Artifact.
+func (s Storage) SetArtifactURL(artifact *v1.Artifact) {
 	if artifact.Path == "" {
 		return
 	}
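A rough usage sketch for the two helpers above, with made-up variable names (obj is assumed to be a GitRepository pointer, revision a string and commit a git.Commit; only the NewArtifactFor and SetArtifactURL signatures are taken from this diff):

// Sketch: mint an Artifact for a revision and fill in its advertised URL.
artifact := storage.NewArtifactFor(obj.Kind, obj, revision,
	fmt.Sprintf("%s.tar.gz", commit.Hash.String()))
storage.SetArtifactURL(&artifact)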
@@ -116,14 +116,14 @@ func (s Storage) SetHostname(URL string) string {
 	return u.String()
 }

-// MkdirAll calls os.MkdirAll for the given v1beta1.Artifact base dir.
-func (s *Storage) MkdirAll(artifact sourcev1.Artifact) error {
+// MkdirAll calls os.MkdirAll for the given v1.Artifact base dir.
+func (s *Storage) MkdirAll(artifact v1.Artifact) error {
 	dir := filepath.Dir(s.LocalPath(artifact))
 	return os.MkdirAll(dir, 0o700)
 }

-// RemoveAll calls os.RemoveAll for the given v1beta1.Artifact base dir.
-func (s *Storage) RemoveAll(artifact sourcev1.Artifact) (string, error) {
+// RemoveAll calls os.RemoveAll for the given v1.Artifact base dir.
+func (s *Storage) RemoveAll(artifact v1.Artifact) (string, error) {
 	var deletedDir string
 	dir := filepath.Dir(s.LocalPath(artifact))
 	// Check if the dir exists.
@@ -134,8 +134,8 @@ func (s *Storage) RemoveAll(artifact sourcev1.Artifact) (string, error) {
 	return deletedDir, os.RemoveAll(dir)
 }

-// RemoveAllButCurrent removes all files for the given v1beta1.Artifact base dir, excluding the current one.
-func (s *Storage) RemoveAllButCurrent(artifact sourcev1.Artifact) ([]string, error) {
+// RemoveAllButCurrent removes all files for the given v1.Artifact base dir, excluding the current one.
+func (s *Storage) RemoveAllButCurrent(artifact v1.Artifact) ([]string, error) {
 	deletedFiles := []string{}
 	localPath := s.LocalPath(artifact)
 	dir := filepath.Dir(localPath)
@@ -168,7 +168,7 @@ func (s *Storage) RemoveAllButCurrent(artifact sourcev1.Artifact) ([]string, err
 // 1. collect all artifact files with an expired ttl
 // 2. if we satisfy maxItemsToBeRetained, then return
 // 3. else, collect all artifact files till the latest n files remain, where n=maxItemsToBeRetained
-func (s *Storage) getGarbageFiles(artifact sourcev1.Artifact, totalCountLimit, maxItemsToBeRetained int, ttl time.Duration) (garbageFiles []string, _ error) {
+func (s *Storage) getGarbageFiles(artifact v1.Artifact, totalCountLimit, maxItemsToBeRetained int, ttl time.Duration) (garbageFiles []string, _ error) {
 	localPath := s.LocalPath(artifact)
 	dir := filepath.Dir(localPath)
 	artifactFilesWithCreatedTs := make(map[time.Time]string)
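The three-step strategy in the doc block above can be summarised with a small self-contained sketch (simplified and not the controller's code; names are made up, only the sort and time packages are needed, and the real implementation additionally caps the number of files it inspects via totalCountLimit):

// selectGarbage illustrates the retention strategy documented above: files
// with an expired ttl go first, and only if more than maxRetained files
// remain afterwards are the oldest ones trimmed as well.
func selectGarbage(files map[string]time.Time, ttl time.Duration, maxRetained int, now time.Time) []string {
	type entry struct {
		path string
		ts   time.Time
	}
	var garbage []string
	var remaining []entry

	// 1. collect all files with an expired ttl.
	for path, ts := range files {
		if now.Sub(ts) > ttl {
			garbage = append(garbage, path)
		} else {
			remaining = append(remaining, entry{path, ts})
		}
	}

	// 2. if the remainder already satisfies the retention count, stop here.
	if len(remaining) <= maxRetained {
		return garbage
	}

	// 3. otherwise, drop the oldest files until only maxRetained are left.
	sort.Slice(remaining, func(i, j int) bool { return remaining[i].ts.Before(remaining[j].ts) })
	for _, e := range remaining[:len(remaining)-maxRetained] {
		garbage = append(garbage, e.path)
	}
	return garbage
}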
@@ -219,7 +219,7 @@ func (s *Storage) getGarbageFiles(artifact sourcev1.Artifact, totalCountLimit, m
 		return garbageFiles, nil
 	}

-	// sort all timestamps in an ascending order.
+	// sort all timestamps in ascending order.
 	sort.Slice(creationTimestamps, func(i, j int) bool { return creationTimestamps[i].Before(creationTimestamps[j]) })
 	for _, ts := range creationTimestamps {
 		path, ok := artifactFilesWithCreatedTs[ts]
@@ -233,7 +233,7 @@ func (s *Storage) getGarbageFiles(artifact sourcev1.Artifact, totalCountLimit, m
 	noOfGarbageFiles := len(garbageFiles)
 	for _, path := range sortedPaths {
 		if path != localPath && filepath.Ext(path) != ".lock" && !stringInSlice(path, garbageFiles) {
-			// If we previously collected a few garbage files with an expired ttl, then take that into account
+			// If we previously collected some garbage files with an expired ttl, then take that into account
 			// when checking whether we need to remove more files to satisfy the max no. of items allowed
 			// in the filesystem, along with the no. of files already removed in this loop.
 			if noOfGarbageFiles > 0 {
@@ -253,9 +253,9 @@ func (s *Storage) getGarbageFiles(artifact sourcev1.Artifact, totalCountLimit, m
 	return garbageFiles, nil
 }

-// GarbageCollect removes all garabge files in the artifact dir according to the provided
+// GarbageCollect removes all garbage files in the artifact dir according to the provided
 // retention options.
-func (s *Storage) GarbageCollect(ctx context.Context, artifact sourcev1.Artifact, timeout time.Duration) ([]string, error) {
+func (s *Storage) GarbageCollect(ctx context.Context, artifact v1.Artifact, timeout time.Duration) ([]string, error) {
 	delFilesChan := make(chan []string)
 	errChan := make(chan error)
 	// Abort if it takes more than the provided timeout duration.
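A rough usage sketch for GarbageCollect, assuming a caller in the same package, an obj with a non-nil Artifact in Status, a made-up five second budget, and a logger that stands in for whatever the caller has available:

// Sketch: garbage collect the artifact's directory, bounded to five seconds.
deleted, err := storage.GarbageCollect(ctx, *obj.GetArtifact(), 5*time.Second)
if err != nil {
	log.Error(err, "garbage collection failed")
} else if len(deleted) > 0 {
	log.Info("garbage collected artifacts", "total", len(deleted))
}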
@@ -316,8 +316,8 @@ func stringInSlice(a string, list []string) bool {
 	return false
 }

-// ArtifactExist returns a boolean indicating whether the v1beta1.Artifact exists in storage and is a regular file.
-func (s *Storage) ArtifactExist(artifact sourcev1.Artifact) bool {
+// ArtifactExist returns a boolean indicating whether the v1.Artifact exists in storage and is a regular file.
+func (s *Storage) ArtifactExist(artifact v1.Artifact) bool {
 	fi, err := os.Lstat(s.LocalPath(artifact))
 	if err != nil {
 		return false
@@ -343,11 +343,11 @@ func SourceIgnoreFilter(ps []gitignore.Pattern, domain []string) ArchiveFileFilt
 	}
 }

-// Archive atomically archives the given directory as a tarball to the given v1beta1.Artifact path, excluding
+// Archive atomically archives the given directory as a tarball to the given v1.Artifact path, excluding
 // directories and any ArchiveFileFilter matches. While archiving, any environment specific data (for example,
 // the user and group name) is stripped from file headers.
 // If successful, it sets the digest and last update time on the artifact.
-func (s *Storage) Archive(artifact *sourcev1.Artifact, dir string, filter ArchiveFileFilter) (err error) {
+func (s *Storage) Archive(artifact *v1.Artifact, dir string, filter ArchiveFileFilter) (err error) {
 	if f, err := os.Stat(dir); os.IsNotExist(err) || !f.IsDir() {
 		return fmt.Errorf("invalid dir path: %s", dir)
 	}
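To show how the filter and Archive fit together, a rough sketch (artifact and tmpDir are made-up names; the patterns would normally be loaded through the sourceignore package but are left nil here, and gitignore refers to the go-git gitignore format package already imported by this file):

// Sketch: archive a checked-out directory while honouring ignore patterns.
var ps []gitignore.Pattern // normally read from .sourceignore or spec.ignore
if err := storage.Archive(&artifact, tmpDir, SourceIgnoreFilter(ps, nil)); err != nil {
	return fmt.Errorf("unable to archive %s: %w", tmpDir, err)
}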
@@ -467,9 +467,9 @@ func (s *Storage) Archive(artifact *sourcev1.Artifact, dir string, filter Archiv
 	return nil
 }

-// AtomicWriteFile atomically writes the io.Reader contents to the v1beta1.Artifact path.
+// AtomicWriteFile atomically writes the io.Reader contents to the v1.Artifact path.
 // If successful, it sets the digest and last update time on the artifact.
-func (s *Storage) AtomicWriteFile(artifact *sourcev1.Artifact, reader io.Reader, mode os.FileMode) (err error) {
+func (s *Storage) AtomicWriteFile(artifact *v1.Artifact, reader io.Reader, mode os.FileMode) (err error) {
 	localPath := s.LocalPath(*artifact)
 	tf, err := os.CreateTemp(filepath.Split(localPath))
 	if err != nil {
@@ -509,9 +509,9 @@ func (s *Storage) AtomicWriteFile(artifact *sourcev1.Artifact, reader io.Reader,
 	return nil
 }

-// Copy atomically copies the io.Reader contents to the v1beta1.Artifact path.
+// Copy atomically copies the io.Reader contents to the v1.Artifact path.
 // If successful, it sets the digest and last update time on the artifact.
-func (s *Storage) Copy(artifact *sourcev1.Artifact, reader io.Reader) (err error) {
+func (s *Storage) Copy(artifact *v1.Artifact, reader io.Reader) (err error) {
 	localPath := s.LocalPath(*artifact)
 	tf, err := os.CreateTemp(filepath.Split(localPath))
 	if err != nil {
@@ -547,9 +547,9 @@ func (s *Storage) Copy(artifact *sourcev1.Artifact, reader io.Reader) (err error
 	return nil
 }

-// CopyFromPath atomically copies the contents of the given path to the path of the v1beta1.Artifact.
+// CopyFromPath atomically copies the contents of the given path to the path of the v1.Artifact.
 // If successful, the digest and last update time on the artifact is set.
-func (s *Storage) CopyFromPath(artifact *sourcev1.Artifact, path string) (err error) {
+func (s *Storage) CopyFromPath(artifact *v1.Artifact, path string) (err error) {
 	f, err := os.Open(path)
 	if err != nil {
 		return err
@@ -564,7 +564,7 @@ func (s *Storage) CopyFromPath(artifact *sourcev1.Artifact, path string) (err er
 }

 // CopyToPath copies the contents in the (sub)path of the given artifact to the given path.
-func (s *Storage) CopyToPath(artifact *sourcev1.Artifact, subPath, toPath string) error {
+func (s *Storage) CopyToPath(artifact *v1.Artifact, subPath, toPath string) error {
 	// create a tmp directory to store artifact
 	tmp, err := os.MkdirTemp("", "flux-include-")
 	if err != nil {
@@ -602,8 +602,8 @@ func (s *Storage) CopyToPath(artifact *sourcev1.Artifact, subPath, toPath string
 	return nil
 }

-// Symlink creates or updates a symbolic link for the given v1beta1.Artifact and returns the URL for the symlink.
-func (s *Storage) Symlink(artifact sourcev1.Artifact, linkName string) (string, error) {
+// Symlink creates or updates a symbolic link for the given v1.Artifact and returns the URL for the symlink.
+func (s *Storage) Symlink(artifact v1.Artifact, linkName string) (string, error) {
 	localPath := s.LocalPath(artifact)
 	dir := filepath.Dir(localPath)
 	link := filepath.Join(dir, linkName)
@@ -621,19 +621,18 @@ func (s *Storage) Symlink(artifact sourcev1.Artifact, linkName string,
 		return "", err
 	}

-	url := fmt.Sprintf("http://%s/%s", s.Hostname, filepath.Join(filepath.Dir(artifact.Path), linkName))
-	return url, nil
+	return fmt.Sprintf("http://%s/%s", s.Hostname, filepath.Join(filepath.Dir(artifact.Path), linkName)), nil
 }

-// Lock creates a file lock for the given v1beta1.Artifact.
-func (s *Storage) Lock(artifact sourcev1.Artifact) (unlock func(), err error) {
+// Lock creates a file lock for the given v1.Artifact.
+func (s *Storage) Lock(artifact v1.Artifact) (unlock func(), err error) {
 	lockFile := s.LocalPath(artifact) + ".lock"
 	mutex := lockedfile.MutexAt(lockFile)
 	return mutex.Lock()
 }

 // LocalPath returns the secure local path of the given artifact (that is: relative to the Storage.BasePath).
-func (s *Storage) LocalPath(artifact sourcev1.Artifact) string {
+func (s *Storage) LocalPath(artifact v1.Artifact) string {
 	if artifact.Path == "" {
 		return ""
 	}
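Taken together, the lock, archive and symlink helpers above are typically used in a sequence like the following sketch (variable names, the nil filter and the "latest.tar.gz" link name are illustrative; only the Lock, Archive and Symlink signatures come from this file):

// Sketch: serialise writers on the artifact, write the archive, then point a
// stable symlink (and its advertised URL) at the newly written file.
unlock, err := storage.Lock(artifact)
if err != nil {
	return err
}
defer unlock()

if err := storage.Archive(&artifact, tmpDir, nil); err != nil {
	return err
}

url, err := storage.Symlink(artifact, "latest.tar.gz")
if err != nil {
	return err
}
_ = url // e.g. recorded in the object's Status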
@@ -644,7 +643,7 @@ func (s *Storage) LocalPath(artifact sourcev1.Artifact) string {
 	return path
 }

-// writecounter is an implementation of io.Writer that only records the number
+// writeCounter is an implementation of io.Writer that only records the number
 // of bytes written.
 type writeCounter struct {
 	written int64
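The Write method itself falls outside this diff; a conventional implementation of such a byte-counting io.Writer would look roughly like the following (a sketch, not necessarily the file's exact code):

// Write counts the bytes passed through without retaining them, which is all
// that is needed to satisfy io.Writer for size accounting.
func (wc *writeCounter) Write(p []byte) (int, error) {
	n := len(p)
	wc.written += int64(n)
	return n, nil
}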