Merge pull request #724 from fluxcd/gitrepo-rec-fixes-2

GitRepositoryReconciler no-op clone improvements
Sunny 2022-05-24 13:56:59 +05:30 committed by GitHub
commit a9012330d1
GPG Key ID: 4AEE18F83AFDEB23
18 changed files with 1328 additions and 462 deletions


@ -211,6 +211,18 @@ type GitRepositoryStatus struct {
// +optional
IncludedArtifacts []*Artifact `json:"includedArtifacts,omitempty"`
// ContentConfigChecksum is a checksum of all the configurations related to
// the content of the source artifact:
// - .spec.ignore
// - .spec.recurseSubmodules
// - .spec.included and the checksum of the included artifacts
// observed in .status.observedGeneration version of the object. This can
// be used to determine if the content of the included repository has
// changed.
// It has the format of `<algo>:<checksum>`, for example: `sha256:<checksum>`.
// +optional
ContentConfigChecksum string `json:"contentConfigChecksum,omitempty"`
meta.ReconcileRequestStatus `json:",inline"`
}


@ -653,6 +653,15 @@ spec:
- type
type: object
type: array
contentConfigChecksum:
description: 'ContentConfigChecksum is a checksum of all the configurations
related to the content of the source artifact: - .spec.ignore -
.spec.recurseSubmodules - .spec.included and the checksum of the
included artifacts observed in .status.observedGeneration version
of the object. This can be used to determine if the content of the
included repository has changed. It has the format of `<algo>:<checksum>`,
for example: `sha256:<checksum>`.'
type: string
includedArtifacts:
description: IncludedArtifacts contains a list of the last successfully
included Artifacts as instructed by GitRepositorySpec.Include.


@ -18,10 +18,12 @@ package controllers
import (
"context"
"crypto/sha256"
"errors"
"fmt"
"os"
"path/filepath"
"strconv"
"strings"
"time"
@ -115,6 +117,7 @@ type GitRepositoryReconciler struct {
ControllerName string
requeueDependency time.Duration
features map[string]bool
}
type GitRepositoryReconcilerOptions struct {
@ -134,6 +137,15 @@ func (r *GitRepositoryReconciler) SetupWithManager(mgr ctrl.Manager) error {
func (r *GitRepositoryReconciler) SetupWithManagerAndOptions(mgr ctrl.Manager, opts GitRepositoryReconcilerOptions) error {
r.requeueDependency = opts.DependencyRequeueInterval
if r.features == nil {
r.features = map[string]bool{}
}
// Check and enable gated features.
if oc, _ := features.Enabled(features.OptimizedGitClones); oc {
r.features[features.OptimizedGitClones] = true
}
return ctrl.NewControllerManagedBy(mgr).
For(&sourcev1.GitRepository{}, builder.WithPredicates(
predicate.Or(predicate.GenerationChangedPredicate{}, predicates.ReconcileRequestedPredicate{}),
@ -183,7 +195,7 @@ func (r *GitRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Reques
summarize.WithReconcileError(retErr),
summarize.WithIgnoreNotFound(),
summarize.WithProcessors(
summarize.RecordContextualError,
summarize.ErrorActionHandler,
summarize.RecordReconcileReq,
),
summarize.WithResultBuilder(sreconcile.AlwaysRequeueResultBuilder{RequeueAfter: obj.GetRequeueAfter()}),
@ -235,10 +247,10 @@ func (r *GitRepositoryReconciler) reconcile(ctx context.Context, obj *sourcev1.G
// Create temp dir for Git clone
tmpDir, err := util.TempDirForObj("", obj)
if err != nil {
e := &serror.Event{
Err: fmt.Errorf("failed to create temporary working directory: %w", err),
Reason: sourcev1.DirCreationFailedReason,
}
e := serror.NewGeneric(
fmt.Errorf("failed to create temporary working directory: %w", err),
sourcev1.DirCreationFailedReason,
)
conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error())
return sreconcile.ResultEmpty, e
}
@ -279,11 +291,11 @@ func (r *GitRepositoryReconciler) reconcile(ctx context.Context, obj *sourcev1.G
return res, resErr
}
// notify emits notification related to the reconciliation.
// notify emits notifications related to the result of the reconciliation.
func (r *GitRepositoryReconciler) notify(oldObj, newObj *sourcev1.GitRepository, commit git.Commit, res sreconcile.Result, resErr error) {
// Notify successful reconciliation for new artifact and recovery from any
// failure.
if resErr == nil && res == sreconcile.ResultSuccess && newObj.Status.Artifact != nil {
// Notify successful reconciliation for new artifact, no-op reconciliation
// and recovery from any failure.
if r.shouldNotify(oldObj, newObj, res, resErr) {
annotations := map[string]string{
sourcev1.GroupVersion.Group + "/revision": newObj.Status.Artifact.Revision,
sourcev1.GroupVersion.Group + "/checksum": newObj.Status.Artifact.Checksum,
@ -294,7 +306,14 @@ func (r *GitRepositoryReconciler) notify(oldObj, newObj *sourcev1.GitRepository,
oldChecksum = oldObj.GetArtifact().Checksum
}
message := fmt.Sprintf("stored artifact for commit '%s'", commit.ShortMessage())
// A partial commit obtained from a no-op clone doesn't contain the commit
// message. Use a separate message for it.
var message string
if git.IsConcreteCommit(commit) {
message = fmt.Sprintf("stored artifact for commit '%s'", commit.ShortMessage())
} else {
message = fmt.Sprintf("stored artifact for commit '%s'", commit.String())
}
// Notify on new artifact and failure recovery.
if oldChecksum != newObj.GetArtifact().Checksum {
@ -309,6 +328,25 @@ func (r *GitRepositoryReconciler) notify(oldObj, newObj *sourcev1.GitRepository,
}
}
// shouldNotify analyzes the result of the subreconcilers and determines if a
// notification should be sent. It decides on the final informational
// notifications after the reconciliation. Failure notifications and in-line
// notifications are not handled here.
func (r *GitRepositoryReconciler) shouldNotify(oldObj, newObj *sourcev1.GitRepository, res sreconcile.Result, resErr error) bool {
// Notify for successful reconciliation.
if resErr == nil && res == sreconcile.ResultSuccess && newObj.Status.Artifact != nil {
return true
}
// Notify for no-op reconciliation with ignore error.
if resErr != nil && res == sreconcile.ResultEmpty && newObj.Status.Artifact != nil {
// Convert to Generic error and check for ignore.
if ge, ok := resErr.(*serror.Generic); ok {
return ge.Ignore
}
}
return false
}
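A minimal usage sketch (not part of this diff; r is assumed to be the GitRepositoryReconciler): a no-op reconciliation surfaces as ResultEmpty plus a Generic error with Ignore set, which shouldNotify still treats as a success worth announcing, provided the object carries an artifact.
noopErr := serror.NewGeneric(fmt.Errorf("no changes since last reconciliation"), sourcev1.GitOperationSucceedReason)
noopErr.Ignore = true
newObj := &sourcev1.GitRepository{}
newObj.Status.Artifact = &sourcev1.Artifact{Revision: "main/abc", Checksum: "xyz"}
// ResultEmpty with an Ignore-flagged Generic error and an existing artifact
// still results in a notification for the unchanged revision.
_ = r.shouldNotify(nil, newObj, sreconcile.ResultEmpty, noopErr)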
// reconcileStorage ensures the current state of the storage matches the
// desired and previously observed state.
//
@ -351,8 +389,15 @@ func (r *GitRepositoryReconciler) reconcileStorage(ctx context.Context,
// reconcileSource ensures the upstream Git repository and reference can be
// cloned and checked out using the specified configuration, and observes its
// state.
// state. It also checks if the included repositories are available for use.
//
// The included repositories are fetched and their metadata are stored. In case
// one of the included repositories isn't ready, it records
// v1beta2.IncludeUnavailableCondition=True and returns early. When all the
// included repositories are ready, it removes
// v1beta2.IncludeUnavailableCondition from the object.
// When the included artifactSet differs from the current set in the Status of
// the object, it marks the object with v1beta2.ArtifactOutdatedCondition=True.
// The repository is cloned to the given dir, using the specified configuration
// to check out the reference. In case of an error during this process
// (including transient errors), it records v1beta2.FetchFailedCondition=True
@ -367,8 +412,13 @@ func (r *GitRepositoryReconciler) reconcileStorage(ctx context.Context,
// it records v1beta2.SourceVerifiedCondition=True.
// When all the above is successful, the given Commit pointer is set to the
// commit of the checked out Git repository.
//
// If the optimized git clone feature is enabled, it checks whether the remote
// repo and the local artifact are on the same revision, and whether any other
// source content related configuration has changed since the last
// reconciliation. If nothing has changed, it short-circuits the whole
// reconciliation with an early return.
func (r *GitRepositoryReconciler) reconcileSource(ctx context.Context,
obj *sourcev1.GitRepository, commit *git.Commit, _ *artifactSet, dir string) (sreconcile.Result, error) {
obj *sourcev1.GitRepository, commit *git.Commit, includes *artifactSet, dir string) (sreconcile.Result, error) {
// Configure authentication strategy to access the source
var authOpts *git.AuthOptions
var err error
@ -380,10 +430,10 @@ func (r *GitRepositoryReconciler) reconcileSource(ctx context.Context,
}
var secret corev1.Secret
if err := r.Client.Get(ctx, name, &secret); err != nil {
e := &serror.Event{
Err: fmt.Errorf("failed to get secret '%s': %w", name.String(), err),
Reason: sourcev1.AuthenticationFailedReason,
}
e := serror.NewGeneric(
fmt.Errorf("failed to get secret '%s': %w", name.String(), err),
sourcev1.AuthenticationFailedReason,
)
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error())
// Return error as the world as observed may change
return sreconcile.ResultEmpty, e
@ -396,42 +446,15 @@ func (r *GitRepositoryReconciler) reconcileSource(ctx context.Context,
authOpts, err = git.AuthOptionsWithoutSecret(obj.Spec.URL)
}
if err != nil {
e := &serror.Event{
Err: fmt.Errorf("failed to configure auth strategy for Git implementation '%s': %w", obj.Spec.GitImplementation, err),
Reason: sourcev1.AuthenticationFailedReason,
}
e := serror.NewGeneric(
fmt.Errorf("failed to configure auth strategy for Git implementation '%s': %w", obj.Spec.GitImplementation, err),
sourcev1.AuthenticationFailedReason,
)
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error())
// Return error as the contents of the secret may change
return sreconcile.ResultEmpty, e
}
// Configure checkout strategy
checkoutOpts := git.CheckoutOptions{RecurseSubmodules: obj.Spec.RecurseSubmodules}
if ref := obj.Spec.Reference; ref != nil {
checkoutOpts.Branch = ref.Branch
checkoutOpts.Commit = ref.Commit
checkoutOpts.Tag = ref.Tag
checkoutOpts.SemVer = ref.SemVer
}
if oc, _ := features.Enabled(features.OptimizedGitClones); oc {
if artifact := obj.GetArtifact(); artifact != nil {
checkoutOpts.LastRevision = artifact.Revision
}
}
checkoutStrategy, err := strategy.CheckoutStrategyForImplementation(ctx,
git.Implementation(obj.Spec.GitImplementation), checkoutOpts)
if err != nil {
e := &serror.Stalling{
Err: fmt.Errorf("failed to configure checkout strategy for Git implementation '%s': %w", obj.Spec.GitImplementation, err),
Reason: sourcev1.GitOperationFailedReason,
}
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error())
// Do not return err as recovery without changes is impossible
return sreconcile.ResultEmpty, e
}
repositoryURL := obj.Spec.URL
// managed GIT transport only affects the libgit2 implementation
if managed.Enabled() && obj.Spec.GitImplementation == sourcev1.LibGit2Implementation {
@ -459,27 +482,77 @@ func (r *GitRepositoryReconciler) reconcileSource(ctx context.Context,
}
}
// Checkout HEAD of reference in object
gitCtx, cancel := context.WithTimeout(ctx, obj.Spec.Timeout.Duration)
defer cancel()
c, err := checkoutStrategy.Checkout(gitCtx, dir, repositoryURL, authOpts)
// Fetch the included artifact metadata.
artifacts, err := r.fetchIncludes(ctx, obj)
if err != nil {
var v git.NoChangesError
if errors.As(err, &v) {
return sreconcile.ResultSuccess,
&serror.Waiting{Err: v, Reason: v.Message, RequeueAfter: obj.GetRequeueAfter()}
}
return sreconcile.ResultEmpty, err
}
e := &serror.Event{
Err: fmt.Errorf("failed to checkout and determine revision: %w", err),
Reason: sourcev1.GitOperationFailedReason,
}
// Observe if the artifacts still match the previous included ones
if artifacts.Diff(obj.Status.IncludedArtifacts) {
message := "included artifacts differ from last observed includes"
conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "IncludeChange", message)
conditions.MarkReconciling(obj, "IncludeChange", message)
}
// Persist the ArtifactSet.
*includes = *artifacts
var optimizedClone bool
if val, ok := r.features[features.OptimizedGitClones]; ok && val {
optimizedClone = true
}
c, err := r.gitCheckout(ctx, obj, repositoryURL, authOpts, dir, optimizedClone)
if err != nil {
e := serror.NewGeneric(
fmt.Errorf("failed to checkout and determine revision: %w", err),
sourcev1.GitOperationFailedReason,
)
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error())
// Coin flip on transient or persistent error, return error and hope for the best
return sreconcile.ResultEmpty, e
}
// Assign the commit to the shared commit reference.
*commit = *c
// If it's a partial commit obtained from an existing artifact, check whether
// the reconciliation can be skipped because no other configurations have changed.
if !git.IsConcreteCommit(*commit) {
// Calculate content configuration checksum.
if r.calculateContentConfigChecksum(obj, includes) == obj.Status.ContentConfigChecksum {
ge := serror.NewGeneric(
fmt.Errorf("no changes since last reconcilation: observed revision '%s'",
commit.String()), sourcev1.GitOperationSucceedReason,
)
ge.Notification = false
ge.Ignore = true
ge.Event = corev1.EventTypeNormal
// Remove any stale fetch failed condition.
conditions.Delete(obj, sourcev1.FetchFailedCondition)
// IMPORTANT: This must be set to ensure that the observed
// generation of this condition is updated. In case of full
// reconciliation reconcileArtifact() ensures that it's set at the
// very end.
conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason,
"stored artifact for revision '%s'", commit.String())
// TODO: Find out if such condition setting is needed when commit
// signature verification is enabled.
return sreconcile.ResultEmpty, ge
}
// If the reconciliation can't be skipped, check out again without any
// optimization.
c, err := r.gitCheckout(ctx, obj, repositoryURL, authOpts, dir, false)
if err != nil {
e := serror.NewGeneric(
fmt.Errorf("failed to checkout and determine revision: %w", err),
sourcev1.GitOperationFailedReason,
)
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error())
return sreconcile.ResultEmpty, e
}
*commit = *c
}
ctrl.LoggerFrom(ctx).V(logger.DebugLevel).Info("git repository checked out", "url", obj.Spec.URL, "revision", commit.String())
conditions.Delete(obj, sourcev1.FetchFailedCondition)
@ -502,21 +575,27 @@ func (r *GitRepositoryReconciler) reconcileSource(ctx context.Context,
//
// The inspection of the given data to the object is deferred, ensuring any
// stale observations like v1beta2.ArtifactOutdatedCondition are removed.
// If the given Artifact and/or artifactSet (includes) do not differ from the
// object's current, it returns early.
// If the given Artifact and/or artifactSet (includes) and the content config
// checksum do not differ from the object's current, it returns early.
// Source ignore patterns are loaded, and the given directory is archived while
// taking these patterns into account.
// On a successful archive, the Artifact and Includes in the Status of the
// object are set, and the symlink in the Storage is updated to its path.
// On a successful archive, the Artifact, Includes and new content config
// checksum in the Status of the object are set, and the symlink in the Storage
// is updated to its path.
func (r *GitRepositoryReconciler) reconcileArtifact(ctx context.Context,
obj *sourcev1.GitRepository, commit *git.Commit, includes *artifactSet, dir string) (sreconcile.Result, error) {
// Create potential new artifact with current available metadata
artifact := r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), commit.String(), fmt.Sprintf("%s.tar.gz", commit.Hash.String()))
// Calculate the content config checksum.
ccc := r.calculateContentConfigChecksum(obj, includes)
// Set the ArtifactInStorageCondition if there's no drift.
defer func() {
if obj.GetArtifact().HasRevision(artifact.Revision) && !includes.Diff(obj.Status.IncludedArtifacts) {
if obj.GetArtifact().HasRevision(artifact.Revision) &&
!includes.Diff(obj.Status.IncludedArtifacts) &&
obj.Status.ContentConfigChecksum == ccc {
conditions.Delete(obj, sourcev1.ArtifactOutdatedCondition)
conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason,
"stored artifact for revision '%s'", artifact.Revision)
@ -524,43 +603,45 @@ func (r *GitRepositoryReconciler) reconcileArtifact(ctx context.Context,
}()
// The artifact is up-to-date
if obj.GetArtifact().HasRevision(artifact.Revision) && !includes.Diff(obj.Status.IncludedArtifacts) {
if obj.GetArtifact().HasRevision(artifact.Revision) &&
!includes.Diff(obj.Status.IncludedArtifacts) &&
obj.Status.ContentConfigChecksum == ccc {
r.eventLogf(ctx, obj, events.EventTypeTrace, sourcev1.ArtifactUpToDateReason, "artifact up-to-date with remote revision: '%s'", artifact.Revision)
return sreconcile.ResultSuccess, nil
}
// Ensure target path exists and is a directory
if f, err := os.Stat(dir); err != nil {
e := &serror.Event{
Err: fmt.Errorf("failed to stat target artifact path: %w", err),
Reason: sourcev1.StatOperationFailedReason,
}
e := serror.NewGeneric(
fmt.Errorf("failed to stat target artifact path: %w", err),
sourcev1.StatOperationFailedReason,
)
conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error())
return sreconcile.ResultEmpty, e
} else if !f.IsDir() {
e := &serror.Event{
Err: fmt.Errorf("invalid target path: '%s' is not a directory", dir),
Reason: sourcev1.InvalidPathReason,
}
e := serror.NewGeneric(
fmt.Errorf("invalid target path: '%s' is not a directory", dir),
sourcev1.InvalidPathReason,
)
conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error())
return sreconcile.ResultEmpty, e
}
// Ensure artifact directory exists and acquire lock
if err := r.Storage.MkdirAll(artifact); err != nil {
e := &serror.Event{
Err: fmt.Errorf("failed to create artifact directory: %w", err),
Reason: sourcev1.DirCreationFailedReason,
}
e := serror.NewGeneric(
fmt.Errorf("failed to create artifact directory: %w", err),
sourcev1.DirCreationFailedReason,
)
conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error())
return sreconcile.ResultEmpty, e
}
unlock, err := r.Storage.Lock(artifact)
if err != nil {
return sreconcile.ResultEmpty, &serror.Event{
Err: fmt.Errorf("failed to acquire lock for artifact: %w", err),
Reason: meta.FailedReason,
}
return sreconcile.ResultEmpty, serror.NewGeneric(
fmt.Errorf("failed to acquire lock for artifact: %w", err),
meta.FailedReason,
)
}
defer unlock()
@ -568,10 +649,10 @@ func (r *GitRepositoryReconciler) reconcileArtifact(ctx context.Context,
ignoreDomain := strings.Split(dir, string(filepath.Separator))
ps, err := sourceignore.LoadIgnorePatterns(dir, ignoreDomain)
if err != nil {
return sreconcile.ResultEmpty, &serror.Event{
Err: fmt.Errorf("failed to load source ignore patterns from repository: %w", err),
Reason: "SourceIgnoreError",
}
return sreconcile.ResultEmpty, serror.NewGeneric(
fmt.Errorf("failed to load source ignore patterns from repository: %w", err),
"SourceIgnoreError",
)
}
if obj.Spec.Ignore != nil {
ps = append(ps, sourceignore.ReadPatterns(strings.NewReader(*obj.Spec.Ignore), ignoreDomain)...)
@ -579,10 +660,10 @@ func (r *GitRepositoryReconciler) reconcileArtifact(ctx context.Context,
// Archive directory to storage
if err := r.Storage.Archive(&artifact, dir, SourceIgnoreFilter(ps, ignoreDomain)); err != nil {
e := &serror.Event{
Err: fmt.Errorf("unable to archive artifact to storage: %w", err),
Reason: sourcev1.ArchiveOperationFailedReason,
}
e := serror.NewGeneric(
fmt.Errorf("unable to archive artifact to storage: %w", err),
sourcev1.ArchiveOperationFailedReason,
)
conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error())
return sreconcile.ResultEmpty, e
}
@ -590,6 +671,7 @@ func (r *GitRepositoryReconciler) reconcileArtifact(ctx context.Context,
// Record it on the object
obj.Status.Artifact = artifact.DeepCopy()
obj.Status.IncludedArtifacts = *includes
obj.Status.ContentConfigChecksum = ccc
// Update symlink on a "best effort" basis
url, err := r.Storage.Symlink(artifact, "latest.tar.gz")
@ -617,65 +699,150 @@ func (r *GitRepositoryReconciler) reconcileArtifact(ctx context.Context,
func (r *GitRepositoryReconciler) reconcileInclude(ctx context.Context,
obj *sourcev1.GitRepository, _ *git.Commit, includes *artifactSet, dir string) (sreconcile.Result, error) {
artifacts := make(artifactSet, len(obj.Spec.Include))
for i, incl := range obj.Spec.Include {
// Do this first as it is much cheaper than copy operations
toPath, err := securejoin.SecureJoin(dir, incl.GetToPath())
if err != nil {
e := &serror.Event{
Err: fmt.Errorf("path calculation for include '%s' failed: %w", incl.GitRepositoryRef.Name, err),
Reason: "IllegalPath",
}
conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, e.Reason, e.Err.Error())
e := serror.NewGeneric(
fmt.Errorf("path calculation for include '%s' failed: %w", incl.GitRepositoryRef.Name, err),
"IllegalPath",
)
conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error())
return sreconcile.ResultEmpty, e
}
// Retrieve the included GitRepository
dep := &sourcev1.GitRepository{}
if err := r.Get(ctx, types.NamespacedName{Namespace: obj.Namespace, Name: incl.GitRepositoryRef.Name}, dep); err != nil {
e := &serror.Event{
Err: fmt.Errorf("could not get resource for include '%s': %w", incl.GitRepositoryRef.Name, err),
Reason: "NotFound",
// Get the artifact at the same include index. The artifactSet is created
// such that its indices match the indices of Spec.Include. Hence, the
// include index is used here to pick the associated artifact from
// includes.
var artifact *sourcev1.Artifact
for j, art := range *includes {
if i == j {
artifact = art
}
conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, e.Reason, e.Err.Error())
return sreconcile.ResultEmpty, e
}
// Confirm include has an artifact
if dep.GetArtifact() == nil {
e := &serror.Event{
Err: fmt.Errorf("no artifact available for include '%s'", incl.GitRepositoryRef.Name),
Reason: "NoArtifact",
}
conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, e.Reason, e.Err.Error())
return sreconcile.ResultEmpty, e
}
// Copy artifact (sub)contents to configured directory
if err := r.Storage.CopyToPath(dep.GetArtifact(), incl.GetFromPath(), toPath); err != nil {
// Copy artifact (sub)contents to configured directory.
if err := r.Storage.CopyToPath(artifact, incl.GetFromPath(), toPath); err != nil {
e := &serror.Event{
Err: fmt.Errorf("failed to copy '%s' include from %s to %s: %w", incl.GitRepositoryRef.Name, incl.GetFromPath(), incl.GetToPath(), err),
Reason: "CopyFailure",
}
conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, e.Reason, e.Err.Error())
conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error())
return sreconcile.ResultEmpty, e
}
}
conditions.Delete(obj, sourcev1.IncludeUnavailableCondition)
return sreconcile.ResultSuccess, nil
}
// gitCheckout builds checkout options with the given configurations and
// performs a git checkout.
func (r *GitRepositoryReconciler) gitCheckout(ctx context.Context,
obj *sourcev1.GitRepository, repoURL string, authOpts *git.AuthOptions, dir string, optimized bool) (*git.Commit, error) {
// Configure checkout strategy.
checkoutOpts := git.CheckoutOptions{RecurseSubmodules: obj.Spec.RecurseSubmodules}
if ref := obj.Spec.Reference; ref != nil {
checkoutOpts.Branch = ref.Branch
checkoutOpts.Commit = ref.Commit
checkoutOpts.Tag = ref.Tag
checkoutOpts.SemVer = ref.SemVer
}
// Only attempt to short-circuit the clone operation if the object has an
// existing artifact in storage. reconcileStorage has already verified
// that the artifact exists.
if optimized && conditions.IsTrue(obj, sourcev1.ArtifactInStorageCondition) {
if artifact := obj.GetArtifact(); artifact != nil {
checkoutOpts.LastRevision = artifact.Revision
}
}
checkoutStrategy, err := strategy.CheckoutStrategyForImplementation(ctx,
git.Implementation(obj.Spec.GitImplementation), checkoutOpts)
if err != nil {
e := &serror.Stalling{
Err: fmt.Errorf("failed to configure checkout strategy for Git implementation '%s': %w", obj.Spec.GitImplementation, err),
Reason: sourcev1.GitOperationFailedReason,
}
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error())
// Do not return err as recovery without changes is impossible.
return nil, e
}
// Checkout HEAD of reference in object
gitCtx, cancel := context.WithTimeout(ctx, obj.Spec.Timeout.Duration)
defer cancel()
return checkoutStrategy.Checkout(gitCtx, dir, repoURL, authOpts)
}
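A rough sketch of the caller-side flow this enables (mirroring reconcileSource above; ctx, obj, authOpts and tmpDir are assumed to be set up as in that function): with optimization on, an unchanged remote yields a partial commit, and the content config checksum then decides whether the run can be skipped.
c, err := r.gitCheckout(ctx, obj, obj.Spec.URL, authOpts, tmpDir, true)
if err != nil {
// handle the checkout failure as reconcileSource does
}
if !git.IsConcreteCommit(*c) {
// Remote HEAD still matches the stored artifact revision; only a partial
// commit (hash, no content) was returned. If the content config checksum is
// also unchanged, reconcileSource returns early with an ignored no-op error.
}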
// fetchIncludes fetches artifact metadata of all the included repos.
func (r *GitRepositoryReconciler) fetchIncludes(ctx context.Context, obj *sourcev1.GitRepository) (*artifactSet, error) {
artifacts := make(artifactSet, len(obj.Spec.Include))
for i, incl := range obj.Spec.Include {
// Retrieve the included GitRepository.
dep := &sourcev1.GitRepository{}
if err := r.Get(ctx, types.NamespacedName{Namespace: obj.Namespace, Name: incl.GitRepositoryRef.Name}, dep); err != nil {
e := serror.NewWaiting(
fmt.Errorf("could not get resource for include '%s': %w", incl.GitRepositoryRef.Name, err),
"NotFound",
)
e.RequeueAfter = r.requeueDependency
conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, e.Reason, e.Err.Error())
return nil, e
}
// Confirm include has an artifact
if dep.GetArtifact() == nil {
e := serror.NewWaiting(
fmt.Errorf("no artifact available for include '%s'", incl.GitRepositoryRef.Name),
"NoArtifact",
)
e.RequeueAfter = r.requeueDependency
conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, e.Reason, e.Err.Error())
return nil, e
}
artifacts[i] = dep.GetArtifact().DeepCopy()
}
// We now know all includes are available
// We now know all the includes are available.
conditions.Delete(obj, sourcev1.IncludeUnavailableCondition)
// Observe if the artifacts still match the previous included ones
if artifacts.Diff(obj.Status.IncludedArtifacts) {
message := fmt.Sprintf("included artifacts differ from last observed includes")
conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "IncludeChange", message)
conditions.MarkReconciling(obj, "IncludeChange", message)
return &artifacts, nil
}
// calculateContentConfigChecksum calculates a checksum of all the
// configurations that result in a change in the source artifact. It can be used
// to decide if further reconciliation is needed when an artifact already exists
// for a set of configurations.
func (r *GitRepositoryReconciler) calculateContentConfigChecksum(obj *sourcev1.GitRepository, includes *artifactSet) string {
c := []byte{}
// Consider the ignore rules and recurse submodules.
if obj.Spec.Ignore != nil {
c = append(c, []byte(*obj.Spec.Ignore)...)
}
c = append(c, []byte(strconv.FormatBool(obj.Spec.RecurseSubmodules))...)
// Consider the included repository attributes.
for _, incl := range obj.Spec.Include {
c = append(c, []byte(incl.GitRepositoryRef.Name+incl.FromPath+incl.ToPath)...)
}
// Persist the artifactSet.
*includes = artifacts
return sreconcile.ResultSuccess, nil
// Consider the checksum and revision of all the included remote artifacts.
// This ensures that if the included repos get updated, this checksum changes.
// NOTE: The content of an artifact may change at the same revision if the
// ignore rules change. Hence, consider both checksum and revision to
// capture changes in artifact checksum as well.
// TODO: Fix artifactSet.Diff() to consider checksum as well.
if includes != nil {
for _, incl := range *includes {
c = append(c, []byte(incl.Checksum)...)
c = append(c, []byte(incl.Revision)...)
}
}
return fmt.Sprintf("sha256:%x", sha256.Sum256(c))
}
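A worked example, consistent with the emptyContentConfigChecksum constant used in the tests below: for an object with no ignore rules, no submodule recursion and no includes, only the string "false" is hashed.
sum := sha256.Sum256([]byte(strconv.FormatBool(false)))
fmt.Printf("sha256:%x\n", sum)
// Prints sha256:fcbcf165908dd18a9e49f7ff27810176db8e9f63b4352213741664245224f8aa,
// the checksum recorded for a GitRepository with an empty content configuration.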
// verifyCommitSignature verifies the signature of the given Git commit, if a
@ -700,10 +867,10 @@ func (r *GitRepositoryReconciler) verifyCommitSignature(ctx context.Context, obj
}
secret := &corev1.Secret{}
if err := r.Client.Get(ctx, publicKeySecret, secret); err != nil {
e := &serror.Event{
Err: fmt.Errorf("PGP public keys secret error: %w", err),
Reason: "VerificationError",
}
e := serror.NewGeneric(
fmt.Errorf("PGP public keys secret error: %w", err),
"VerificationError",
)
conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, e.Reason, e.Err.Error())
return sreconcile.ResultEmpty, e
}
@ -714,10 +881,10 @@ func (r *GitRepositoryReconciler) verifyCommitSignature(ctx context.Context, obj
}
// Verify commit with GPG data from secret
if _, err := commit.Verify(keyRings...); err != nil {
e := &serror.Event{
Err: fmt.Errorf("signature verification of commit '%s' failed: %w", commit.Hash.String(), err),
Reason: "InvalidCommitSignature",
}
e := serror.NewGeneric(
fmt.Errorf("signature verification of commit '%s' failed: %w", commit.Hash.String(), err),
"InvalidCommitSignature",
)
conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, e.Reason, e.Err.Error())
// Return error in the hope the secret changes
return sreconcile.ResultEmpty, e
@ -755,10 +922,10 @@ func (r *GitRepositoryReconciler) reconcileDelete(ctx context.Context, obj *sour
func (r *GitRepositoryReconciler) garbageCollect(ctx context.Context, obj *sourcev1.GitRepository) error {
if !obj.DeletionTimestamp.IsZero() {
if deleted, err := r.Storage.RemoveAll(r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), "", "*")); err != nil {
return &serror.Event{
Err: fmt.Errorf("garbage collection for deleted resource failed: %w", err),
Reason: "GarbageCollectionFailed",
}
return serror.NewGeneric(
fmt.Errorf("garbage collection for deleted resource failed: %w", err),
"GarbageCollectionFailed",
)
} else if deleted != "" {
r.eventLogf(ctx, obj, events.EventTypeTrace, "GarbageCollectionSucceeded",
"garbage collected artifacts for deleted resource")
@ -769,10 +936,10 @@ func (r *GitRepositoryReconciler) garbageCollect(ctx context.Context, obj *sourc
if obj.GetArtifact() != nil {
delFiles, err := r.Storage.GarbageCollect(ctx, *obj.GetArtifact(), time.Second*5)
if err != nil {
return &serror.Event{
Err: fmt.Errorf("garbage collection of artifacts failed: %w", err),
Reason: "GarbageCollectionFailed",
}
return serror.NewGeneric(
fmt.Errorf("garbage collection of artifacts failed: %w", err),
"GarbageCollectionFailed",
)
}
if len(delFiles) > 0 {
r.eventLogf(ctx, obj, events.EventTypeTrace, "GarbageCollectionSucceeded",


@ -57,6 +57,8 @@ import (
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
serror "github.com/fluxcd/source-controller/internal/error"
"github.com/fluxcd/source-controller/internal/features"
sreconcile "github.com/fluxcd/source-controller/internal/reconcile"
"github.com/fluxcd/source-controller/internal/reconcile/summarize"
"github.com/fluxcd/source-controller/pkg/git"
@ -140,6 +142,7 @@ Oomb3gD/TRf/nAdVED+k81GdLzciYdUGtI71/qI47G0nMBluLRE=
=/4e+
-----END PGP PUBLIC KEY BLOCK-----
`
emptyContentConfigChecksum = "sha256:fcbcf165908dd18a9e49f7ff27810176db8e9f63b4352213741664245224f8aa"
)
var (
@ -499,6 +502,7 @@ func TestGitRepositoryReconciler_reconcileSource_authStrategy(t *testing.T) {
Client: builder.Build(),
EventRecorder: record.NewFakeRecorder(32),
Storage: testStorage,
features: features.FeatureGates(),
}
for _, i := range testGitImplementations {
@ -545,30 +549,35 @@ func TestGitRepositoryReconciler_reconcileSource_checkoutStrategy(t *testing.T)
name string
skipForImplementation string
reference *sourcev1.GitRepositoryRef
beforeFunc func(obj *sourcev1.GitRepository, latestRev string)
want sreconcile.Result
wantErr bool
wantRevision string
wantArtifactOutdated bool
}{
{
name: "Nil reference (default branch)",
want: sreconcile.ResultSuccess,
wantRevision: "master/<commit>",
name: "Nil reference (default branch)",
want: sreconcile.ResultSuccess,
wantRevision: "master/<commit>",
wantArtifactOutdated: true,
},
{
name: "Branch",
reference: &sourcev1.GitRepositoryRef{
Branch: "staging",
},
want: sreconcile.ResultSuccess,
wantRevision: "staging/<commit>",
want: sreconcile.ResultSuccess,
wantRevision: "staging/<commit>",
wantArtifactOutdated: true,
},
{
name: "Tag",
reference: &sourcev1.GitRepositoryRef{
Tag: "v0.1.0",
},
want: sreconcile.ResultSuccess,
wantRevision: "v0.1.0/<commit>",
want: sreconcile.ResultSuccess,
wantRevision: "v0.1.0/<commit>",
wantArtifactOutdated: true,
},
{
name: "Branch commit",
@ -577,8 +586,9 @@ func TestGitRepositoryReconciler_reconcileSource_checkoutStrategy(t *testing.T)
Branch: "staging",
Commit: "<commit>",
},
want: sreconcile.ResultSuccess,
wantRevision: "staging/<commit>",
want: sreconcile.ResultSuccess,
wantRevision: "staging/<commit>",
wantArtifactOutdated: true,
},
{
name: "Branch commit",
@ -587,32 +597,81 @@ func TestGitRepositoryReconciler_reconcileSource_checkoutStrategy(t *testing.T)
Branch: "staging",
Commit: "<commit>",
},
want: sreconcile.ResultSuccess,
wantRevision: "HEAD/<commit>",
want: sreconcile.ResultSuccess,
wantRevision: "HEAD/<commit>",
wantArtifactOutdated: true,
},
{
name: "SemVer",
reference: &sourcev1.GitRepositoryRef{
SemVer: "*",
},
want: sreconcile.ResultSuccess,
wantRevision: "v2.0.0/<commit>",
want: sreconcile.ResultSuccess,
wantRevision: "v2.0.0/<commit>",
wantArtifactOutdated: true,
},
{
name: "SemVer range",
reference: &sourcev1.GitRepositoryRef{
SemVer: "<v0.2.1",
},
want: sreconcile.ResultSuccess,
wantRevision: "0.2.0/<commit>",
want: sreconcile.ResultSuccess,
wantRevision: "0.2.0/<commit>",
wantArtifactOutdated: true,
},
{
name: "SemVer prerelease",
reference: &sourcev1.GitRepositoryRef{
SemVer: ">=1.0.0-0 <1.1.0-0",
},
wantRevision: "v1.0.0-alpha/<commit>",
want: sreconcile.ResultSuccess,
wantRevision: "v1.0.0-alpha/<commit>",
want: sreconcile.ResultSuccess,
wantArtifactOutdated: true,
},
{
name: "Optimized clone",
reference: &sourcev1.GitRepositoryRef{
Branch: "staging",
},
beforeFunc: func(obj *sourcev1.GitRepository, latestRev string) {
// Add existing artifact on the object and storage.
obj.Status = sourcev1.GitRepositoryStatus{
Artifact: &sourcev1.Artifact{
Revision: "staging/" + latestRev,
Path: randStringRunes(10),
},
// Checksum with all the relevant fields unset.
ContentConfigChecksum: emptyContentConfigChecksum,
}
conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "foo")
},
want: sreconcile.ResultEmpty,
wantErr: true,
wantRevision: "staging/<commit>",
wantArtifactOutdated: false,
},
{
name: "Optimized clone different ignore",
reference: &sourcev1.GitRepositoryRef{
Branch: "staging",
},
beforeFunc: func(obj *sourcev1.GitRepository, latestRev string) {
// Set new ignore value.
obj.Spec.Ignore = pointer.StringPtr("foo")
// Add existing artifact on the object and storage.
obj.Status = sourcev1.GitRepositoryStatus{
Artifact: &sourcev1.Artifact{
Revision: "staging/" + latestRev,
Path: randStringRunes(10),
},
// Checksum with all the relevant fields unset.
ContentConfigChecksum: emptyContentConfigChecksum,
}
conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "foo")
},
want: sreconcile.ResultSuccess,
wantRevision: "staging/<commit>",
wantArtifactOutdated: false,
},
}
@ -641,6 +700,7 @@ func TestGitRepositoryReconciler_reconcileSource_checkoutStrategy(t *testing.T)
Client: fakeclient.NewClientBuilder().WithScheme(runtime.NewScheme()).Build(),
EventRecorder: record.NewFakeRecorder(32),
Storage: testStorage,
features: features.FeatureGates(),
}
for _, tt := range tests {
@ -674,6 +734,10 @@ func TestGitRepositoryReconciler_reconcileSource_checkoutStrategy(t *testing.T)
obj := obj.DeepCopy()
obj.Spec.GitImplementation = i
if tt.beforeFunc != nil {
tt.beforeFunc(obj, headRef.Hash().String())
}
var commit git.Commit
var includes artifactSet
got, err := r.reconcileSource(ctx, obj, &commit, &includes, tmpDir)
@ -682,10 +746,10 @@ func TestGitRepositoryReconciler_reconcileSource_checkoutStrategy(t *testing.T)
}
g.Expect(err != nil).To(Equal(tt.wantErr))
g.Expect(got).To(Equal(tt.want))
if tt.wantRevision != "" {
if tt.wantRevision != "" && !tt.wantErr {
revision := strings.ReplaceAll(tt.wantRevision, "<commit>", headRef.Hash().String())
g.Expect(commit.String()).To(Equal(revision))
g.Expect(conditions.IsTrue(obj, sourcev1.ArtifactOutdatedCondition)).To(BeTrue())
g.Expect(conditions.IsTrue(obj, sourcev1.ArtifactOutdatedCondition)).To(Equal(tt.wantArtifactOutdated))
}
})
}
@ -744,7 +808,8 @@ func TestGitRepositoryReconciler_reconcileArtifact(t *testing.T) {
beforeFunc: func(obj *sourcev1.GitRepository) {
obj.Spec.Interval = metav1.Duration{Duration: interval}
obj.Status.Artifact = &sourcev1.Artifact{Revision: "main/revision"}
obj.Status.IncludedArtifacts = []*sourcev1.Artifact{{Revision: "main/revision"}}
obj.Status.IncludedArtifacts = []*sourcev1.Artifact{{Revision: "main/revision", Checksum: "some-checksum"}}
obj.Status.ContentConfigChecksum = "sha256:f825d11a1c5987e033d2cb36449a3b0435a6abc9b2bfdbcdcc7c49bf40e9285d"
},
afterFunc: func(t *WithT, obj *sourcev1.GitRepository) {
t.Expect(obj.Status.URL).To(BeEmpty())
@ -857,6 +922,7 @@ func TestGitRepositoryReconciler_reconcileArtifact(t *testing.T) {
r := &GitRepositoryReconciler{
EventRecorder: record.NewFakeRecorder(32),
Storage: testStorage,
features: features.FeatureGates(),
}
obj := &sourcev1.GitRepository{
@ -948,39 +1014,6 @@ func TestGitRepositoryReconciler_reconcileInclude(t *testing.T) {
{name: "b", toPath: "b/", shouldExist: true},
},
want: sreconcile.ResultSuccess,
assertConditions: []metav1.Condition{
*conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "IncludeChange", "included artifacts differ from last observed includes"),
*conditions.TrueCondition(meta.ReconcilingCondition, "IncludeChange", "included artifacts differ from last observed includes"),
},
},
{
name: "Include get failure makes IncludeUnavailable=True and returns error",
includes: []include{
{name: "a", toPath: "a/"},
},
wantErr: true,
assertConditions: []metav1.Condition{
*conditions.TrueCondition(sourcev1.IncludeUnavailableCondition, "NotFound", "could not get resource for include 'a': gitrepositories.source.toolkit.fluxcd.io \"a\" not found"),
},
},
{
name: "Include without an artifact makes IncludeUnavailable=True",
dependencies: []dependency{
{
name: "a",
withArtifact: false,
conditions: []metav1.Condition{
*conditions.TrueCondition(sourcev1.IncludeUnavailableCondition, "Foo", "foo unavailable"),
},
},
},
includes: []include{
{name: "a", toPath: "a/"},
},
wantErr: true,
assertConditions: []metav1.Condition{
*conditions.TrueCondition(sourcev1.IncludeUnavailableCondition, "NoArtifact", "no artifact available for include 'a'"),
},
},
{
name: "Invalid FromPath makes IncludeUnavailable=True and returns error",
@ -995,17 +1028,9 @@ func TestGitRepositoryReconciler_reconcileInclude(t *testing.T) {
},
wantErr: true,
assertConditions: []metav1.Condition{
*conditions.TrueCondition(sourcev1.IncludeUnavailableCondition, "CopyFailure", "unpack/path: no such file or directory"),
*conditions.TrueCondition(sourcev1.StorageOperationFailedCondition, "CopyFailure", "unpack/path: no such file or directory"),
},
},
{
name: "Outdated IncludeUnavailable is removed",
beforeFunc: func(obj *sourcev1.GitRepository) {
conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, "NoArtifact", "")
},
want: sreconcile.ResultSuccess,
assertConditions: []metav1.Condition{},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
@ -1042,6 +1067,7 @@ func TestGitRepositoryReconciler_reconcileInclude(t *testing.T) {
EventRecorder: record.NewFakeRecorder(32),
Storage: storage,
requeueDependency: dependencyInterval,
features: features.FeatureGates(),
}
obj := &sourcev1.GitRepository{
@ -1073,6 +1099,11 @@ func TestGitRepositoryReconciler_reconcileInclude(t *testing.T) {
var commit git.Commit
var includes artifactSet
// Build includes artifactSet.
artifactSet, err := r.fetchIncludes(ctx, obj)
g.Expect(err).ToNot(HaveOccurred())
includes = *artifactSet
got, err := r.reconcileInclude(ctx, obj, &commit, &includes, tmpDir)
g.Expect(obj.GetConditions()).To(conditions.MatchConditions(tt.assertConditions))
g.Expect(err != nil).To(Equal(tt.wantErr))
@ -1206,6 +1237,7 @@ func TestGitRepositoryReconciler_reconcileStorage(t *testing.T) {
r := &GitRepositoryReconciler{
EventRecorder: record.NewFakeRecorder(32),
Storage: testStorage,
features: features.FeatureGates(),
}
obj := &sourcev1.GitRepository{
@ -1247,6 +1279,7 @@ func TestGitRepositoryReconciler_reconcileDelete(t *testing.T) {
r := &GitRepositoryReconciler{
EventRecorder: record.NewFakeRecorder(32),
Storage: testStorage,
features: features.FeatureGates(),
}
obj := &sourcev1.GitRepository{
@ -1384,6 +1417,7 @@ func TestGitRepositoryReconciler_verifyCommitSignature(t *testing.T) {
r := &GitRepositoryReconciler{
EventRecorder: record.NewFakeRecorder(32),
Client: builder.Build(),
features: features.FeatureGates(),
}
obj := &sourcev1.GitRepository{
@ -1525,6 +1559,7 @@ func TestGitRepositoryReconciler_ConditionsUpdate(t *testing.T) {
Client: builder.Build(),
EventRecorder: record.NewFakeRecorder(32),
Storage: testStorage,
features: features.FeatureGates(),
}
key := client.ObjectKeyFromObject(obj)
@ -1773,12 +1808,25 @@ func TestGitRepositoryReconciler_statusConditions(t *testing.T) {
}
func TestGitRepositoryReconciler_notify(t *testing.T) {
concreteCommit := git.Commit{
Hash: git.Hash("some-hash"),
Message: "test commit",
Encoded: []byte("content"),
}
partialCommit := git.Commit{
Hash: git.Hash("some-hash"),
}
noopErr := serror.NewGeneric(fmt.Errorf("some no-op error"), "NoOpReason")
noopErr.Ignore = true
tests := []struct {
name string
res sreconcile.Result
resErr error
oldObjBeforeFunc func(obj *sourcev1.GitRepository)
newObjBeforeFunc func(obj *sourcev1.GitRepository)
commit git.Commit
wantEvent string
}{
{
@ -1793,7 +1841,8 @@ func TestGitRepositoryReconciler_notify(t *testing.T) {
newObjBeforeFunc: func(obj *sourcev1.GitRepository) {
obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"}
},
wantEvent: "Normal NewArtifact stored artifact for commit",
commit: concreteCommit,
wantEvent: "Normal NewArtifact stored artifact for commit 'test commit'",
},
{
name: "recovery from failure",
@ -1808,7 +1857,8 @@ func TestGitRepositoryReconciler_notify(t *testing.T) {
obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"}
conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready")
},
wantEvent: "Normal Succeeded stored artifact for commit",
commit: concreteCommit,
wantEvent: "Normal Succeeded stored artifact for commit 'test commit'",
},
{
name: "recovery and new artifact",
@ -1823,7 +1873,8 @@ func TestGitRepositoryReconciler_notify(t *testing.T) {
obj.Status.Artifact = &sourcev1.Artifact{Revision: "aaa", Checksum: "bbb"}
conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready")
},
wantEvent: "Normal NewArtifact stored artifact for commit",
commit: concreteCommit,
wantEvent: "Normal NewArtifact stored artifact for commit 'test commit'",
},
{
name: "no updates",
@ -1838,6 +1889,22 @@ func TestGitRepositoryReconciler_notify(t *testing.T) {
conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready")
},
},
{
name: "no-op error result",
res: sreconcile.ResultEmpty,
resErr: noopErr,
oldObjBeforeFunc: func(obj *sourcev1.GitRepository) {
obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"}
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "fail")
conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo")
},
newObjBeforeFunc: func(obj *sourcev1.GitRepository) {
obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"}
conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready")
},
commit: partialCommit, // A no-op clone will always result in a partial commit.
wantEvent: "Normal Succeeded stored artifact for commit 'HEAD/some-hash'",
},
}
for _, tt := range tests {
@ -1857,11 +1924,9 @@ func TestGitRepositoryReconciler_notify(t *testing.T) {
reconciler := &GitRepositoryReconciler{
EventRecorder: recorder,
features: features.FeatureGates(),
}
commit := &git.Commit{
Message: "test commit",
}
reconciler.notify(oldObj, newObj, *commit, tt.res, tt.resErr)
reconciler.notify(oldObj, newObj, tt.commit, tt.res, tt.resErr)
select {
case x, ok := <-recorder.Events:
@ -1877,3 +1942,203 @@ func TestGitRepositoryReconciler_notify(t *testing.T) {
})
}
}
func TestGitRepositoryReconciler_fetchIncludes(t *testing.T) {
type dependency struct {
name string
withArtifact bool
conditions []metav1.Condition
}
type include struct {
name string
fromPath string
toPath string
shouldExist bool
}
tests := []struct {
name string
dependencies []dependency
includes []include
beforeFunc func(obj *sourcev1.GitRepository)
wantErr bool
wantArtifactSet artifactSet
assertConditions []metav1.Condition
}{
{
name: "Existing includes",
dependencies: []dependency{
{
name: "a",
withArtifact: true,
conditions: []metav1.Condition{
*conditions.TrueCondition(meta.ReadyCondition, "Foo", "foo ready"),
},
},
{
name: "b",
withArtifact: true,
conditions: []metav1.Condition{
*conditions.TrueCondition(meta.ReadyCondition, "Bar", "bar ready"),
},
},
},
includes: []include{
{name: "a", toPath: "a/", shouldExist: true},
{name: "b", toPath: "b/", shouldExist: true},
},
wantErr: false,
wantArtifactSet: []*sourcev1.Artifact{
{Revision: "a"},
{Revision: "b"},
},
},
{
name: "Include get failure",
includes: []include{
{name: "a", toPath: "a/"},
},
wantErr: true,
assertConditions: []metav1.Condition{
*conditions.TrueCondition(sourcev1.IncludeUnavailableCondition, "NotFound", "could not get resource for include 'a': gitrepositories.source.toolkit.fluxcd.io \"a\" not found"),
},
},
{
name: "Include without an artifact makes IncludeUnavailable=True",
dependencies: []dependency{
{
name: "a",
withArtifact: false,
conditions: []metav1.Condition{
*conditions.TrueCondition(sourcev1.IncludeUnavailableCondition, "Foo", "foo unavailable"),
},
},
},
includes: []include{
{name: "a", toPath: "a/"},
},
wantErr: true,
assertConditions: []metav1.Condition{
*conditions.TrueCondition(sourcev1.IncludeUnavailableCondition, "NoArtifact", "no artifact available for include 'a'"),
},
},
{
name: "Outdated IncludeUnavailable is removed",
beforeFunc: func(obj *sourcev1.GitRepository) {
conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, "NoArtifact", "")
},
assertConditions: []metav1.Condition{},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
var depObjs []client.Object
for _, d := range tt.dependencies {
obj := &sourcev1.GitRepository{
ObjectMeta: metav1.ObjectMeta{
Name: d.name,
},
Status: sourcev1.GitRepositoryStatus{
Conditions: d.conditions,
},
}
if d.withArtifact {
obj.Status.Artifact = &sourcev1.Artifact{
Path: d.name + ".tar.gz",
Revision: d.name,
LastUpdateTime: metav1.Now(),
}
}
depObjs = append(depObjs, obj)
}
builder := fakeclient.NewClientBuilder().WithScheme(testEnv.GetScheme())
if len(tt.dependencies) > 0 {
builder.WithObjects(depObjs...)
}
r := &GitRepositoryReconciler{
Client: builder.Build(),
EventRecorder: record.NewFakeRecorder(32),
}
obj := &sourcev1.GitRepository{
ObjectMeta: metav1.ObjectMeta{
Name: "reconcile-include",
},
Spec: sourcev1.GitRepositorySpec{
Interval: metav1.Duration{Duration: interval},
},
}
for i, incl := range tt.includes {
incl := sourcev1.GitRepositoryInclude{
GitRepositoryRef: meta.LocalObjectReference{Name: incl.name},
FromPath: incl.fromPath,
ToPath: incl.toPath,
}
tt.includes[i].fromPath = incl.GetFromPath()
tt.includes[i].toPath = incl.GetToPath()
obj.Spec.Include = append(obj.Spec.Include, incl)
}
gotArtifactSet, err := r.fetchIncludes(ctx, obj)
g.Expect(err != nil).To(Equal(tt.wantErr))
g.Expect(obj.GetConditions()).To(conditions.MatchConditions(tt.assertConditions))
if !tt.wantErr && gotArtifactSet != nil {
g.Expect(gotArtifactSet.Diff(tt.wantArtifactSet)).To(BeFalse())
}
})
}
}
func TestGitRepositoryReconciler_calculateContentConfigChecksum(t *testing.T) {
g := NewWithT(t)
obj := &sourcev1.GitRepository{}
r := &GitRepositoryReconciler{}
emptyChecksum := r.calculateContentConfigChecksum(obj, nil)
g.Expect(emptyChecksum).To(Equal(emptyContentConfigChecksum))
// Ignore modified.
obj.Spec.Ignore = pointer.String("some-rule")
ignoreModChecksum := r.calculateContentConfigChecksum(obj, nil)
g.Expect(emptyChecksum).ToNot(Equal(ignoreModChecksum))
// Recurse submodules modified.
obj.Spec.RecurseSubmodules = true
submodModChecksum := r.calculateContentConfigChecksum(obj, nil)
g.Expect(ignoreModChecksum).ToNot(Equal(submodModChecksum))
// Include modified.
obj.Spec.Include = []sourcev1.GitRepositoryInclude{
{
GitRepositoryRef: meta.LocalObjectReference{Name: "foo"},
FromPath: "aaa",
ToPath: "bbb",
},
}
artifacts := &artifactSet{
&sourcev1.Artifact{Revision: "some-revision-1", Checksum: "some-checksum-1"},
}
includeModChecksum := r.calculateContentConfigChecksum(obj, artifacts)
g.Expect(submodModChecksum).ToNot(Equal(includeModChecksum))
// Artifact modified revision.
artifacts = &artifactSet{
&sourcev1.Artifact{Revision: "some-revision-2", Checksum: "some-checksum-1"},
}
artifactModChecksum := r.calculateContentConfigChecksum(obj, artifacts)
g.Expect(includeModChecksum).ToNot(Equal(artifactModChecksum))
// Artifact modified checksum.
artifacts = &artifactSet{
&sourcev1.Artifact{Revision: "some-revision-2", Checksum: "some-checksum-2"},
}
artifactCsumModChecksum := r.calculateContentConfigChecksum(obj, artifacts)
g.Expect(artifactModChecksum).ToNot(Equal(artifactCsumModChecksum))
}


@ -48,6 +48,7 @@ import (
sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
"github.com/fluxcd/source-controller/internal/cache"
"github.com/fluxcd/source-controller/internal/features"
"github.com/fluxcd/source-controller/internal/helm/util"
// +kubebuilder:scaffold:imports
)
@ -211,6 +212,7 @@ func TestMain(m *testing.M) {
EventRecorder: record.NewFakeRecorder(32),
Metrics: testMetricsH,
Storage: testStorage,
features: features.FeatureGates(),
}).SetupWithManager(testEnv); err != nil {
panic(fmt.Sprintf("Failed to start GitRepositoryReconciler: %v", err))
}


@ -1656,6 +1656,26 @@ Artifacts as instructed by GitRepositorySpec.Include.</p>
</tr>
<tr>
<td>
<code>contentConfigChecksum</code><br>
<em>
string
</em>
</td>
<td>
<em>(Optional)</em>
<p>ContentConfigChecksum is a checksum of all the configurations related to
the content of the source artifact:
- .spec.ignore
- .spec.recurseSubmodules
- .spec.included and the checksum of the included artifacts
observed in .status.observedGeneration version of the object. This can
be used to determine if the content of the included repository has
changed.
It has the format of <code>&lt;algo&gt;:&lt;checksum&gt;</code>, for example: <code>sha256:&lt;checksum&gt;</code>.</p>
</td>
</tr>
<tr>
<td>
<code>ReconcileRequestStatus</code><br>
<em>
<a href="https://godoc.org/github.com/fluxcd/pkg/apis/meta#ReconcileRequestStatus">


@ -405,9 +405,12 @@ Optimized Git clones decreases resource utilization for GitRepository
reconciliations. It supports both `go-git` and `libgit2` implementations
when cloning repositories using branches or tags.
When enabled, avoids full clone operations by first checking whether
the last revision is still the same at the target repository,
and if that is so, skips the reconciliation.
When enabled, it avoids full Git clone operations by first checking whether
the revision of the last stored artifact is still the head of the remote
repository and none of the other factors that contribute to a change in the
artifact, like ignore rules and included repositories, have changed. If that is
so, the reconciliation is skipped. Otherwise, a full reconciliation is performed
as usual.
This feature is enabled by default. It can be disabled by starting the
controller with the argument `--feature-gates=OptimizedGitClones=false`.
@ -838,6 +841,13 @@ Note that a GitRepository can be [reconciling](#reconciling-gitrepository)
while failing at the same time, for example due to a newly introduced
configuration issue in the GitRepository spec.
### Content Configuration Checksum
The source-controller calculates the SHA256 checksum of the various
configurations of the GitRepository that indicate a change in source and
records it in `.status.contentConfigChecksum`. This field is used to determine
if the source artifact needs to be rebuilt.
### Observed Generation
The source-controller reports an [observed generation][typical-status-properties]


@ -16,16 +16,53 @@ limitations under the License.
package error
import "time"
import (
"time"
corev1 "k8s.io/api/core/v1"
)
// EventTypeNone indicates no error event. It can be used to disable error
// events.
const EventTypeNone = "None"
// Config is the error configuration. It is embedded in the errors and can be
// used to configure how the error should be handled. These configurations
// mostly define actions to be taken on the errors. Not all the configurations
// may apply to every error.
type Config struct {
// Event is the event type of an error. It is used to configure what type of
// event an error should result in.
// Valid values:
// - EventTypeNone
// - corev1.EventTypeNormal
// - corev1.EventTypeWarning
Event string
// Log is used to configure if an error should be logged. The log level is
// derived from the Event type.
// None event - info log
// Normal event - info log
// Warning event - error log
Log bool
// Notification is used to emit an error as a notification alert to a
// a notification service.
Notification bool
// Ignore is used to suppress the error for no-op reconciliations. It may
// be applicable to non-contextual errors only.
Ignore bool
}
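A brief sketch of how these knobs combine for a benign no-op error, matching the settings the GitRepository reconciler applies in this change (the reason string here is illustrative, and the NewGeneric constructor is defined further down in this file):
ge := NewGeneric(fmt.Errorf("no changes since last reconciliation"), "NoOp")
ge.Event = corev1.EventTypeNormal // informational event rather than a warning
ge.Notification = false           // don't alert a notification service
ge.Ignore = true                  // treated as a no-op by result processing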
// Stalling is the reconciliation stalled state error. It contains an error
// and a reason for the stalled condition.
// and a reason for the stalled condition. It is a contextual error, used to
// express the scenario which contributed to the reconciliation result.
type Stalling struct {
// Reason is the stalled condition reason string.
Reason string
// Err is the error that caused stalling. This can be used as the message in
// the stalled condition.
Err error
// Config is the error handler configuration.
Config
}
// Error implements error interface.
@ -38,8 +75,26 @@ func (se *Stalling) Unwrap() error {
return se.Err
}
// NewStalling constructs a new Stalling error with default configuration.
func NewStalling(err error, reason string) *Stalling {
// Stalling errors are not returned to the runtime. Log it explicitly.
// Since this failure requires user interaction, send warning notification.
return &Stalling{
Reason: reason,
Err: err,
Config: Config{
Event: corev1.EventTypeWarning,
Log: true,
Notification: true,
},
}
}
// Event is an error event. It can be used to construct an event to be
// recorded.
// Deprecated: use Generic error with NewGeneric() for the same behavior and
// replace the RecordContextualError with ErrorActionHandler for result
// processing.
type Event struct {
// Reason is the reason for the event error.
Reason string
@ -58,7 +113,10 @@ func (ee *Event) Unwrap() error {
}
// Waiting is the reconciliation wait state error. It contains an error, wait
// duration and a reason for the wait.
// duration and a reason for the wait. It is a contextual error, used to express
// the scenario which contributed to the reconciliation result.
// It is for scenarios where a reconciliation needs to wait for something else
// to take place first.
type Waiting struct {
// RequeueAfter is the wait duration after which to requeue.
RequeueAfter time.Duration
@ -66,9 +124,11 @@ type Waiting struct {
Reason string
// Err is the error that caused the wait.
Err error
// Config is the error handler configuration.
Config
}
// Error implement error interface.
// Error implements error interface.
func (we *Waiting) Error() string {
return we.Err.Error()
}
@ -77,3 +137,53 @@ func (we *Waiting) Error() string {
func (we *Waiting) Unwrap() error {
return we.Err
}
// NewWaiting constructs a new Waiting error with default configuration.
func NewWaiting(err error, reason string) *Waiting {
// Waiting errors are not returned to the runtime. Log them explicitly.
// Since this failure only delays reconciliation, record a normal event
// and skip sending a notification.
return &Waiting{
Reason: reason,
Err: err,
Config: Config{
Event: corev1.EventTypeNormal,
Log: true,
},
}
}
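A short usage note, assuming a caller that waits on a dependency (as the GitRepository reconciler's fetchIncludes does in this change): the requeue interval can be set on the returned error and is honored by the result builder.
e := NewWaiting(fmt.Errorf("no artifact available for include 'a'"), "NoArtifact")
e.RequeueAfter = 30 * time.Second // requeued after this interval by AlwaysRequeueResultBuilder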
// Generic error is a generic reconcile error. It can be used in scenarios that
// don't have any special contextual meaning.
type Generic struct {
// Reason is the reason for the generic error.
Reason string
// Err is the error that caused the generic error.
Err error
// Config is the error handler configuration.
Config
}
// Error implements error interface.
func (g *Generic) Error() string {
return g.Err.Error()
}
// Unwrap returns the underlying error.
func (g *Generic) Unwrap() error {
return g.Err
}
// NewGeneric constructs a new Generic error with default configuration.
func NewGeneric(err error, reason string) *Generic {
// Since this is an error, log it and send a failure notification.
return &Generic{
Reason: reason,
Err: err,
Config: Config{
Event: corev1.EventTypeWarning,
Log: true,
Notification: true,
},
}
}
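The defaults above can be overridden per call site, which is how a no-op outcome is expected to be expressed with this change: construct a Generic error, then flip Ignore (and optionally tone down the reporting). A minimal sketch; the reason and message are illustrative, and the Config fields are reachable through the embedded struct:

```go
// Sketch only: an ignored, non-notifying error for a reconciliation that
// found nothing to do; result processing requeues at the success interval.
ge := serror.NewGeneric(fmt.Errorf("no changes since last reconciliation"), "NoChanges")
ge.Ignore = true
ge.Notification = false
ge.Event = corev1.EventTypeNormal
```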

View File

@ -73,8 +73,19 @@ type AlwaysRequeueResultBuilder struct {
// return values of a controller's Reconcile function.
func (r AlwaysRequeueResultBuilder) BuildRuntimeResult(rr Result, err error) ctrl.Result {
// Handle special errors that contribute to expressing the result.
if e, ok := err.(*serror.Waiting); ok {
switch e := err.(type) {
case *serror.Waiting:
// Safeguard: If no RequeueAfter is set, use the default success
// RequeueAfter value to ensure a requeue takes place after some time.
if e.RequeueAfter == 0 {
return ctrl.Result{RequeueAfter: r.RequeueAfter}
}
return ctrl.Result{RequeueAfter: e.RequeueAfter}
case *serror.Generic:
// no-op error, reconcile at success interval.
if e.Ignore {
return ctrl.Result{RequeueAfter: r.RequeueAfter}
}
}
switch rr {
@ -132,6 +143,17 @@ func ComputeReconcileResult(obj conditions.Setter, res Result, recErr error, rb
conditions.Delete(obj, meta.StalledCondition)
// The reconciler needs to wait and retry. Return no error.
return pOpts, result, nil
case *serror.Generic:
conditions.Delete(obj, meta.StalledCondition)
// If Ignore is set, this is a no-op error: return no error and remove
// the Reconciling condition.
if t.Ignore {
// The current generation has been reconciled successfully with
// no-op result. Update status observed generation.
pOpts = append(pOpts, patch.WithStatusObservedGeneration{})
conditions.Delete(obj, meta.ReconcilingCondition)
return pOpts, result, nil
}
case nil:
// The reconcile didn't result in any error, we are not in stalled
// state. If a requeue is requested, the current generation has not been

View File

@ -135,12 +135,46 @@ func TestComputeReconcileResult(t *testing.T) {
name: "waiting error",
result: ResultEmpty,
recErr: &serror.Waiting{Err: fmt.Errorf("some error"), Reason: "some reason"},
wantResult: ctrl.Result{},
wantResult: ctrl.Result{RequeueAfter: testSuccessInterval},
wantErr: false,
afterFunc: func(t *WithT, obj conditions.Setter, patchOpts *patch.HelperOptions) {
t.Expect(patchOpts.IncludeStatusObservedGeneration).To(BeFalse())
},
},
{
name: "generic error, Stalled=True, remove Stalled",
result: ResultEmpty,
beforeFunc: func(obj conditions.Setter) {
conditions.MarkStalled(obj, "SomeReason", "some message")
},
recErr: &serror.Generic{
Err: fmt.Errorf("some error"), Reason: "some reason",
},
wantResult: ctrl.Result{},
afterFunc: func(t *WithT, obj conditions.Setter, patchOpts *patch.HelperOptions) {
t.Expect(conditions.IsUnknown(obj, meta.StalledCondition)).To(BeTrue())
},
wantErr: true,
},
{
name: "generic ignore error, Reconciling=True, remove Reconciling",
result: ResultEmpty,
beforeFunc: func(obj conditions.Setter) {
conditions.MarkReconciling(obj, "NewRevision", "new revision")
},
recErr: &serror.Generic{
Err: fmt.Errorf("some error"), Reason: "some reason",
Config: serror.Config{
Ignore: true,
},
},
wantResult: ctrl.Result{RequeueAfter: testSuccessInterval},
afterFunc: func(t *WithT, obj conditions.Setter, patchOpts *patch.HelperOptions) {
t.Expect(patchOpts.IncludeStatusObservedGeneration).To(BeTrue())
t.Expect(conditions.IsUnknown(obj, meta.ReconcilingCondition)).To(BeTrue())
},
wantErr: false,
},
{
name: "random error",
result: ResultEmpty,
@ -188,7 +222,9 @@ func TestComputeReconcileResult(t *testing.T) {
for _, o := range pOpts {
o.ApplyToHelper(opts)
}
tt.afterFunc(g, obj, opts)
if tt.afterFunc != nil {
tt.afterFunc(g, obj, opts)
}
})
}
}

View File

@ -25,6 +25,8 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/fluxcd/pkg/apis/meta"
"github.com/fluxcd/pkg/runtime/events"
serror "github.com/fluxcd/source-controller/internal/error"
"github.com/fluxcd/source-controller/internal/object"
"github.com/fluxcd/source-controller/internal/reconcile"
@ -64,3 +66,60 @@ func RecordReconcileReq(ctx context.Context, recorder kuberecorder.EventRecorder
object.SetStatusLastHandledReconcileAt(obj, v)
}
}
// ErrorActionHandler is a ResultProcessor that handles all the actions
// configured in the given error. Logging and event recording are the handled
// actions at present. As more configurations are added to serror.Config, more
// action handlers can be added here.
func ErrorActionHandler(ctx context.Context, recorder kuberecorder.EventRecorder, obj client.Object, _ reconcile.Result, err error) {
switch e := err.(type) {
case *serror.Generic:
if e.Log {
logError(ctx, e.Config.Event, e, e.Error())
}
recordEvent(recorder, obj, e.Config.Event, e.Config.Notification, err, e.Reason)
case *serror.Waiting:
if e.Log {
logError(ctx, e.Config.Event, e, "reconciliation waiting", "reason", e.Err, "duration", e.RequeueAfter)
}
recordEvent(recorder, obj, e.Config.Event, e.Config.Notification, err, e.Reason)
case *serror.Stalling:
if e.Log {
logError(ctx, e.Config.Event, e, "reconciliation stalled")
}
recordEvent(recorder, obj, e.Config.Event, e.Config.Notification, err, e.Reason)
}
}
// logError logs the given error based on the passed error configuration.
func logError(ctx context.Context, eventType string, err error, msg string, keysAndValues ...interface{}) {
switch eventType {
case corev1.EventTypeNormal, serror.EventTypeNone:
ctrl.LoggerFrom(ctx).Info(msg, keysAndValues...)
case corev1.EventTypeWarning:
ctrl.LoggerFrom(ctx).Error(err, msg, keysAndValues...)
}
}
// recordEvent records events based on the passed error configurations.
func recordEvent(recorder kuberecorder.EventRecorder, obj client.Object, eventType string, notification bool, err error, reason string) {
if eventType == serror.EventTypeNone {
return
}
switch eventType {
case corev1.EventTypeNormal:
if notification {
// K8s native event and notification-controller event.
recorder.Eventf(obj, corev1.EventTypeNormal, reason, err.Error())
} else {
// K8s native event only.
recorder.Eventf(obj, events.EventTypeTrace, reason, err.Error())
}
case corev1.EventTypeWarning:
// TODO: Due to the current implementation of the event recorder, all
// the K8s warning events are also sent as notification controller
// notifications. Once the recorder becomes capable of separating the
// two, conditionally record events.
recorder.Eventf(obj, corev1.EventTypeWarning, reason, err.Error())
}
}
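ErrorActionHandler is intended to run as a result processor once the reconcile error is final, turning the Config flags on contextual errors into log lines and events. A minimal sketch of invoking it directly, assuming ctx, recorder and obj (a context.Context, an EventRecorder and the reconciled client.Object) are in scope:

```go
// Sketch only: handle the actions configured on a Generic error; with the
// NewGeneric defaults this logs a warning and records a warning event.
recErr := serror.NewGeneric(fmt.Errorf("some error"), "SomeReason")
ErrorActionHandler(ctx, recorder, obj, reconcile.ResultEmpty, recErr)
```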

View File

@ -107,14 +107,12 @@ type CheckoutStrategy interface {
Checkout(ctx context.Context, path, url string, config *AuthOptions) (*Commit, error)
}
// NoChangesError represents the case in which a Git clone operation
// is attempted, but cancelled as the revision is still the same as
// the one observed on the last successful reconciliation.
type NoChangesError struct {
Message string
ObservedRevision string
}
func (e NoChangesError) Error() string {
return fmt.Sprintf("%s: observed revision '%s'", e.Message, e.ObservedRevision)
// IsConcreteCommit reports whether a given commit is a concrete commit. Concrete
// commits have most of the commit metadata and the commit content. In contrast,
// a partial commit may have only some metadata and no commit content.
func IsConcreteCommit(c Commit) bool {
if c.Hash != nil && c.Encoded != nil {
return true
}
return false
}
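Callers can use this predicate to tell a short-circuited, partial result apart from a real checkout. A minimal sketch of how a consumer might branch on it; checkoutStrategy, tmpDir, repoURL and authOpts are assumed to be in scope:

```go
// Sketch only: skip rebuilding the artifact when the checkout returned a
// partial commit for the already-observed revision.
commit, err := checkoutStrategy.Checkout(ctx, tmpDir, repoURL, authOpts)
if err != nil {
	return err
}
if !git.IsConcreteCommit(*commit) {
	// Nothing changed upstream; the existing artifact is still current.
	return nil
}
// A concrete commit was fetched; continue with archiving its contents.
```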

View File

@ -18,6 +18,7 @@ package git
import (
"testing"
"time"
. "github.com/onsi/gomega"
)
@ -263,3 +264,41 @@ of the commit`,
})
}
}
func TestIsConcreteCommit(t *testing.T) {
tests := []struct {
name string
commit Commit
result bool
}{
{
name: "concrete commit",
commit: Commit{
Hash: Hash("foo"),
Reference: "refs/tags/main",
Author: Signature{
Name: "user", Email: "user@example.com", When: time.Now(),
},
Committer: Signature{
Name: "user", Email: "user@example.com", When: time.Now(),
},
Signature: "signature",
Encoded: []byte("commit-content"),
Message: "commit-message",
},
result: true,
},
{
name: "partial commit",
commit: Commit{Hash: Hash("foo")},
result: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
g.Expect(IsConcreteCommit(tt.commit)).To(Equal(tt.result))
})
}
}

View File

@ -22,6 +22,7 @@ import (
"fmt"
"io"
"sort"
"strings"
"time"
"github.com/Masterminds/semver/v3"
@ -78,10 +79,21 @@ func (c *CheckoutBranch) Checkout(ctx context.Context, path, url string, opts *g
}
if currentRevision != "" && currentRevision == c.LastRevision {
return nil, git.NoChangesError{
Message: "no changes since last reconcilation",
ObservedRevision: currentRevision,
// Construct a partial commit with the existing information.
// Split the revision and take the last part as the hash.
// Example revision: main/43d7eb9c49cdd49b2494efd481aea1166fc22b67
var hash git.Hash
ss := strings.Split(currentRevision, "/")
if len(ss) > 1 {
hash = git.Hash(ss[len(ss)-1])
} else {
hash = git.Hash(ss[0])
}
c := &git.Commit{
Hash: hash,
Reference: plumbing.NewBranchReferenceName(c.Branch).String(),
}
return c, nil
}
}
@ -153,10 +165,21 @@ func (c *CheckoutTag) Checkout(ctx context.Context, path, url string, opts *git.
}
if currentRevision != "" && currentRevision == c.LastRevision {
return nil, git.NoChangesError{
Message: "no changes since last reconcilation",
ObservedRevision: currentRevision,
// Construct a partial commit with the existing information.
// Split the revision and take the last part as the hash.
// Example revision: 6.1.4/bf09377bfd5d3bcac1e895fa8ce52dc76695c060
var hash git.Hash
ss := strings.Split(currentRevision, "/")
if len(ss) > 1 {
hash = git.Hash(ss[len(ss)-1])
} else {
hash = git.Hash(ss[0])
}
c := &git.Commit{
Hash: hash,
Reference: ref.String(),
}
return c, nil
}
}
repo, err := extgogit.PlainCloneContext(ctx, path, false, &extgogit.CloneOptions{

View File

@ -67,32 +67,36 @@ func TestCheckoutBranch_Checkout(t *testing.T) {
}
tests := []struct {
name string
branch string
filesCreated map[string]string
expectedCommit string
expectedErr string
lastRevision string
name string
branch string
filesCreated map[string]string
lastRevision string
expectedCommit string
expectedConcreteCommit bool
expectedErr string
}{
{
name: "Default branch",
branch: "master",
filesCreated: map[string]string{"branch": "init"},
expectedCommit: firstCommit.String(),
name: "Default branch",
branch: "master",
filesCreated: map[string]string{"branch": "init"},
expectedCommit: firstCommit.String(),
expectedConcreteCommit: true,
},
{
name: "skip clone if LastRevision hasn't changed",
branch: "master",
filesCreated: map[string]string{"branch": "init"},
expectedErr: fmt.Sprintf("no changes since last reconcilation: observed revision 'master/%s'", firstCommit.String()),
lastRevision: fmt.Sprintf("master/%s", firstCommit.String()),
name: "skip clone if LastRevision hasn't changed",
branch: "master",
filesCreated: map[string]string{"branch": "init"},
lastRevision: fmt.Sprintf("master/%s", firstCommit.String()),
expectedCommit: firstCommit.String(),
expectedConcreteCommit: false,
},
{
name: "Other branch - revision has changed",
branch: "test",
filesCreated: map[string]string{"branch": "second"},
expectedCommit: secondCommit.String(),
lastRevision: fmt.Sprintf("master/%s", firstCommit.String()),
name: "Other branch - revision has changed",
branch: "test",
filesCreated: map[string]string{"branch": "second"},
lastRevision: fmt.Sprintf("master/%s", firstCommit.String()),
expectedCommit: secondCommit.String(),
expectedConcreteCommit: true,
},
{
name: "Non existing branch",
@ -120,59 +124,65 @@ func TestCheckoutBranch_Checkout(t *testing.T) {
}
g.Expect(err).ToNot(HaveOccurred())
g.Expect(cc.String()).To(Equal(tt.branch + "/" + tt.expectedCommit))
g.Expect(git.IsConcreteCommit(*cc)).To(Equal(tt.expectedConcreteCommit))
for k, v := range tt.filesCreated {
g.Expect(filepath.Join(tmpDir, k)).To(BeARegularFile())
g.Expect(os.ReadFile(filepath.Join(tmpDir, k))).To(BeEquivalentTo(v))
if tt.expectedConcreteCommit {
for k, v := range tt.filesCreated {
g.Expect(filepath.Join(tmpDir, k)).To(BeARegularFile())
g.Expect(os.ReadFile(filepath.Join(tmpDir, k))).To(BeEquivalentTo(v))
}
}
})
}
}
func TestCheckoutTag_Checkout(t *testing.T) {
type testTag struct {
name string
annotated bool
}
tests := []struct {
name string
tag string
annotated bool
checkoutTag string
expectTag string
expectErr string
lastRev string
setLastRev bool
name string
tagsInRepo []testTag
checkoutTag string
lastRevTag string
expectConcreteCommit bool
expectErr string
}{
{
name: "Tag",
tag: "tag-1",
checkoutTag: "tag-1",
expectTag: "tag-1",
name: "Tag",
tagsInRepo: []testTag{{"tag-1", false}},
checkoutTag: "tag-1",
expectConcreteCommit: true,
},
{
name: "Skip Tag if last revision hasn't changed",
tag: "tag-2",
checkoutTag: "tag-2",
setLastRev: true,
expectErr: "no changes since last reconcilation",
name: "Annotated",
tagsInRepo: []testTag{{"annotated", true}},
checkoutTag: "annotated",
expectConcreteCommit: true,
},
{
name: "Last revision changed",
tag: "tag-3",
checkoutTag: "tag-3",
expectTag: "tag-3",
lastRev: "tag-3/<fake-hash>",
},
{
name: "Annotated",
tag: "annotated",
annotated: true,
checkoutTag: "annotated",
expectTag: "annotated",
},
{
name: "Non existing tag",
tag: "tag-1",
name: "Non existing tag",
// Without this go-git returns error "remote repository is empty".
tagsInRepo: []testTag{{"tag-1", false}},
checkoutTag: "invalid",
expectErr: "couldn't find remote ref \"refs/tags/invalid\"",
},
{
name: "Skip clone - last revision unchanged",
tagsInRepo: []testTag{{"tag-1", false}},
checkoutTag: "tag-1",
lastRevTag: "tag-1",
expectConcreteCommit: false,
},
{
name: "Last revision changed",
tagsInRepo: []testTag{{"tag-1", false}, {"tag-2", false}},
checkoutTag: "tag-2",
lastRevTag: "tag-1",
expectConcreteCommit: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
@ -183,32 +193,37 @@ func TestCheckoutTag_Checkout(t *testing.T) {
t.Fatal(err)
}
var h plumbing.Hash
var tagHash *plumbing.Reference
if tt.tag != "" {
h, err = commitFile(repo, "tag", tt.tag, time.Now())
if err != nil {
t.Fatal(err)
}
tagHash, err = tag(repo, h, !tt.annotated, tt.tag, time.Now())
if err != nil {
t.Fatal(err)
// Collect tags and their associated commit hash for later
// reference.
tagCommits := map[string]string{}
// Populate the repo with commits and tags.
if tt.tagsInRepo != nil {
for _, tr := range tt.tagsInRepo {
h, err := commitFile(repo, "tag", tr.name, time.Now())
if err != nil {
t.Fatal(err)
}
_, err = tag(repo, h, tr.annotated, tr.name, time.Now())
if err != nil {
t.Fatal(err)
}
tagCommits[tr.name] = h.String()
}
}
tag := CheckoutTag{
checkoutTag := CheckoutTag{
Tag: tt.checkoutTag,
}
if tt.setLastRev {
tag.LastRevision = fmt.Sprintf("%s/%s", tt.tag, tagHash.Hash().String())
// If last revision is provided, configure it.
if tt.lastRevTag != "" {
lc := tagCommits[tt.lastRevTag]
checkoutTag.LastRevision = fmt.Sprintf("%s/%s", tt.lastRevTag, lc)
}
if tt.lastRev != "" {
tag.LastRevision = tt.lastRev
}
tmpDir := t.TempDir()
cc, err := tag.Checkout(context.TODO(), tmpDir, path, nil)
cc, err := checkoutTag.Checkout(context.TODO(), tmpDir, path, nil)
if tt.expectErr != "" {
g.Expect(err).ToNot(BeNil())
g.Expect(err.Error()).To(ContainSubstring(tt.expectErr))
@ -216,10 +231,17 @@ func TestCheckoutTag_Checkout(t *testing.T) {
return
}
// Check successful checkout results.
g.Expect(git.IsConcreteCommit(*cc)).To(Equal(tt.expectConcreteCommit))
targetTagHash := tagCommits[tt.checkoutTag]
g.Expect(err).ToNot(HaveOccurred())
g.Expect(cc.String()).To(Equal(tt.expectTag + "/" + h.String()))
g.Expect(filepath.Join(tmpDir, "tag")).To(BeARegularFile())
g.Expect(os.ReadFile(filepath.Join(tmpDir, "tag"))).To(BeEquivalentTo(tt.tag))
g.Expect(cc.String()).To(Equal(tt.checkoutTag + "/" + targetTagHash))
// Check file content only when there's an actual checkout.
if tt.lastRevTag != tt.checkoutTag {
g.Expect(filepath.Join(tmpDir, "tag")).To(BeARegularFile())
g.Expect(os.ReadFile(filepath.Join(tmpDir, "tag"))).To(BeEquivalentTo(tt.checkoutTag))
}
})
}
}

View File

@ -34,6 +34,8 @@ import (
"github.com/fluxcd/source-controller/pkg/git/libgit2/managed"
)
const defaultRemoteName = "origin"
// CheckoutStrategyForOptions returns the git.CheckoutStrategy for the given
// git.CheckoutOptions.
func CheckoutStrategyForOptions(ctx context.Context, opt git.CheckoutOptions) git.CheckoutStrategy {
@ -67,26 +69,43 @@ type CheckoutBranch struct {
func (c *CheckoutBranch) Checkout(ctx context.Context, path, url string, opts *git.AuthOptions) (_ *git.Commit, err error) {
defer recoverPanic(&err)
repo, remote, free, err := getBlankRepoAndRemote(ctx, path, url, opts)
remoteCallBacks := RemoteCallbacks(ctx, opts)
proxyOpts := &git2go.ProxyOptions{Type: git2go.ProxyTypeAuto}
repo, remote, err := initializeRepoWithRemote(ctx, path, url, opts)
if err != nil {
return nil, err
}
defer free()
// Open remote connection.
err = remote.ConnectFetch(&remoteCallBacks, proxyOpts, nil)
if err != nil {
remote.Free()
repo.Free()
return nil, fmt.Errorf("unable to fetch-connect to remote '%s': %w", managed.EffectiveURL(url), gitutil.LibGit2Error(err))
}
defer func() {
remote.Disconnect()
remote.Free()
repo.Free()
}()
// When the last observed revision is set, check whether it is still the
// same at the remote branch. If so, short-circuit the clone operation here.
if c.LastRevision != "" {
heads, err := remote.Ls(c.Branch)
if err != nil {
return nil, fmt.Errorf("unable to remote ls for '%s': %w", managed.EffectiveURL(url), gitutil.LibGit2Error(err))
}
if len(heads) > 0 {
currentRevision := fmt.Sprintf("%s/%s", c.Branch, heads[0].Id.String())
hash := heads[0].Id.String()
currentRevision := fmt.Sprintf("%s/%s", c.Branch, hash)
if currentRevision == c.LastRevision {
return nil, git.NoChangesError{
Message: "no changes since last reconciliation",
ObservedRevision: currentRevision,
// Construct a partial commit with the existing information.
c := &git.Commit{
Hash: git.Hash(hash),
Reference: "refs/heads/" + c.Branch,
}
return c, nil
}
}
}
@ -95,7 +114,7 @@ func (c *CheckoutBranch) Checkout(ctx context.Context, path, url string, opts *g
err = remote.Fetch([]string{c.Branch},
&git2go.FetchOptions{
DownloadTags: git2go.DownloadTagsNone,
RemoteCallbacks: RemoteCallbacks(ctx, opts),
RemoteCallbacks: remoteCallBacks,
ProxyOptions: git2go.ProxyOptions{Type: git2go.ProxyTypeAuto},
},
"")
@ -151,33 +170,53 @@ type CheckoutTag struct {
func (c *CheckoutTag) Checkout(ctx context.Context, path, url string, opts *git.AuthOptions) (_ *git.Commit, err error) {
defer recoverPanic(&err)
repo, remote, free, err := getBlankRepoAndRemote(ctx, path, url, opts)
remoteCallBacks := RemoteCallbacks(ctx, opts)
proxyOpts := &git2go.ProxyOptions{Type: git2go.ProxyTypeAuto}
repo, remote, err := initializeRepoWithRemote(ctx, path, url, opts)
if err != nil {
return nil, err
}
defer free()
// Open remote connection.
err = remote.ConnectFetch(&remoteCallBacks, proxyOpts, nil)
if err != nil {
remote.Free()
repo.Free()
return nil, fmt.Errorf("unable to fetch-connect to remote '%s': %w", managed.EffectiveURL(url), gitutil.LibGit2Error(err))
}
defer func() {
remote.Disconnect()
remote.Free()
repo.Free()
}()
// When the last observed revision is set, check whether it is still the
// same at the remote tag. If so, short-circuit the clone operation here.
if c.LastRevision != "" {
heads, err := remote.Ls(c.Tag)
if err != nil {
return nil, fmt.Errorf("unable to remote ls for '%s': %w", managed.EffectiveURL(url), gitutil.LibGit2Error(err))
}
if len(heads) > 0 {
currentRevision := fmt.Sprintf("%s/%s", c.Tag, heads[0].Id.String())
hash := heads[0].Id.String()
currentRevision := fmt.Sprintf("%s/%s", c.Tag, hash)
var same bool
if currentRevision == c.LastRevision {
same = true
} else if len(heads) > 1 {
currentAnnotatedRevision := fmt.Sprintf("%s/%s", c.Tag, heads[1].Id.String())
hash = heads[1].Id.String()
currentAnnotatedRevision := fmt.Sprintf("%s/%s", c.Tag, hash)
if currentAnnotatedRevision == c.LastRevision {
same = true
}
}
if same {
return nil, git.NoChangesError{
Message: "no changes since last reconciliation",
ObservedRevision: currentRevision,
// Construct a partial commit with the existing information.
c := &git.Commit{
Hash: git.Hash(hash),
Reference: "refs/tags/" + c.Tag,
}
return c, nil
}
}
}
@ -185,8 +224,8 @@ func (c *CheckoutTag) Checkout(ctx context.Context, path, url string, opts *git.
err = remote.Fetch([]string{c.Tag},
&git2go.FetchOptions{
DownloadTags: git2go.DownloadTagsAuto,
RemoteCallbacks: RemoteCallbacks(ctx, opts),
ProxyOptions: git2go.ProxyOptions{Type: git2go.ProxyTypeAuto},
RemoteCallbacks: remoteCallBacks,
ProxyOptions: *proxyOpts,
},
"")
@ -408,34 +447,34 @@ func buildSignature(s *git2go.Signature) git.Signature {
}
}
// getBlankRepoAndRemote returns a newly initialized repository, and a remote connected to the provided url.
// Callers must call the returning function to free all git2go objects.
func getBlankRepoAndRemote(ctx context.Context, path, url string, opts *git.AuthOptions) (*git2go.Repository, *git2go.Remote, func(), error) {
// initializeRepoWithRemote initializes or opens a repository at the given path
// and configures it with the given remote "origin" URL. If a remote already
// exists with a different URL, it returns an error.
func initializeRepoWithRemote(ctx context.Context, path, url string, opts *git.AuthOptions) (*git2go.Repository, *git2go.Remote, error) {
repo, err := git2go.InitRepository(path, false)
if err != nil {
return nil, nil, nil, fmt.Errorf("unable to init repository for '%s': %w", managed.EffectiveURL(url), gitutil.LibGit2Error(err))
return nil, nil, fmt.Errorf("unable to init repository for '%s': %w", managed.EffectiveURL(url), gitutil.LibGit2Error(err))
}
remote, err := repo.Remotes.Create("origin", url)
remote, err := repo.Remotes.Create(defaultRemoteName, url)
if err != nil {
repo.Free()
return nil, nil, nil, fmt.Errorf("unable to create remote for '%s': %w", managed.EffectiveURL(url), gitutil.LibGit2Error(err))
// If the remote already exists, look it up.
if git2go.IsErrorCode(err, git2go.ErrorCodeExists) {
remote, err = repo.Remotes.Lookup(defaultRemoteName)
if err != nil {
repo.Free()
return nil, nil, fmt.Errorf("unable to create or lookup remote '%s'", defaultRemoteName)
}
if remote.Url() != url {
repo.Free()
return nil, nil, fmt.Errorf("remote '%s' with different address '%s' already exists", defaultRemoteName, remote.Url())
}
} else {
repo.Free()
return nil, nil, fmt.Errorf("unable to create remote for '%s': %w", managed.EffectiveURL(url), gitutil.LibGit2Error(err))
}
}
callBacks := RemoteCallbacks(ctx, opts)
err = remote.ConnectFetch(&callBacks, &git2go.ProxyOptions{Type: git2go.ProxyTypeAuto}, nil)
if err != nil {
remote.Free()
repo.Free()
return nil, nil, nil, fmt.Errorf("unable to fetch-connect to remote '%s': %w", managed.EffectiveURL(url), gitutil.LibGit2Error(err))
}
free := func() {
remote.Disconnect()
remote.Free()
repo.Free()
}
return repo, remote, free, nil
return repo, remote, nil
}
func recoverPanic(err *error) {

View File

@ -25,6 +25,7 @@ import (
"testing"
"time"
"github.com/fluxcd/source-controller/pkg/git"
git2go "github.com/libgit2/git2go/v33"
. "github.com/onsi/gomega"
)
@ -76,44 +77,49 @@ func TestCheckoutBranch_Checkout(t *testing.T) {
}
tests := []struct {
name string
branch string
filesCreated map[string]string
expectedCommit string
expectedErr string
lastRevision string
name string
branch string
filesCreated map[string]string
lastRevision string
expectedCommit string
expectedConcreteCommit bool
expectedErr string
}{
{
name: "Default branch",
branch: defaultBranch,
filesCreated: map[string]string{"branch": "second"},
expectedCommit: secondCommit.String(),
name: "Default branch",
branch: defaultBranch,
filesCreated: map[string]string{"branch": "second"},
expectedCommit: secondCommit.String(),
expectedConcreteCommit: true,
},
{
name: "Other branch",
branch: "test",
filesCreated: map[string]string{"branch": "init"},
expectedCommit: firstCommit.String(),
name: "Other branch",
branch: "test",
filesCreated: map[string]string{"branch": "init"},
expectedCommit: firstCommit.String(),
expectedConcreteCommit: true,
},
{
name: "Non existing branch",
branch: "invalid",
expectedErr: "reference 'refs/remotes/origin/invalid' not found",
name: "Non existing branch",
branch: "invalid",
expectedErr: "reference 'refs/remotes/origin/invalid' not found",
expectedConcreteCommit: true,
},
{
name: "skip clone - lastRevision hasn't changed",
branch: defaultBranch,
filesCreated: map[string]string{"branch": "second"},
expectedCommit: secondCommit.String(),
lastRevision: fmt.Sprintf("%s/%s", defaultBranch, secondCommit.String()),
expectedErr: fmt.Sprintf("no changes since last reconciliation: observed revision '%s/%s'", defaultBranch, secondCommit.String()),
name: "skip clone - lastRevision hasn't changed",
branch: defaultBranch,
filesCreated: map[string]string{"branch": "second"},
lastRevision: fmt.Sprintf("%s/%s", defaultBranch, secondCommit.String()),
expectedCommit: secondCommit.String(),
expectedConcreteCommit: false,
},
{
name: "lastRevision is different",
branch: defaultBranch,
filesCreated: map[string]string{"branch": "second"},
expectedCommit: secondCommit.String(),
lastRevision: fmt.Sprintf("%s/%s", defaultBranch, firstCommit.String()),
name: "lastRevision is different",
branch: defaultBranch,
filesCreated: map[string]string{"branch": "second"},
lastRevision: fmt.Sprintf("%s/%s", defaultBranch, firstCommit.String()),
expectedCommit: secondCommit.String(),
expectedConcreteCommit: true,
},
}
@ -136,37 +142,43 @@ func TestCheckoutBranch_Checkout(t *testing.T) {
}
g.Expect(err).ToNot(HaveOccurred())
g.Expect(cc.String()).To(Equal(tt.branch + "/" + tt.expectedCommit))
g.Expect(git.IsConcreteCommit(*cc)).To(Equal(tt.expectedConcreteCommit))
for k, v := range tt.filesCreated {
g.Expect(filepath.Join(tmpDir, k)).To(BeARegularFile())
g.Expect(os.ReadFile(filepath.Join(tmpDir, k))).To(BeEquivalentTo(v))
if tt.expectedConcreteCommit {
for k, v := range tt.filesCreated {
g.Expect(filepath.Join(tmpDir, k)).To(BeARegularFile())
g.Expect(os.ReadFile(filepath.Join(tmpDir, k))).To(BeEquivalentTo(v))
}
}
})
}
}
func TestCheckoutTag_Checkout(t *testing.T) {
type testTag struct {
name string
annotated bool
}
tests := []struct {
name string
tag string
annotated bool
checkoutTag string
expectTag string
expectErr string
lastRevision bool
name string
tagsInRepo []testTag
checkoutTag string
lastRevTag string
expectErr string
expectConcreteCommit bool
}{
{
name: "Tag",
tag: "tag-1",
checkoutTag: "tag-1",
expectTag: "tag-1",
name: "Tag",
tagsInRepo: []testTag{{"tag-1", false}},
checkoutTag: "tag-1",
expectConcreteCommit: true,
},
{
name: "Annotated",
tag: "annotated",
annotated: true,
checkoutTag: "annotated",
expectTag: "annotated",
name: "Annotated",
tagsInRepo: []testTag{{"annotated", true}},
checkoutTag: "annotated",
expectConcreteCommit: true,
},
{
name: "Non existing tag",
@ -174,19 +186,18 @@ func TestCheckoutTag_Checkout(t *testing.T) {
expectErr: "unable to find 'invalid': no reference found for shorthand 'invalid'",
},
{
name: "skip clone - last revision is unchanged",
tag: "tag-1",
checkoutTag: "tag-1",
expectTag: "tag-1",
lastRevision: true,
expectErr: "no changes since last reconciliation",
name: "Skip clone - last revision unchanged",
tagsInRepo: []testTag{{"tag-1", false}},
checkoutTag: "tag-1",
lastRevTag: "tag-1",
expectConcreteCommit: false,
},
{
name: "last revision changed",
tag: "tag-1",
checkoutTag: "tag-1",
expectTag: "tag-2",
lastRevision: true,
name: "Last revision changed",
tagsInRepo: []testTag{{"tag-1", false}, {"tag-2", false}},
checkoutTag: "tag-2",
lastRevTag: "tag-1",
expectConcreteCommit: true,
},
}
for _, tt := range tests {
@ -199,68 +210,57 @@ func TestCheckoutTag_Checkout(t *testing.T) {
}
defer repo.Free()
var commit *git2go.Commit
if tt.tag != "" {
c, err := commitFile(repo, "tag", tt.tag, time.Now())
if err != nil {
t.Fatal(err)
}
if commit, err = repo.LookupCommit(c); err != nil {
t.Fatal(err)
}
_, err = tag(repo, commit.Id(), !tt.annotated, tt.tag, time.Now())
if err != nil {
t.Fatal(err)
}
}
// Collect tags and their associated commit for later reference.
tagCommits := map[string]*git2go.Commit{}
checkoutTag := CheckoutTag{
Tag: tt.checkoutTag,
}
tmpDir := t.TempDir()
cc, err := checkoutTag.Checkout(context.TODO(), tmpDir, repo.Path(), nil)
if tt.expectErr != "" {
if tt.lastRevision {
tmpDir, _ = os.MkdirTemp("", "test")
defer os.RemoveAll(tmpDir)
checkoutTag.LastRevision = cc.String()
cc, err = checkoutTag.Checkout(context.TODO(), tmpDir, repo.Path(), nil)
}
g.Expect(err).To(HaveOccurred())
g.Expect(err.Error()).To(ContainSubstring(tt.expectErr))
g.Expect(cc).To(BeNil())
return
}
if tt.lastRevision {
checkoutTag.LastRevision = fmt.Sprintf("%s/%s", tt.tag, commit.Id().String())
checkoutTag.Tag = tt.expectTag
if tt.tag != "" {
c, err := commitFile(repo, "tag", "changed tag", time.Now())
// Populate the repo with commits and tags.
if tt.tagsInRepo != nil {
for _, tr := range tt.tagsInRepo {
var commit *git2go.Commit
c, err := commitFile(repo, "tag", tr.name, time.Now())
if err != nil {
t.Fatal(err)
}
if commit, err = repo.LookupCommit(c); err != nil {
t.Fatal(err)
}
_, err = tag(repo, commit.Id(), !tt.annotated, tt.expectTag, time.Now())
_, err = tag(repo, commit.Id(), tr.annotated, tr.name, time.Now())
if err != nil {
t.Fatal(err)
}
tmpDir, _ = os.MkdirTemp("", "test")
defer os.RemoveAll(tmpDir)
cc, err = checkoutTag.Checkout(context.TODO(), tmpDir, repo.Path(), nil)
tagCommits[tr.name] = commit
}
}
checkoutTag := CheckoutTag{
Tag: tt.checkoutTag,
}
// If last revision is provided, configure it.
if tt.lastRevTag != "" {
lc := tagCommits[tt.lastRevTag]
checkoutTag.LastRevision = fmt.Sprintf("%s/%s", tt.lastRevTag, lc.Id().String())
}
tmpDir := t.TempDir()
cc, err := checkoutTag.Checkout(context.TODO(), tmpDir, repo.Path(), nil)
if tt.expectErr != "" {
g.Expect(err).To(HaveOccurred())
g.Expect(err.Error()).To(ContainSubstring(tt.expectErr))
g.Expect(cc).To(BeNil())
return
}
// Check successful checkout results.
g.Expect(git.IsConcreteCommit(*cc)).To(Equal(tt.expectConcreteCommit))
targetTagCommit := tagCommits[tt.checkoutTag]
g.Expect(err).ToNot(HaveOccurred())
g.Expect(cc.String()).To(Equal(tt.expectTag + "/" + commit.Id().String()))
g.Expect(filepath.Join(tmpDir, "tag")).To(BeARegularFile())
if tt.lastRevision {
g.Expect(os.ReadFile(filepath.Join(tmpDir, "tag"))).To(BeEquivalentTo("changed tag"))
} else {
g.Expect(os.ReadFile(filepath.Join(tmpDir, "tag"))).To(BeEquivalentTo(tt.tag))
g.Expect(cc.String()).To(Equal(tt.checkoutTag + "/" + targetTagCommit.Id().String()))
// Check file content only when there's an actual checkout.
if tt.lastRevTag != tt.checkoutTag {
g.Expect(filepath.Join(tmpDir, "tag")).To(BeARegularFile())
g.Expect(os.ReadFile(filepath.Join(tmpDir, "tag"))).To(BeEquivalentTo(tt.checkoutTag))
}
})
}
@ -510,3 +510,37 @@ func mockSignature(time time.Time) *git2go.Signature {
When: time,
}
}
func TestInitializeRepoWithRemote(t *testing.T) {
g := NewWithT(t)
tmp := t.TempDir()
ctx := context.TODO()
testRepoURL := "https://example.com/foo/bar"
testRepoURL2 := "https://example.com/foo/baz"
authOpts, err := git.AuthOptionsWithoutSecret(testRepoURL)
g.Expect(err).ToNot(HaveOccurred())
authOpts2, err := git.AuthOptionsWithoutSecret(testRepoURL2)
g.Expect(err).ToNot(HaveOccurred())
// Fresh initialization.
repo, remote, err := initializeRepoWithRemote(ctx, tmp, testRepoURL, authOpts)
g.Expect(err).ToNot(HaveOccurred())
g.Expect(repo.IsBare()).To(BeFalse())
g.Expect(remote.Name()).To(Equal(defaultRemoteName))
g.Expect(remote.Url()).To(Equal(testRepoURL))
remote.Free()
repo.Free()
// Reinitialize to ensure it reuses the existing origin.
repo, remote, err = initializeRepoWithRemote(ctx, tmp, testRepoURL, authOpts)
g.Expect(err).ToNot(HaveOccurred())
g.Expect(repo.IsBare()).To(BeFalse())
g.Expect(remote.Name()).To(Equal(defaultRemoteName))
g.Expect(remote.Url()).To(Equal(testRepoURL))
remote.Free()
repo.Free()
// Reinitialize with a different remote URL for existing origin.
_, _, err = initializeRepoWithRemote(ctx, tmp, testRepoURL2, authOpts2)
g.Expect(err).To(HaveOccurred())
}

View File

@ -49,8 +49,7 @@ type CheckoutOptions struct {
// not supported by all Implementations.
RecurseSubmodules bool
// LastRevision holds the revision observed on the last successful
// reconciliation.
// LastRevision holds the last observed revision of the local repository.
// It is used to skip clone operations when no changes were detected.
LastRevision string
}
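For completeness, a minimal sketch of how LastRevision is meant to be threaded through: it carries the previously observed `<ref>/<hash>` revision so the checkout implementations above can short-circuit. The variables, the Branch field of the full options struct, and the use of the CheckoutStrategyForOptions constructor shown earlier are illustrative assumptions:

```go
// Sketch only: pass the last observed revision (e.g. "master/<hash>") so an
// unchanged remote yields a partial commit instead of a full clone.
opts := git.CheckoutOptions{
	Branch:       "master",
	LastRevision: lastObservedRevision, // previously observed "<ref>/<hash>"
}
strategy := CheckoutStrategyForOptions(ctx, opts)
commit, err := strategy.Checkout(ctx, tmpDir, repoURL, authOpts)
```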